diff --git a/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..02f50806de62deb5ab79cc57218cfa04a83cd7d6 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa0433a7a7c158702a1b2b176fec02c78490e6c9b78b7d112ad9b6f86cdf7a36 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..75961fa4de0350b5741f36033cafe485ea3c7662 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c02430b06b5f09af241a4e5cee182e9e1d5715c08acdd027be18ffa44f13abc +size 33555627 diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__init__.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f9cfe07dc0e88e6b692ec0c5450d44acfd5594a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/__init__.py @@ -0,0 +1,52 @@ +""" +The :mod:`sklearn.decomposition` module includes matrix decomposition +algorithms, including among others PCA, NMF or ICA. Most of the algorithms of +this module can be regarded as dimensionality reduction techniques. +""" + + +from ..utils.extmath import randomized_svd +from ._dict_learning import ( + DictionaryLearning, + MiniBatchDictionaryLearning, + SparseCoder, + dict_learning, + dict_learning_online, + sparse_encode, +) +from ._factor_analysis import FactorAnalysis +from ._fastica import FastICA, fastica +from ._incremental_pca import IncrementalPCA +from ._kernel_pca import KernelPCA +from ._lda import LatentDirichletAllocation +from ._nmf import ( + NMF, + MiniBatchNMF, + non_negative_factorization, +) +from ._pca import PCA +from ._sparse_pca import MiniBatchSparsePCA, SparsePCA +from ._truncated_svd import TruncatedSVD + +__all__ = [ + "DictionaryLearning", + "FastICA", + "IncrementalPCA", + "KernelPCA", + "MiniBatchDictionaryLearning", + "MiniBatchNMF", + "MiniBatchSparsePCA", + "NMF", + "PCA", + "SparseCoder", + "SparsePCA", + "dict_learning", + "dict_learning_online", + "fastica", + "non_negative_factorization", + "randomized_svd", + "sparse_encode", + "FactorAnalysis", + "TruncatedSVD", + "LatentDirichletAllocation", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a107d17d19edc2693653662f6cb554ca4919c2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f58dd24150acdc27627d60411ea6f79bd0158373 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c4c5373a8b4732ea9aba4e5df664e64f7b34de0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98f0711dbc637890625dd239bb5d2981706f3b50 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_base.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa720751774f794dc92263c66034966cca3307a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_base.py @@ -0,0 +1,193 @@ +"""Principal Component Analysis Base Classes""" + +# Author: Alexandre Gramfort +# Olivier Grisel +# Mathieu Blondel +# Denis A. Engemann +# Kyle Kastner +# +# License: BSD 3 clause + +from abc import ABCMeta, abstractmethod + +import numpy as np +from scipy import linalg +from scipy.sparse import issparse + +from ..base import BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin +from ..utils._array_api import _add_to_diagonal, device, get_namespace +from ..utils.sparsefuncs import _implicit_column_offset +from ..utils.validation import check_is_fitted + + +class _BasePCA( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta +): + """Base class for PCA methods. + + Warning: This class should not be used directly. + Use derived classes instead. + """ + + def get_covariance(self): + """Compute data covariance with the generative model. + + ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` + where S**2 contains the explained variances, and sigma2 contains the + noise variances. + + Returns + ------- + cov : array of shape=(n_features, n_features) + Estimated covariance of data. + """ + xp, _ = get_namespace(self.components_) + + components_ = self.components_ + exp_var = self.explained_variance_ + if self.whiten: + components_ = components_ * xp.sqrt(exp_var[:, np.newaxis]) + exp_var_diff = exp_var - self.noise_variance_ + exp_var_diff = xp.where( + exp_var > self.noise_variance_, + exp_var_diff, + xp.asarray(0.0, device=device(exp_var)), + ) + cov = (components_.T * exp_var_diff) @ components_ + _add_to_diagonal(cov, self.noise_variance_, xp) + return cov + + def get_precision(self): + """Compute data precision matrix with the generative model. + + Equals the inverse of the covariance but computed with + the matrix inversion lemma for efficiency. + + Returns + ------- + precision : array, shape=(n_features, n_features) + Estimated precision of data. 
+ """ + xp, is_array_api_compliant = get_namespace(self.components_) + + n_features = self.components_.shape[1] + + # handle corner cases first + if self.n_components_ == 0: + return xp.eye(n_features) / self.noise_variance_ + + if is_array_api_compliant: + linalg_inv = xp.linalg.inv + else: + linalg_inv = linalg.inv + + if self.noise_variance_ == 0.0: + return linalg_inv(self.get_covariance()) + + # Get precision using matrix inversion lemma + components_ = self.components_ + exp_var = self.explained_variance_ + if self.whiten: + components_ = components_ * xp.sqrt(exp_var[:, np.newaxis]) + exp_var_diff = exp_var - self.noise_variance_ + exp_var_diff = xp.where( + exp_var > self.noise_variance_, + exp_var_diff, + xp.asarray(0.0, device=device(exp_var)), + ) + precision = components_ @ components_.T / self.noise_variance_ + _add_to_diagonal(precision, 1.0 / exp_var_diff, xp) + precision = components_.T @ linalg_inv(precision) @ components_ + precision /= -(self.noise_variance_**2) + _add_to_diagonal(precision, 1.0 / self.noise_variance_, xp) + return precision + + @abstractmethod + def fit(self, X, y=None): + """Placeholder for fit. Subclasses should implement this method! + + Fit the model with X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + self : object + Returns the instance itself. + """ + + def transform(self, X): + """Apply dimensionality reduction to X. + + X is projected on the first principal components previously extracted + from a training set. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : array-like of shape (n_samples, n_components) + Projection of X in the first principal components, where `n_samples` + is the number of samples and `n_components` is the number of the components. + """ + xp, _ = get_namespace(X) + + check_is_fitted(self) + + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[xp.float64, xp.float32], reset=False + ) + if self.mean_ is not None: + if issparse(X): + X = _implicit_column_offset(X, self.mean_) + else: + X = X - self.mean_ + X_transformed = X @ self.components_.T + if self.whiten: + X_transformed /= xp.sqrt(self.explained_variance_) + return X_transformed + + def inverse_transform(self, X): + """Transform data back to its original space. + + In other words, return an input `X_original` whose transform would be X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + New data, where `n_samples` is the number of samples + and `n_components` is the number of components. + + Returns + ------- + X_original array-like of shape (n_samples, n_features) + Original data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Notes + ----- + If whitening is enabled, inverse_transform will compute the + exact inverse operation, which includes reversing whitening. 
+ """ + xp, _ = get_namespace(X) + + if self.whiten: + scaled_components = ( + xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_ + ) + return X @ scaled_components + self.mean_ + else: + return X @ self.components_ + self.mean_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4a2a56a99276d26b8a4ed781065b1e81071808c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py new file mode 100644 index 0000000000000000000000000000000000000000..51350aa5e05bdbb0a9a8b691837d2476f3198981 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py @@ -0,0 +1,2301 @@ +""" Dictionary learning. +""" +# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort +# License: BSD 3 clause + +import itertools +import sys +import time +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from joblib import effective_n_jobs +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..linear_model import Lars, Lasso, LassoLars, orthogonal_mp_gram +from ..utils import check_array, check_random_state, gen_batches, gen_even_slices +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.extmath import randomized_svd, row_norms, svd_flip +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted + + +def _check_positive_coding(method, positive): + if positive and method in ["omp", "lars"]: + raise ValueError( + "Positive constraint not supported for '{}' coding method.".format(method) + ) + + +def _sparse_encode_precomputed( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + regularization=None, + copy_cov=True, + init=None, + max_iter=1000, + verbose=0, + positive=False, +): + """Generic sparse coding with precomputed Gram and/or covariance matrices. + + Each row of the result is the solution to a Lasso problem. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data matrix. + + dictionary : ndarray of shape (n_components, n_features) + The dictionary matrix against which to solve the sparse coding of + the data. Some of the algorithms assume normalized rows. + + gram : ndarray of shape (n_components, n_components), default=None + Precomputed Gram matrix, `dictionary * dictionary'` + gram can be `None` if method is 'threshold'. + + cov : ndarray of shape (n_components, n_samples), default=None + Precomputed covariance, `dictionary * X'`. + + algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \ + default='lasso_lars' + The algorithm used: + + * `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + * `'lasso_lars'`: uses Lars to compute the Lasso solution; + * `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). 
lasso_lars will be faster if + the estimated components are sparse; + * `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + * `'threshold'`: squashes to zero all coefficients less than + regularization from the projection `dictionary * data'`. + + regularization : int or float, default=None + The regularization parameter. It corresponds to alpha when + algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`. + Otherwise it corresponds to `n_nonzero_coefs`. + + init : ndarray of shape (n_samples, n_components), default=None + Initialization value of the sparse code. Only used if + `algorithm='lasso_cd'`. + + max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + copy_cov : bool, default=True + Whether to copy the precomputed covariance matrix; if `False`, it may + be overwritten. + + verbose : int, default=0 + Controls the verbosity; the higher, the more messages. + + positive: bool, default=False + Whether to enforce a positivity constraint on the sparse code. + + .. versionadded:: 0.20 + + Returns + ------- + code : ndarray of shape (n_components, n_features) + The sparse codes. + """ + n_samples, n_features = X.shape + n_components = dictionary.shape[0] + + if algorithm == "lasso_lars": + alpha = float(regularization) / n_features # account for scaling + try: + err_mgt = np.seterr(all="ignore") + + # Not passing in verbose=max(0, verbose-1) because Lars.fit already + # corrects the verbosity level. + lasso_lars = LassoLars( + alpha=alpha, + fit_intercept=False, + verbose=verbose, + precompute=gram, + fit_path=False, + positive=positive, + max_iter=max_iter, + ) + lasso_lars.fit(dictionary.T, X.T, Xy=cov) + new_code = lasso_lars.coef_ + finally: + np.seterr(**err_mgt) + + elif algorithm == "lasso_cd": + alpha = float(regularization) / n_features # account for scaling + + # TODO: Make verbosity argument for Lasso? + # sklearn.linear_model.coordinate_descent.enet_path has a verbosity + # argument that we could pass in from Lasso. + clf = Lasso( + alpha=alpha, + fit_intercept=False, + precompute=gram, + max_iter=max_iter, + warm_start=True, + positive=positive, + ) + + if init is not None: + # In some workflows using coordinate descent algorithms: + # - users might provide NumPy arrays with read-only buffers + # - `joblib` might memmap arrays making their buffer read-only + # TODO: move this handling (which is currently too broad) + # closer to the actual private function which need buffers to be writable. + if not init.flags["WRITEABLE"]: + init = np.array(init) + clf.coef_ = init + + clf.fit(dictionary.T, X.T, check_input=False) + new_code = clf.coef_ + + elif algorithm == "lars": + try: + err_mgt = np.seterr(all="ignore") + + # Not passing in verbose=max(0, verbose-1) because Lars.fit already + # corrects the verbosity level. 
+ lars = Lars( + fit_intercept=False, + verbose=verbose, + precompute=gram, + n_nonzero_coefs=int(regularization), + fit_path=False, + ) + lars.fit(dictionary.T, X.T, Xy=cov) + new_code = lars.coef_ + finally: + np.seterr(**err_mgt) + + elif algorithm == "threshold": + new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T + if positive: + np.clip(new_code, 0, None, out=new_code) + + elif algorithm == "omp": + new_code = orthogonal_mp_gram( + Gram=gram, + Xy=cov, + n_nonzero_coefs=int(regularization), + tol=None, + norms_squared=row_norms(X, squared=True), + copy_Xy=copy_cov, + ).T + + return new_code.reshape(n_samples, n_components) + + +@validate_params( + { + "X": ["array-like"], + "dictionary": ["array-like"], + "gram": ["array-like", None], + "cov": ["array-like", None], + "algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left"), None], + "copy_cov": ["boolean"], + "init": ["array-like", None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "n_jobs": [Integral, None], + "check_input": ["boolean"], + "verbose": ["verbose"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +# XXX : could be moved to the linear_model module +def sparse_encode( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + n_nonzero_coefs=None, + alpha=None, + copy_cov=True, + init=None, + max_iter=1000, + n_jobs=None, + check_input=True, + verbose=0, + positive=False, +): + """Sparse coding. + + Each row of the result is the solution to a sparse coding problem. + The goal is to find a sparse array `code` such that:: + + X ~= code * dictionary + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data matrix. + + dictionary : array-like of shape (n_components, n_features) + The dictionary matrix against which to solve the sparse coding of + the data. Some of the algorithms assume normalized rows for meaningful + output. + + gram : array-like of shape (n_components, n_components), default=None + Precomputed Gram matrix, `dictionary * dictionary'`. + + cov : array-like of shape (n_components, n_samples), default=None + Precomputed covariance, `dictionary' * X`. + + algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \ + default='lasso_lars' + The algorithm used: + + * `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + * `'lasso_lars'`: uses Lars to compute the Lasso solution; + * `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if + the estimated components are sparse; + * `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + * `'threshold'`: squashes to zero all coefficients less than + regularization from the projection `dictionary * data'`. + + n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and `algorithm='omp'` + and is overridden by `alpha` in the `omp` case. If `None`, then + `n_nonzero_coefs=int(n_features / 10)`. + + alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. 
+ If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of + the reconstruction error targeted. In this case, it overrides + `n_nonzero_coefs`. + If `None`, default to 1. + + copy_cov : bool, default=True + Whether to copy the precomputed covariance matrix; if `False`, it may + be overwritten. + + init : ndarray of shape (n_samples, n_components), default=None + Initialization value of the sparse codes. Only used if + `algorithm='lasso_cd'`. + + max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + check_input : bool, default=True + If `False`, the input arrays X and dictionary will not be checked. + + verbose : int, default=0 + Controls the verbosity; the higher, the more messages. + + positive : bool, default=False + Whether to enforce positivity when finding the encoding. + + .. versionadded:: 0.20 + + Returns + ------- + code : ndarray of shape (n_samples, n_components) + The sparse codes. + + See Also + -------- + sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems. + sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer. + SparseCoder : Find a sparse representation of data from a fixed precomputed + dictionary. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import sparse_encode + >>> X = np.array([[-1, -1, -1], [0, 0, 3]]) + >>> dictionary = np.array( + ... [[0, 1, 0], + ... [-1, -1, 2], + ... [1, 1, 1], + ... [0, 1, 1], + ... [0, 2, 1]], + ... dtype=np.float64 + ... 
) + >>> sparse_encode(X, dictionary, alpha=1e-10) + array([[ 0., 0., -1., 0., 0.], + [ 0., 1., 1., 0., 0.]]) + """ + if check_input: + if algorithm == "lasso_cd": + dictionary = check_array( + dictionary, order="C", dtype=[np.float64, np.float32] + ) + X = check_array(X, order="C", dtype=[np.float64, np.float32]) + else: + dictionary = check_array(dictionary) + X = check_array(X) + + if dictionary.shape[1] != X.shape[1]: + raise ValueError( + "Dictionary and X have different numbers of features:" + "dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape) + ) + + _check_positive_coding(algorithm, positive) + + return _sparse_encode( + X, + dictionary, + gram=gram, + cov=cov, + algorithm=algorithm, + n_nonzero_coefs=n_nonzero_coefs, + alpha=alpha, + copy_cov=copy_cov, + init=init, + max_iter=max_iter, + n_jobs=n_jobs, + verbose=verbose, + positive=positive, + ) + + +def _sparse_encode( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + n_nonzero_coefs=None, + alpha=None, + copy_cov=True, + init=None, + max_iter=1000, + n_jobs=None, + verbose=0, + positive=False, +): + """Sparse coding without input/parameter validation.""" + + n_samples, n_features = X.shape + n_components = dictionary.shape[0] + + if algorithm in ("lars", "omp"): + regularization = n_nonzero_coefs + if regularization is None: + regularization = min(max(n_features / 10, 1), n_components) + else: + regularization = alpha + if regularization is None: + regularization = 1.0 + + if gram is None and algorithm != "threshold": + gram = np.dot(dictionary, dictionary.T) + + if cov is None and algorithm != "lasso_cd": + copy_cov = False + cov = np.dot(dictionary, X.T) + + if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold": + code = _sparse_encode_precomputed( + X, + dictionary, + gram=gram, + cov=cov, + algorithm=algorithm, + regularization=regularization, + copy_cov=copy_cov, + init=init, + max_iter=max_iter, + verbose=verbose, + positive=positive, + ) + return code + + # Enter parallel code block + n_samples = X.shape[0] + n_components = dictionary.shape[0] + code = np.empty((n_samples, n_components)) + slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs))) + + code_views = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(_sparse_encode_precomputed)( + X[this_slice], + dictionary, + gram=gram, + cov=cov[:, this_slice] if cov is not None else None, + algorithm=algorithm, + regularization=regularization, + copy_cov=copy_cov, + init=init[this_slice] if init is not None else None, + max_iter=max_iter, + verbose=verbose, + positive=positive, + ) + for this_slice in slices + ) + for this_slice, this_view in zip(slices, code_views): + code[this_slice] = this_view + return code + + +def _update_dict( + dictionary, + Y, + code, + A=None, + B=None, + verbose=False, + random_state=None, + positive=False, +): + """Update the dense dictionary factor in place. + + Parameters + ---------- + dictionary : ndarray of shape (n_components, n_features) + Value of the dictionary at the previous iteration. + + Y : ndarray of shape (n_samples, n_features) + Data matrix. + + code : ndarray of shape (n_samples, n_components) + Sparse coding of the data against which to optimize the dictionary. + + A : ndarray of shape (n_components, n_components), default=None + Together with `B`, sufficient stats of the online model to update the + dictionary. + + B : ndarray of shape (n_features, n_components), default=None + Together with `A`, sufficient stats of the online model to update the + dictionary. 
+ + verbose: bool, default=False + Degree of output the procedure will print. + + random_state : int, RandomState instance or None, default=None + Used for randomly initializing the dictionary. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + positive : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + """ + n_samples, n_components = code.shape + random_state = check_random_state(random_state) + + if A is None: + A = code.T @ code + if B is None: + B = Y.T @ code + + n_unused = 0 + + for k in range(n_components): + if A[k, k] > 1e-6: + # 1e-6 is arbitrary but consistent with the spams implementation + dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k] + else: + # kth atom is almost never used -> sample a new one from the data + newd = Y[random_state.choice(n_samples)] + + # add small noise to avoid making the sparse coding ill conditioned + noise_level = 0.01 * (newd.std() or 1) # avoid 0 std + noise = random_state.normal(0, noise_level, size=len(newd)) + + dictionary[k] = newd + noise + code[:, k] = 0 + n_unused += 1 + + if positive: + np.clip(dictionary[k], 0, None, out=dictionary[k]) + + # Projection on the constraint set ||V_k|| <= 1 + dictionary[k] /= max(linalg.norm(dictionary[k]), 1) + + if verbose and n_unused > 0: + print(f"{n_unused} unused atoms resampled.") + + +def _dict_learning( + X, + n_components, + *, + alpha, + max_iter, + tol, + method, + n_jobs, + dict_init, + code_init, + callback, + verbose, + random_state, + return_n_iter, + positive_dict, + positive_code, + method_max_iter, +): + """Main dictionary learning algorithm""" + t0 = time.time() + # Init the code and the dictionary with SVD of Y + if code_init is not None and dict_init is not None: + code = np.array(code_init, order="F") + # Don't copy V, it will happen below + dictionary = dict_init + else: + code, S, dictionary = linalg.svd(X, full_matrices=False) + # flip the initial code's sign to enforce deterministic output + code, dictionary = svd_flip(code, dictionary) + dictionary = S[:, np.newaxis] * dictionary + r = len(dictionary) + if n_components <= r: # True even if n_components=None + code = code[:, :n_components] + dictionary = dictionary[:n_components, :] + else: + code = np.c_[code, np.zeros((len(code), n_components - r))] + dictionary = np.r_[ + dictionary, np.zeros((n_components - r, dictionary.shape[1])) + ] + + # Fortran-order dict better suited for the sparse coding which is the + # bottleneck of this algorithm. 
+ dictionary = np.asfortranarray(dictionary) + + errors = [] + current_cost = np.nan + + if verbose == 1: + print("[dict_learning]", end=" ") + + # If max_iter is 0, number of iterations returned should be zero + ii = -1 + + for ii in range(max_iter): + dt = time.time() - t0 + if verbose == 1: + sys.stdout.write(".") + sys.stdout.flush() + elif verbose: + print( + "Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)" + % (ii, dt, dt / 60, current_cost) + ) + + # Update code + code = sparse_encode( + X, + dictionary, + algorithm=method, + alpha=alpha, + init=code, + n_jobs=n_jobs, + positive=positive_code, + max_iter=method_max_iter, + verbose=verbose, + ) + + # Update dictionary in place + _update_dict( + dictionary, + X, + code, + verbose=verbose, + random_state=random_state, + positive=positive_dict, + ) + + # Cost function + current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum( + np.abs(code) + ) + errors.append(current_cost) + + if ii > 0: + dE = errors[-2] - errors[-1] + # assert(dE >= -tol * errors[-1]) + if dE < tol * errors[-1]: + if verbose == 1: + # A line return + print("") + elif verbose: + print("--- Convergence reached after %d iterations" % ii) + break + if ii % 5 == 0 and callback is not None: + callback(locals()) + + if return_n_iter: + return code, dictionary, errors, ii + 1 + else: + return code, dictionary, errors + + +def dict_learning_online( + X, + n_components=2, + *, + alpha=1, + max_iter=100, + return_code=True, + dict_init=None, + callback=None, + batch_size=256, + verbose=False, + shuffle=True, + n_jobs=None, + method="lars", + random_state=None, + positive_dict=False, + positive_code=False, + method_max_iter=1000, + tol=1e-3, + max_no_improvement=10, +): + """Solve a dictionary learning matrix factorization problem online. + + Finds the best dictionary and the corresponding sparse code for + approximating the data matrix X by solving:: + + (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 = 1 for all 0 <= k < n_components + + where V is the dictionary and U is the sparse code. ||.||_Fro stands for + the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm + which is the sum of the absolute values of all the entries in the matrix. + This is accomplished by repeatedly iterating over mini-batches by slicing + the input data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data matrix. + + n_components : int or None, default=2 + Number of dictionary atoms to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1 + Sparsity controlling parameter. + + max_iter : int, default=100 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.1 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `100`) instead. + + return_code : bool, default=True + Whether to also return the code U or just the dictionary `V`. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial values for the dictionary for warm restart scenarios. + If `None`, the initial values for the dictionary are created + with an SVD decomposition of the data via + :func:`~sklearn.utils.extmath.randomized_svd`. + + callback : callable, default=None + A callable that gets invoked at the end of each iteration. 
+ + batch_size : int, default=256 + The number of samples to take in each batch. + + .. versionchanged:: 1.3 + The default value of `batch_size` changed from 3 to 256 in version 1.3. + + verbose : bool, default=False + To control the verbosity of the procedure. + + shuffle : bool, default=True + Whether to shuffle the data before splitting it in batches. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + method : {'lars', 'cd'}, default='lars' + * `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + method_max_iter : int, default=1000 + Maximum number of iterations to perform when solving the lasso problem. + + .. versionadded:: 0.22 + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + .. versionadded:: 1.1 + + Returns + ------- + code : ndarray of shape (n_samples, n_components), + The sparse code (only returned if `return_code=True`). + + dictionary : ndarray of shape (n_components, n_features), + The solutions to the dictionary learning problem. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to `True`. + + See Also + -------- + dict_learning : Solve a dictionary learning matrix factorization problem. + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary + learning algorithm. + SparsePCA : Sparse Principal Components Analysis. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import dict_learning_online + >>> X, _, _ = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> U, V = dict_learning_online( + ... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42 + ... ) + + We can check the level of sparsity of `U`: + + >>> np.mean(U == 0) + 0.53... 
+ + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = U @ V + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.05... + """ + # TODO(1.6): remove in 1.6 + if max_iter is None: + warn( + ( + "`max_iter=None` is deprecated in version 1.4 and will be removed in " + "version 1.6. Use the default value (i.e. `100`) instead." + ), + FutureWarning, + ) + max_iter = 100 + + transform_algorithm = "lasso_" + method + + est = MiniBatchDictionaryLearning( + n_components=n_components, + alpha=alpha, + max_iter=max_iter, + n_jobs=n_jobs, + fit_algorithm=method, + batch_size=batch_size, + shuffle=shuffle, + dict_init=dict_init, + random_state=random_state, + transform_algorithm=transform_algorithm, + transform_alpha=alpha, + positive_code=positive_code, + positive_dict=positive_dict, + transform_max_iter=method_max_iter, + verbose=verbose, + callback=callback, + tol=tol, + max_no_improvement=max_no_improvement, + ).fit(X) + + if not return_code: + return est.components_ + else: + code = est.transform(X) + return code, est.components_ + + +@validate_params( + { + "X": ["array-like"], + "method": [StrOptions({"lars", "cd"})], + "return_n_iter": ["boolean"], + "method_max_iter": [Interval(Integral, 0, None, closed="left")], + }, + prefer_skip_nested_validation=False, +) +def dict_learning( + X, + n_components, + *, + alpha, + max_iter=100, + tol=1e-8, + method="lars", + n_jobs=None, + dict_init=None, + code_init=None, + callback=None, + verbose=False, + random_state=None, + return_n_iter=False, + positive_dict=False, + positive_code=False, + method_max_iter=1000, +): + """Solve a dictionary learning matrix factorization problem. + + Finds the best dictionary and the corresponding sparse code for + approximating the data matrix X by solving:: + + (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 = 1 for all 0 <= k < n_components + + where V is the dictionary and U is the sparse code. ||.||_Fro stands for + the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm + which is the sum of the absolute values of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data matrix. + + n_components : int + Number of dictionary atoms to extract. + + alpha : int or float + Sparsity controlling parameter. + + max_iter : int, default=100 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for the stopping condition. + + method : {'lars', 'cd'}, default='lars' + The method used: + + * `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial value for the dictionary for warm restart scenarios. Only used + if `code_init` and `dict_init` are not None. 
+ + code_init : ndarray of shape (n_samples, n_components), default=None + Initial value for the sparse code for warm restart scenarios. Only used + if `code_init` and `dict_init` are not None. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + verbose : bool, default=False + To control the verbosity of the procedure. + + random_state : int, RandomState instance or None, default=None + Used for randomly initializing the dictionary. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + method_max_iter : int, default=1000 + Maximum number of iterations to perform. + + .. versionadded:: 0.22 + + Returns + ------- + code : ndarray of shape (n_samples, n_components) + The sparse code factor in the matrix factorization. + + dictionary : ndarray of shape (n_components, n_features), + The dictionary factor in the matrix factorization. + + errors : array + Vector of errors at each iteration. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to True. + + See Also + -------- + dict_learning_online : Solve a dictionary learning matrix factorization + problem online. + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate version + of the dictionary learning algorithm. + SparsePCA : Sparse Principal Components Analysis. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import dict_learning + >>> X, _, _ = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42) + + We can check the level of sparsity of `U`: + + >>> np.mean(U == 0) + 0.6... + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = U @ V + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.01... 
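+
+    Internally this function is a thin wrapper: it fits a
+    :class:`DictionaryLearning` estimator configured with the given
+    parameters and returns its sparse code, dictionary and per-iteration
+    errors (see the estimator construction just below).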
+ """ + estimator = DictionaryLearning( + n_components=n_components, + alpha=alpha, + max_iter=max_iter, + tol=tol, + fit_algorithm=method, + n_jobs=n_jobs, + dict_init=dict_init, + callback=callback, + code_init=code_init, + verbose=verbose, + random_state=random_state, + positive_code=positive_code, + positive_dict=positive_dict, + transform_max_iter=method_max_iter, + ).set_output(transform="default") + code = estimator.fit_transform(X) + if return_n_iter: + return ( + code, + estimator.components_, + estimator.error_, + estimator.n_iter_, + ) + return code, estimator.components_, estimator.error_ + + +class _BaseSparseCoding(ClassNamePrefixFeaturesOutMixin, TransformerMixin): + """Base class from SparseCoder and DictionaryLearning algorithms.""" + + def __init__( + self, + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ): + self.transform_algorithm = transform_algorithm + self.transform_n_nonzero_coefs = transform_n_nonzero_coefs + self.transform_alpha = transform_alpha + self.transform_max_iter = transform_max_iter + self.split_sign = split_sign + self.n_jobs = n_jobs + self.positive_code = positive_code + + def _transform(self, X, dictionary): + """Private method allowing to accommodate both DictionaryLearning and + SparseCoder.""" + X = self._validate_data(X, reset=False) + + if hasattr(self, "alpha") and self.transform_alpha is None: + transform_alpha = self.alpha + else: + transform_alpha = self.transform_alpha + + code = sparse_encode( + X, + dictionary, + algorithm=self.transform_algorithm, + n_nonzero_coefs=self.transform_n_nonzero_coefs, + alpha=transform_alpha, + max_iter=self.transform_max_iter, + n_jobs=self.n_jobs, + positive=self.positive_code, + ) + + if self.split_sign: + # feature vector is split into a positive and negative side + n_samples, n_features = code.shape + split_code = np.empty((n_samples, 2 * n_features)) + split_code[:, :n_features] = np.maximum(code, 0) + split_code[:, n_features:] = -np.minimum(code, 0) + code = split_code + + return code + + def transform(self, X): + """Encode the data as a sparse combination of the dictionary atoms. + + Coding method is determined by the object parameter + `transform_algorithm`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Test data to be transformed, must have the same number of + features as the data used to train the model. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + return self._transform(X, self.components_) + + +class SparseCoder(_BaseSparseCoding, BaseEstimator): + """Sparse coding. + + Finds a sparse representation of data against a fixed, precomputed + dictionary. + + Each row of the result is the solution to a sparse coding problem. + The goal is to find a sparse array `code` such that:: + + X ~= code * dictionary + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + dictionary : ndarray of shape (n_components, n_features) + The dictionary atoms used for sparse coding. Lines are assumed to be + normalized to unit norm. 
+ + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution; + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if + the estimated components are sparse; + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and `algorithm='omp'` + and is overridden by `alpha` in the `omp` case. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of + the reconstruction error targeted. In this case, it overrides + `n_nonzero_coefs`. + If `None`, default to 1. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `lasso_lars`. + + .. versionadded:: 0.22 + + Attributes + ---------- + n_components_ : int + Number of atoms. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate, version of the + dictionary learning algorithm. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparsePCA : Sparse Principal Components Analysis. + sparse_encode : Sparse coding where each row of the result is the solution + to a sparse coding problem. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import SparseCoder + >>> X = np.array([[-1, -1, -1], [0, 0, 3]]) + >>> dictionary = np.array( + ... [[0, 1, 0], + ... [-1, -1, 2], + ... [1, 1, 1], + ... [0, 1, 1], + ... [0, 2, 1]], + ... dtype=np.float64 + ... ) + >>> coder = SparseCoder( + ... dictionary=dictionary, transform_algorithm='lasso_lars', + ... transform_alpha=1e-10, + ... 
) + >>> coder.transform(X) + array([[ 0., 0., -1., 0., 0.], + [ 0., 1., 1., 0., 0.]]) + """ + + _required_parameters = ["dictionary"] + + def __init__( + self, + dictionary, + *, + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + split_sign=False, + n_jobs=None, + positive_code=False, + transform_max_iter=1000, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.dictionary = dictionary + + def fit(self, X, y=None): + """Do nothing and return the estimator unchanged. + + This method is just there to implement the usual API and hence + work in pipelines. + + Parameters + ---------- + X : Ignored + Not used, present for API consistency by convention. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + return self + + def transform(self, X, y=None): + """Encode the data as a sparse combination of the dictionary atoms. + + Coding method is determined by the object parameter + `transform_algorithm`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + return super()._transform(X, self.dictionary) + + def _more_tags(self): + return { + "requires_fit": False, + "preserves_dtype": [np.float64, np.float32], + } + + @property + def n_components_(self): + """Number of atoms.""" + return self.dictionary.shape[0] + + @property + def n_features_in_(self): + """Number of features seen during `fit`.""" + return self.dictionary.shape[1] + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.n_components_ + + +class DictionaryLearning(_BaseSparseCoding, BaseEstimator): + """Dictionary learning. + + Finds a dictionary (a set of atoms) that performs well at sparsely + encoding the fitted data. + + Solves the optimization problem:: + + (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 <= 1 for all 0 <= k < n_components + + ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for + the entry-wise matrix norm which is the sum of the absolute values + of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of dictionary elements to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1.0 + Sparsity controlling parameter. + + max_iter : int, default=1000 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for numerical error. + + fit_algorithm : {'lars', 'cd'}, default='lars' + * `'lars'`: uses the least angle regression method to solve the lasso + problem (:func:`~sklearn.linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be + faster if the estimated components are sparse. + + .. versionadded:: 0.17 + *cd* coordinate descent method to improve speed. 
+ + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (:func:`~sklearn.linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution. + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'` + will be faster if the estimated components are sparse. + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution. + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + .. versionadded:: 0.17 + *lasso_cd* coordinate descent method to improve speed. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and + `algorithm='omp'`. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `None`, defaults to `alpha`. + + .. versionchanged:: 1.2 + When None, default value changed from 1.0 to `alpha`. + + n_jobs : int or None, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + code_init : ndarray of shape (n_samples, n_components), default=None + Initial value for the code, for warm restart. Only used if `code_init` + and `dict_init` are not None. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial values for the dictionary, for warm restart. Only used if + `code_init` and `dict_init` are not None. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + .. versionadded:: 1.3 + + verbose : bool, default=False + To control the verbosity of the procedure. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + .. versionadded:: 0.22 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + dictionary atoms extracted from the data + + error_ : array + vector of errors at each iteration + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + MiniBatchDictionaryLearning: A faster, less accurate, version of the + dictionary learning algorithm. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + + References + ---------- + + J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning + for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import DictionaryLearning + >>> X, dictionary, code = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> dict_learner = DictionaryLearning( + ... n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.1, + ... random_state=42, + ... ) + >>> X_transformed = dict_learner.fit(X).transform(X) + + We can check the level of sparsity of `X_transformed`: + + >>> np.mean(X_transformed == 0) + 0.52... + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = X_transformed @ dict_learner.components_ + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.05... + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "fit_algorithm": [StrOptions({"lars", "cd"})], + "transform_algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "transform_alpha": [Interval(Real, 0, None, closed="left"), None], + "n_jobs": [Integral, None], + "code_init": [np.ndarray, None], + "dict_init": [np.ndarray, None], + "callback": [callable, None], + "verbose": ["verbose"], + "split_sign": ["boolean"], + "random_state": ["random_state"], + "positive_code": ["boolean"], + "positive_dict": ["boolean"], + "transform_max_iter": [Interval(Integral, 0, None, closed="left")], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + max_iter=1000, + tol=1e-8, + fit_algorithm="lars", + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + n_jobs=None, + code_init=None, + dict_init=None, + callback=None, + verbose=False, + split_sign=False, + random_state=None, + positive_code=False, + positive_dict=False, + transform_max_iter=1000, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.n_components = n_components + self.alpha = alpha + self.max_iter = max_iter + self.tol = tol + self.fit_algorithm = fit_algorithm + self.code_init = code_init + self.dict_init = dict_init + self.callback = callback + self.verbose = verbose + self.random_state = random_state + self.positive_dict = positive_dict 
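+
+    # Note: `fit` and `fit_transform` below delegate the optimization to the
+    # module-level `_dict_learning` helper; `fit` simply calls
+    # `fit_transform` and discards the returned code, keeping only the
+    # fitted attributes (`components_`, `error_`, `n_iter_`).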
+ + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + self.fit_transform(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model from data in X and return the transformed data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + V : ndarray of shape (n_samples, n_components) + Transformed data. + """ + _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code) + + method = "lasso_" + self.fit_algorithm + + random_state = check_random_state(self.random_state) + X = self._validate_data(X) + + if self.n_components is None: + n_components = X.shape[1] + else: + n_components = self.n_components + + V, U, E, self.n_iter_ = _dict_learning( + X, + n_components, + alpha=self.alpha, + tol=self.tol, + max_iter=self.max_iter, + method=method, + method_max_iter=self.transform_max_iter, + n_jobs=self.n_jobs, + code_init=self.code_init, + dict_init=self.dict_init, + callback=self.callback, + verbose=self.verbose, + random_state=random_state, + return_n_iter=True, + positive_dict=self.positive_dict, + positive_code=self.positive_code, + ) + self.components_ = U + self.error_ = E + + return V + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } + + +class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator): + """Mini-batch dictionary learning. + + Finds a dictionary (a set of atoms) that performs well at sparsely + encoding the fitted data. + + Solves the optimization problem:: + + (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 <= 1 for all 0 <= k < n_components + + ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for + the entry-wise matrix norm which is the sum of the absolute values + of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of dictionary elements to extract. + + alpha : float, default=1 + Sparsity controlling parameter. + + max_iter : int, default=1_000 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.1 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `1_000`) instead. + + fit_algorithm : {'lars', 'cd'}, default='lars' + The algorithm used: + + - `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`) + - `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. 
+ ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + batch_size : int, default=256 + Number of samples in each mini-batch. + + .. versionchanged:: 1.3 + The default value of `batch_size` changed from 3 to 256 in version 1.3. + + shuffle : bool, default=True + Whether to shuffle the samples before forming batches. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial value of the dictionary for warm restart scenarios. + + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution. + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster + if the estimated components are sparse. + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution. + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and + `algorithm='omp'`. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `None`, defaults to `alpha`. + + .. versionchanged:: 1.2 + When None, default value changed from 1.0 to `alpha`. + + verbose : bool or int, default=False + To control the verbosity of the procedure. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + .. versionadded:: 0.22 + + callback : callable, default=None + A callable that gets invoked at the end of each iteration. + + .. versionadded:: 1.1 + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + .. 
versionadded:: 1.1 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Components extracted from the data. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations over the full dataset. + + n_steps_ : int + Number of mini-batches processed. + + .. versionadded:: 1.1 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + + References + ---------- + + J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning + for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import MiniBatchDictionaryLearning + >>> X, dictionary, code = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42) + >>> dict_learner = MiniBatchDictionaryLearning( + ... n_components=15, batch_size=3, transform_algorithm='lasso_lars', + ... transform_alpha=0.1, max_iter=20, random_state=42) + >>> X_transformed = dict_learner.fit_transform(X) + + We can check the level of sparsity of `X_transformed`: + + >>> np.mean(X_transformed == 0) > 0.5 + True + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = X_transformed @ dict_learner.components_ + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.052... 
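    A dictionary can also be learned incrementally from a stream of
    mini-batches with `partial_fit` (illustrative sketch reusing the `X`
    generated above; only the shape of the result is asserted):

    >>> streaming_learner = MiniBatchDictionaryLearning(
    ...     n_components=15, random_state=42)
    >>> for X_batch in np.array_split(X, 10):
    ...     streaming_learner = streaming_learner.partial_fit(X_batch)
    >>> streaming_learner.components_.shape
    (15, 20)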
+ """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)], + "fit_algorithm": [StrOptions({"cd", "lars"})], + "n_jobs": [None, Integral], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "dict_init": [None, np.ndarray], + "transform_algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "transform_alpha": [Interval(Real, 0, None, closed="left"), None], + "verbose": ["verbose"], + "split_sign": ["boolean"], + "random_state": ["random_state"], + "positive_code": ["boolean"], + "positive_dict": ["boolean"], + "transform_max_iter": [Interval(Integral, 0, None, closed="left")], + "callback": [None, callable], + "tol": [Interval(Real, 0, None, closed="left")], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + max_iter=1_000, + fit_algorithm="lars", + n_jobs=None, + batch_size=256, + shuffle=True, + dict_init=None, + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + verbose=False, + split_sign=False, + random_state=None, + positive_code=False, + positive_dict=False, + transform_max_iter=1000, + callback=None, + tol=1e-3, + max_no_improvement=10, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.n_components = n_components + self.alpha = alpha + self.max_iter = max_iter + self.fit_algorithm = fit_algorithm + self.dict_init = dict_init + self.verbose = verbose + self.shuffle = shuffle + self.batch_size = batch_size + self.split_sign = split_sign + self.random_state = random_state + self.positive_dict = positive_dict + self.callback = callback + self.max_no_improvement = max_no_improvement + self.tol = tol + + def _check_params(self, X): + # n_components + self._n_components = self.n_components + if self._n_components is None: + self._n_components = X.shape[1] + + # fit_algorithm + _check_positive_coding(self.fit_algorithm, self.positive_code) + self._fit_algorithm = "lasso_" + self.fit_algorithm + + # batch_size + self._batch_size = min(self.batch_size, X.shape[0]) + + def _initialize_dict(self, X, random_state): + """Initialization of the dictionary.""" + if self.dict_init is not None: + dictionary = self.dict_init + else: + # Init V with SVD of X + _, S, dictionary = randomized_svd( + X, self._n_components, random_state=random_state + ) + dictionary = S[:, np.newaxis] * dictionary + + if self._n_components <= len(dictionary): + dictionary = dictionary[: self._n_components, :] + else: + dictionary = np.concatenate( + ( + dictionary, + np.zeros( + (self._n_components - len(dictionary), dictionary.shape[1]), + dtype=dictionary.dtype, + ), + ) + ) + + dictionary = check_array(dictionary, order="F", dtype=X.dtype, copy=False) + dictionary = np.require(dictionary, requirements="W") + + return dictionary + + def _update_inner_stats(self, X, code, batch_size, step): + """Update the inner stats inplace.""" + if step < batch_size - 1: + theta = (step + 1) * batch_size + else: + theta = batch_size**2 + step + 1 - batch_size + beta = (theta + 1 - batch_size) / (theta + 1) + + self._A *= beta + self._A += code.T @ code / 
batch_size + self._B *= beta + self._B += X.T @ code / batch_size + + def _minibatch_step(self, X, dictionary, random_state, step): + """Perform the update on the dictionary for one minibatch.""" + batch_size = X.shape[0] + + # Compute code for this batch + code = _sparse_encode( + X, + dictionary, + algorithm=self._fit_algorithm, + alpha=self.alpha, + n_jobs=self.n_jobs, + positive=self.positive_code, + max_iter=self.transform_max_iter, + verbose=self.verbose, + ) + + batch_cost = ( + 0.5 * ((X - code @ dictionary) ** 2).sum() + + self.alpha * np.sum(np.abs(code)) + ) / batch_size + + # Update inner stats + self._update_inner_stats(X, code, batch_size, step) + + # Update dictionary + _update_dict( + dictionary, + X, + code, + self._A, + self._B, + verbose=self.verbose, + random_state=random_state, + positive=self.positive_dict, + ) + + return batch_cost + + def _check_convergence( + self, X, batch_cost, new_dict, old_dict, n_samples, step, n_steps + ): + """Helper function to encapsulate the early stopping logic. + + Early stopping is based on two factors: + - A small change of the dictionary between two minibatch updates. This is + controlled by the tol parameter. + - No more improvement on a smoothed estimate of the objective function for a + a certain number of consecutive minibatch updates. This is controlled by + the max_no_improvement parameter. + """ + batch_size = X.shape[0] + + # counts steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore 100 first steps or 1 epoch to avoid initializing the ewa_cost with a + # too bad value + if step <= min(100, n_samples / batch_size): + if self.verbose: + print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}") + return False + + # Compute an Exponentially Weighted Average of the cost function to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_cost is None: + self._ewa_cost = batch_cost + else: + alpha = batch_size / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha + + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch cost: " + f"{batch_cost}, ewa cost: {self._ewa_cost}" + ) + + # Early stopping based on change of dictionary + dict_diff = linalg.norm(new_dict - old_dict) / self._n_components + if self.tol > 0 and dict_diff <= self.tol: + if self.verbose: + print(f"Converged (small dictionary change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # cost function + if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min: + self._no_improvement = 0 + self._ewa_cost_min = self._ewa_cost + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in objective function) " + f"at step {step}/{n_steps}" + ) + return True + + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
+ """ + X = self._validate_data( + X, dtype=[np.float64, np.float32], order="C", copy=False + ) + + self._check_params(X) + self._random_state = check_random_state(self.random_state) + + dictionary = self._initialize_dict(X, self._random_state) + old_dict = dictionary.copy() + + if self.shuffle: + X_train = X.copy() + self._random_state.shuffle(X_train) + else: + X_train = X + + n_samples, n_features = X_train.shape + + if self.verbose: + print("[dict_learning]") + + # Inner stats + self._A = np.zeros( + (self._n_components, self._n_components), dtype=X_train.dtype + ) + self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype) + + # TODO(1.6): remove in 1.6 + if self.max_iter is None: + warn( + ( + "`max_iter=None` is deprecated in version 1.4 and will be removed" + " in version 1.6. Use the default value (i.e. `1_000`) instead." + ), + FutureWarning, + ) + max_iter = 1_000 + else: + max_iter = self.max_iter + + # Attributes to monitor the convergence + self._ewa_cost = None + self._ewa_cost_min = None + self._no_improvement = 0 + + batches = gen_batches(n_samples, self._batch_size) + batches = itertools.cycle(batches) + n_steps_per_iter = int(np.ceil(n_samples / self._batch_size)) + n_steps = max_iter * n_steps_per_iter + + i = -1 # to allow max_iter = 0 + + for i, batch in zip(range(n_steps), batches): + X_batch = X_train[batch] + + batch_cost = self._minibatch_step( + X_batch, dictionary, self._random_state, i + ) + + if self._check_convergence( + X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps + ): + break + + # XXX callback param added for backward compat in #18975 but a common + # unified callback API should be preferred + if self.callback is not None: + self.callback(locals()) + + old_dict[:] = dictionary + + self.n_steps_ = i + 1 + self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter) + self.components_ = dictionary + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Update the model using the data in X as a mini-batch. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Return the instance itself. 
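        Notes
        -----
        Each call performs a single mini-batch update: the first call
        initializes the dictionary and the inner sufficient statistics,
        while subsequent calls accumulate those statistics and refine the
        dictionary, incrementing `n_steps_` by one each time.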
+ """ + has_components = hasattr(self, "components_") + + X = self._validate_data( + X, dtype=[np.float64, np.float32], order="C", reset=not has_components + ) + + if not has_components: + # This instance has not been fitted yet (fit or partial_fit) + self._check_params(X) + self._random_state = check_random_state(self.random_state) + + dictionary = self._initialize_dict(X, self._random_state) + + self.n_steps_ = 0 + + self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype) + self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype) + else: + dictionary = self.components_ + + self._minibatch_step(X, dictionary, self._random_state, self.n_steps_) + + self.components_ = dictionary + self.n_steps_ += 1 + + return self + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..af3498d5344836330d016b5c6fb24d0a6f9fd723 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py @@ -0,0 +1,458 @@ +"""Factor Analysis. + +A latent linear variable model. + +FactorAnalysis is similar to probabilistic PCA implemented by PCA.score +While PCA assumes Gaussian noise with the same variance for each +feature, the FactorAnalysis model assumes different variances for +each of them. + +This implementation is based on David Barber's Book, +Bayesian Reasoning and Machine Learning, +http://www.cs.ucl.ac.uk/staff/d.barber/brml, +Algorithm 21.1 +""" + +# Author: Christian Osendorfer +# Alexandre Gramfort +# Denis A. Engemann + +# License: BSD3 + +import warnings +from math import log, sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import fast_logdet, randomized_svd, squared_norm +from ..utils.validation import check_is_fitted + + +class FactorAnalysis(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Factor Analysis (FA). + + A simple linear generative model with Gaussian latent variables. + + The observations are assumed to be caused by a linear transformation of + lower dimensional latent factors and added Gaussian noise. + Without loss of generality the factors are distributed according to a + Gaussian with zero mean and unit covariance. The noise is also zero mean + and has an arbitrary diagonal covariance matrix. + + If we would restrict the model further, by assuming that the Gaussian + noise is even isotropic (all diagonal entries are the same) we would obtain + :class:`PCA`. + + FactorAnalysis performs a maximum likelihood estimate of the so-called + `loading` matrix, the transformation of the latent variables to the + observed ones, using SVD based approach. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_components : int, default=None + Dimensionality of latent space, the number of components + of ``X`` that are obtained after ``transform``. 
+ If None, n_components is set to the number of features. + + tol : float, default=1e-2 + Stopping tolerance for log-likelihood increase. + + copy : bool, default=True + Whether to make a copy of X. If ``False``, the input X gets overwritten + during fitting. + + max_iter : int, default=1000 + Maximum number of iterations. + + noise_variance_init : array-like of shape (n_features,), default=None + The initial guess of the noise variance for each feature. + If None, it defaults to np.ones(n_features). + + svd_method : {'lapack', 'randomized'}, default='randomized' + Which SVD method to use. If 'lapack' use standard SVD from + scipy.linalg, if 'randomized' use fast ``randomized_svd`` function. + Defaults to 'randomized'. For most applications 'randomized' will + be sufficiently precise while providing significant speed gains. + Accuracy can also be improved by setting higher values for + `iterated_power`. If this is not sufficient, for maximum precision + you should choose 'lapack'. + + iterated_power : int, default=3 + Number of iterations for the power method. 3 by default. Only used + if ``svd_method`` equals 'randomized'. + + rotation : {'varimax', 'quartimax'}, default=None + If not None, apply the indicated rotation. Currently, varimax and + quartimax are implemented. See + `"The varimax criterion for analytic rotation in factor analysis" + `_ + H. F. Kaiser, 1958. + + .. versionadded:: 0.24 + + random_state : int or RandomState instance, default=0 + Only used when ``svd_method`` equals 'randomized'. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Components with maximum variance. + + loglike_ : list of shape (n_iterations,) + The log likelihood at each iteration. + + noise_variance_ : ndarray of shape (n_features,) + The estimated noise variance for each feature. + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA: Principal component analysis is also a latent linear variable model + which however assumes equal noise variance for each feature. + This extra assumption makes probabilistic PCA faster as it can be + computed in closed form. + FastICA: Independent component analysis, a latent variable model with + non-Gaussian latent variables. + + References + ---------- + - David Barber, Bayesian Reasoning and Machine Learning, + Algorithm 21.1. + + - Christopher M. Bishop: Pattern Recognition and Machine Learning, + Chapter 12.2.4. 
+ + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import FactorAnalysis + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = FactorAnalysis(n_components=7, random_state=0) + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 0, None, closed="left"), None], + "tol": [Interval(Real, 0.0, None, closed="left")], + "copy": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "noise_variance_init": ["array-like", None], + "svd_method": [StrOptions({"randomized", "lapack"})], + "iterated_power": [Interval(Integral, 0, None, closed="left")], + "rotation": [StrOptions({"varimax", "quartimax"}), None], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + tol=1e-2, + copy=True, + max_iter=1000, + noise_variance_init=None, + svd_method="randomized", + iterated_power=3, + rotation=None, + random_state=0, + ): + self.n_components = n_components + self.copy = copy + self.tol = tol + self.max_iter = max_iter + self.svd_method = svd_method + + self.noise_variance_init = noise_variance_init + self.iterated_power = iterated_power + self.random_state = random_state + self.rotation = rotation + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the FactorAnalysis model to X using SVD based approach. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Ignored parameter. + + Returns + ------- + self : object + FactorAnalysis class instance. + """ + X = self._validate_data(X, copy=self.copy, dtype=np.float64) + + n_samples, n_features = X.shape + n_components = self.n_components + if n_components is None: + n_components = n_features + + self.mean_ = np.mean(X, axis=0) + X -= self.mean_ + + # some constant terms + nsqrt = sqrt(n_samples) + llconst = n_features * log(2.0 * np.pi) + n_components + var = np.var(X, axis=0) + + if self.noise_variance_init is None: + psi = np.ones(n_features, dtype=X.dtype) + else: + if len(self.noise_variance_init) != n_features: + raise ValueError( + "noise_variance_init dimension does not " + "with number of features : %d != %d" + % (len(self.noise_variance_init), n_features) + ) + psi = np.array(self.noise_variance_init) + + loglike = [] + old_ll = -np.inf + SMALL = 1e-12 + + # we'll modify svd outputs to return unexplained variance + # to allow for unified computation of loglikelihood + if self.svd_method == "lapack": + + def my_svd(X): + _, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False) + return ( + s[:n_components], + Vt[:n_components], + squared_norm(s[n_components:]), + ) + + else: # svd_method == "randomized" + random_state = check_random_state(self.random_state) + + def my_svd(X): + _, s, Vt = randomized_svd( + X, + n_components, + random_state=random_state, + n_iter=self.iterated_power, + ) + return s, Vt, squared_norm(X) - squared_norm(s) + + for i in range(self.max_iter): + # SMALL helps numerics + sqrt_psi = np.sqrt(psi) + SMALL + s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt)) + s **= 2 + # Use 'maximum' here to avoid sqrt problems. 
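            # This is the maximum-likelihood update of the loading matrix
            # (Barber, Algorithm 21.1): in the psi-scaled space the retained
            # singular directions carry variance ``s``; subtracting the unit
            # noise contribution and taking the square root gives the
            # loadings, which are rescaled by ``sqrt_psi`` below to undo the
            # per-feature scaling.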
+ W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt + del Vt + W *= sqrt_psi + + # loglikelihood + ll = llconst + np.sum(np.log(s)) + ll += unexp_var + np.sum(np.log(psi)) + ll *= -n_samples / 2.0 + loglike.append(ll) + if (ll - old_ll) < self.tol: + break + old_ll = ll + + psi = np.maximum(var - np.sum(W**2, axis=0), SMALL) + else: + warnings.warn( + "FactorAnalysis did not converge." + + " You might want" + + " to increase the number of iterations.", + ConvergenceWarning, + ) + + self.components_ = W + if self.rotation is not None: + self.components_ = self._rotate(W) + self.noise_variance_ = psi + self.loglike_ = loglike + self.n_iter_ = i + 1 + return self + + def transform(self, X): + """Apply dimensionality reduction to X using the model. + + Compute the expected mean of the latent variables. + See Barber, 21.2.33 (or Bishop, 12.66). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + The latent variables of X. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + Ih = np.eye(len(self.components_)) + + X_transformed = X - self.mean_ + + Wpsi = self.components_ / self.noise_variance_ + cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T)) + tmp = np.dot(X_transformed, Wpsi.T) + X_transformed = np.dot(tmp, cov_z) + + return X_transformed + + def get_covariance(self): + """Compute data covariance with the FactorAnalysis model. + + ``cov = components_.T * components_ + diag(noise_variance)`` + + Returns + ------- + cov : ndarray of shape (n_features, n_features) + Estimated covariance of data. + """ + check_is_fitted(self) + + cov = np.dot(self.components_.T, self.components_) + cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace + return cov + + def get_precision(self): + """Compute data precision matrix with the FactorAnalysis model. + + Returns + ------- + precision : ndarray of shape (n_features, n_features) + Estimated precision of data. + """ + check_is_fitted(self) + + n_features = self.components_.shape[1] + + # handle corner cases first + if self.n_components == 0: + return np.diag(1.0 / self.noise_variance_) + if self.n_components == n_features: + return linalg.inv(self.get_covariance()) + + # Get precision using matrix inversion lemma + components_ = self.components_ + precision = np.dot(components_ / self.noise_variance_, components_.T) + precision.flat[:: len(precision) + 1] += 1.0 + precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) + precision /= self.noise_variance_[:, np.newaxis] + precision /= -self.noise_variance_[np.newaxis, :] + precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_ + return precision + + def score_samples(self, X): + """Compute the log-likelihood of each sample. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data. + + Returns + ------- + ll : ndarray of shape (n_samples,) + Log-likelihood of each sample under the current model. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + Xr = X - self.mean_ + precision = self.get_precision() + n_features = X.shape[1] + log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision)) + return log_like + + def score(self, X, y=None): + """Compute the average log-likelihood of the samples. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data. 
+ + y : Ignored + Ignored parameter. + + Returns + ------- + ll : float + Average log-likelihood of the samples under the current model. + """ + return np.mean(self.score_samples(X)) + + def _rotate(self, components, n_components=None, tol=1e-6): + "Rotate the factor analysis solution." + # note that tol is not exposed + return _ortho_rotation(components.T, method=self.rotation, tol=tol)[ + : self.n_components + ] + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + +def _ortho_rotation(components, method="varimax", tol=1e-6, max_iter=100): + """Return rotated components.""" + nrow, ncol = components.shape + rotation_matrix = np.eye(ncol) + var = 0 + + for _ in range(max_iter): + comp_rot = np.dot(components, rotation_matrix) + if method == "varimax": + tmp = comp_rot * np.transpose((comp_rot**2).sum(axis=0) / nrow) + elif method == "quartimax": + tmp = 0 + u, s, v = np.linalg.svd(np.dot(components.T, comp_rot**3 - tmp)) + rotation_matrix = np.dot(u, v) + var_new = np.sum(s) + if var != 0 and var_new < var * (1 + tol): + break + var = var_new + + return np.dot(components, rotation_matrix).T diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f36e5ba87dbc0ed737c1f07ec51d35c1f2d18e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py @@ -0,0 +1,795 @@ +""" +Python implementation of the fast ICA algorithms. + +Reference: Tables 8.3 and 8.4 page 196 in the book: +Independent Component Analysis, by Hyvarinen et al. +""" + +# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux, +# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import as_float_array, check_array, check_random_state +from ..utils._param_validation import Interval, Options, StrOptions, validate_params +from ..utils.validation import check_is_fitted + +__all__ = ["fastica", "FastICA"] + + +def _gs_decorrelation(w, W, j): + """ + Orthonormalize w wrt the first j rows of W. + + Parameters + ---------- + w : ndarray of shape (n,) + Array to be orthogonalized + + W : ndarray of shape (p, n) + Null space definition + + j : int < p + The no of (from the first) rows of Null space W wrt which w is + orthogonalized. + + Notes + ----- + Assumes that W is orthogonal + w changed in place + """ + w -= np.linalg.multi_dot([w, W[:j].T, W[:j]]) + return w + + +def _sym_decorrelation(W): + """Symmetric decorrelation + i.e. W <- (W * W.T) ^{-1/2} * W + """ + s, u = linalg.eigh(np.dot(W, W.T)) + # Avoid sqrt of negative values because of rounding errors. Note that + # np.sqrt(tiny) is larger than tiny and therefore this clipping also + # prevents division by zero in the next step. + s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None) + + # u (resp. s) contains the eigenvectors (resp. square roots of + # the eigenvalues) of W * W.T + return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W]) + + +def _ica_def(X, tol, g, fun_args, max_iter, w_init): + """Deflationary FastICA using fun approx to neg-entropy function + + Used internally by FastICA. 
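    Components are estimated one at a time: each unit vector is updated with
    the one-unit fixed-point rule, decorrelated (Gram-Schmidt) against the
    rows of ``W`` found so far, renormalized, and iterated until
    ``abs(abs(np.dot(w_new, w)) - 1)`` falls below ``tol``.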
+ """ + + n_components = w_init.shape[0] + W = np.zeros((n_components, n_components), dtype=X.dtype) + n_iter = [] + + # j is the index of the extracted component + for j in range(n_components): + w = w_init[j, :].copy() + w /= np.sqrt((w**2).sum()) + + for i in range(max_iter): + gwtx, g_wtx = g(np.dot(w.T, X), fun_args) + + w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w + + _gs_decorrelation(w1, W, j) + + w1 /= np.sqrt((w1**2).sum()) + + lim = np.abs(np.abs((w1 * w).sum()) - 1) + w = w1 + if lim < tol: + break + + n_iter.append(i + 1) + W[j, :] = w + + return W, max(n_iter) + + +def _ica_par(X, tol, g, fun_args, max_iter, w_init): + """Parallel FastICA. + + Used internally by FastICA --main loop + + """ + W = _sym_decorrelation(w_init) + del w_init + p_ = float(X.shape[1]) + for ii in range(max_iter): + gwtx, g_wtx = g(np.dot(W, X), fun_args) + W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W) + del gwtx, g_wtx + # builtin max, abs are faster than numpy counter parts. + # np.einsum allows having the lowest memory footprint. + # It is faster than np.diag(np.dot(W1, W.T)). + lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1)) + W = W1 + if lim < tol: + break + else: + warnings.warn( + ( + "FastICA did not converge. Consider increasing " + "tolerance or the maximum number of iterations." + ), + ConvergenceWarning, + ) + + return W, ii + 1 + + +# Some standard non-linear functions. +# XXX: these should be optimized, as they can be a bottleneck. +def _logcosh(x, fun_args=None): + alpha = fun_args.get("alpha", 1.0) # comment it out? + + x *= alpha + gx = np.tanh(x, x) # apply the tanh inplace + g_x = np.empty(x.shape[0], dtype=x.dtype) + # XXX compute in chunks to avoid extra allocation + for i, gx_i in enumerate(gx): # please don't vectorize. + g_x[i] = (alpha * (1 - gx_i**2)).mean() + return gx, g_x + + +def _exp(x, fun_args): + exp = np.exp(-(x**2) / 2) + gx = x * exp + g_x = (1 - x**2) * exp + return gx, g_x.mean(axis=-1) + + +def _cube(x, fun_args): + return x**3, (3 * x**2).mean(axis=-1) + + +@validate_params( + { + "X": ["array-like"], + "return_X_mean": ["boolean"], + "compute_sources": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def fastica( + X, + n_components=None, + *, + algorithm="parallel", + whiten="unit-variance", + fun="logcosh", + fun_args=None, + max_iter=200, + tol=1e-04, + w_init=None, + whiten_solver="svd", + random_state=None, + return_X_mean=False, + compute_sources=True, + return_n_iter=False, +): + """Perform Fast Independent Component Analysis. + + The implementation is based on [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + n_components : int, default=None + Number of components to use. If None is passed, all are used. + + algorithm : {'parallel', 'deflation'}, default='parallel' + Specify which algorithm to use for FastICA. + + whiten : str or bool, default='unit-variance' + Specify the whitening strategy to use. + + - If 'arbitrary-variance', a whitening with variance + arbitrary is used. + - If 'unit-variance', the whitening matrix is rescaled to ensure that + each recovered source has unit variance. + - If False, the data is already considered to be whitened, and no + whitening is performed. + + .. versionchanged:: 1.3 + The default value of `whiten` changed to 'unit-variance' in 1.3. 
+ + fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' + The functional form of the G function used in the + approximation to neg-entropy. Could be either 'logcosh', 'exp', + or 'cube'. + You can also provide your own function. It should return a tuple + containing the value of the function, and of its derivative, in the + point. The derivative should be averaged along its last dimension. + Example:: + + def my_g(x): + return x ** 3, (3 * x ** 2).mean(axis=-1) + + fun_args : dict, default=None + Arguments to send to the functional form. + If empty or None and if fun='logcosh', fun_args will take value + {'alpha' : 1.0}. + + max_iter : int, default=200 + Maximum number of iterations to perform. + + tol : float, default=1e-4 + A positive scalar giving the tolerance at which the + un-mixing matrix is considered to have converged. + + w_init : ndarray of shape (n_components, n_components), default=None + Initial un-mixing array. If `w_init=None`, then an array of values + drawn from a normal distribution is used. + + whiten_solver : {"eigh", "svd"}, default="svd" + The solver to use for whitening. + + - "svd" is more stable numerically if the problem is degenerate, and + often faster when `n_samples <= n_features`. + + - "eigh" is generally more memory efficient when + `n_samples >= n_features`, and can be faster when + `n_samples >= 50 * n_features`. + + .. versionadded:: 1.2 + + random_state : int, RandomState instance or None, default=None + Used to initialize ``w_init`` when not specified, with a + normal distribution. Pass an int, for reproducible results + across multiple function calls. + See :term:`Glossary `. + + return_X_mean : bool, default=False + If True, X_mean is returned too. + + compute_sources : bool, default=True + If False, sources are not computed, but only the rotation matrix. + This can save memory when working with big data. Defaults to True. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + K : ndarray of shape (n_components, n_features) or None + If whiten is 'True', K is the pre-whitening matrix that projects data + onto the first n_components principal components. If whiten is 'False', + K is 'None'. + + W : ndarray of shape (n_components, n_components) + The square matrix that unmixes the data after whitening. + The mixing matrix is the pseudo-inverse of matrix ``W K`` + if K is not None, else it is the inverse of W. + + S : ndarray of shape (n_samples, n_components) or None + Estimated source matrix. + + X_mean : ndarray of shape (n_features,) + The mean over features. Returned only if return_X_mean is True. + + n_iter : int + If the algorithm is "deflation", n_iter is the + maximum number of iterations run across all components. Else + they are just the number of iterations taken to converge. This is + returned only when return_n_iter is set to `True`. + + Notes + ----- + The data matrix X is considered to be a linear combination of + non-Gaussian (independent) components i.e. X = AS where columns of S + contain the independent components and A is a linear mixing + matrix. In short ICA attempts to `un-mix' the data by estimating an + un-mixing matrix W where ``S = W K X.`` + While FastICA was proposed to estimate as many sources + as features, it is possible to estimate less by setting + n_components < n_features. It this case K is not a square matrix + and the estimated A is the pseudo-inverse of ``W K``. 
+ + This implementation was originally made for data of shape + [n_features, n_samples]. Now the input is transposed + before the algorithm is applied. This makes it slightly + faster for Fortran-ordered input. + + References + ---------- + .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis", + Algorithms and Applications, Neural Networks, 13(4-5), 2000, + pp. 411-430. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import fastica + >>> X, _ = load_digits(return_X_y=True) + >>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance') + >>> K.shape + (7, 64) + >>> W.shape + (7, 7) + >>> S.shape + (1797, 7) + """ + est = FastICA( + n_components=n_components, + algorithm=algorithm, + whiten=whiten, + fun=fun, + fun_args=fun_args, + max_iter=max_iter, + tol=tol, + w_init=w_init, + whiten_solver=whiten_solver, + random_state=random_state, + ) + est._validate_params() + S = est._fit_transform(X, compute_sources=compute_sources) + + if est.whiten in ["unit-variance", "arbitrary-variance"]: + K = est.whitening_ + X_mean = est.mean_ + else: + K = None + X_mean = None + + returned_values = [K, est._unmixing, S] + if return_X_mean: + returned_values.append(X_mean) + if return_n_iter: + returned_values.append(est.n_iter_) + + return returned_values + + +class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """FastICA: a fast algorithm for Independent Component Analysis. + + The implementation is based on [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of components to use. If None is passed, all are used. + + algorithm : {'parallel', 'deflation'}, default='parallel' + Specify which algorithm to use for FastICA. + + whiten : str or bool, default='unit-variance' + Specify the whitening strategy to use. + + - If 'arbitrary-variance', a whitening with variance + arbitrary is used. + - If 'unit-variance', the whitening matrix is rescaled to ensure that + each recovered source has unit variance. + - If False, the data is already considered to be whitened, and no + whitening is performed. + + .. versionchanged:: 1.3 + The default value of `whiten` changed to 'unit-variance' in 1.3. + + fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' + The functional form of the G function used in the + approximation to neg-entropy. Could be either 'logcosh', 'exp', + or 'cube'. + You can also provide your own function. It should return a tuple + containing the value of the function, and of its derivative, in the + point. The derivative should be averaged along its last dimension. + Example:: + + def my_g(x): + return x ** 3, (3 * x ** 2).mean(axis=-1) + + fun_args : dict, default=None + Arguments to send to the functional form. + If empty or None and if fun='logcosh', fun_args will take value + {'alpha' : 1.0}. + + max_iter : int, default=200 + Maximum number of iterations during fit. + + tol : float, default=1e-4 + A positive scalar giving the tolerance at which the + un-mixing matrix is considered to have converged. + + w_init : array-like of shape (n_components, n_components), default=None + Initial un-mixing array. If `w_init=None`, then an array of values + drawn from a normal distribution is used. + + whiten_solver : {"eigh", "svd"}, default="svd" + The solver to use for whitening. + + - "svd" is more stable numerically if the problem is degenerate, and + often faster when `n_samples <= n_features`. 
+ + - "eigh" is generally more memory efficient when + `n_samples >= n_features`, and can be faster when + `n_samples >= 50 * n_features`. + + .. versionadded:: 1.2 + + random_state : int, RandomState instance or None, default=None + Used to initialize ``w_init`` when not specified, with a + normal distribution. Pass an int, for reproducible results + across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The linear operator to apply to the data to get the independent + sources. This is equal to the unmixing matrix when ``whiten`` is + False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when + ``whiten`` is True. + + mixing_ : ndarray of shape (n_features, n_components) + The pseudo-inverse of ``components_``. It is the linear operator + that maps independent sources to the data. + + mean_ : ndarray of shape(n_features,) + The mean over features. Only set if `self.whiten` is True. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + If the algorithm is "deflation", n_iter is the + maximum number of iterations run across all components. Else + they are just the number of iterations taken to converge. + + whitening_ : ndarray of shape (n_components, n_features) + Only set if whiten is 'True'. This is the pre-whitening matrix + that projects data onto the first `n_components` principal components. + + See Also + -------- + PCA : Principal component analysis (PCA). + IncrementalPCA : Incremental principal components analysis (IPCA). + KernelPCA : Kernel Principal component analysis (KPCA). + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparsePCA : Sparse Principal Components Analysis (SparsePCA). + + References + ---------- + .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis: + Algorithms and Applications, Neural Networks, 13(4-5), 2000, + pp. 411-430. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import FastICA + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = FastICA(n_components=7, + ... random_state=0, + ... 
whiten='unit-variance') + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "algorithm": [StrOptions({"parallel", "deflation"})], + "whiten": [ + StrOptions({"arbitrary-variance", "unit-variance"}), + Options(bool, {False}), + ], + "fun": [StrOptions({"logcosh", "exp", "cube"}), callable], + "fun_args": [dict, None], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "w_init": ["array-like", None], + "whiten_solver": [StrOptions({"eigh", "svd"})], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + algorithm="parallel", + whiten="unit-variance", + fun="logcosh", + fun_args=None, + max_iter=200, + tol=1e-4, + w_init=None, + whiten_solver="svd", + random_state=None, + ): + super().__init__() + self.n_components = n_components + self.algorithm = algorithm + self.whiten = whiten + self.fun = fun + self.fun_args = fun_args + self.max_iter = max_iter + self.tol = tol + self.w_init = w_init + self.whiten_solver = whiten_solver + self.random_state = random_state + + def _fit_transform(self, X, compute_sources=False): + """Fit the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + compute_sources : bool, default=False + If False, sources are not computes but only the rotation matrix. + This can save memory when working with big data. Defaults to False. + + Returns + ------- + S : ndarray of shape (n_samples, n_components) or None + Sources matrix. `None` if `compute_sources` is `False`. + """ + XT = self._validate_data( + X, copy=self.whiten, dtype=[np.float64, np.float32], ensure_min_samples=2 + ).T + fun_args = {} if self.fun_args is None else self.fun_args + random_state = check_random_state(self.random_state) + + alpha = fun_args.get("alpha", 1.0) + if not 1 <= alpha <= 2: + raise ValueError("alpha must be in [1,2]") + + if self.fun == "logcosh": + g = _logcosh + elif self.fun == "exp": + g = _exp + elif self.fun == "cube": + g = _cube + elif callable(self.fun): + + def g(x, fun_args): + return self.fun(x, **fun_args) + + n_features, n_samples = XT.shape + n_components = self.n_components + if not self.whiten and n_components is not None: + n_components = None + warnings.warn("Ignoring n_components with whiten=False.") + + if n_components is None: + n_components = min(n_samples, n_features) + if n_components > min(n_samples, n_features): + n_components = min(n_samples, n_features) + warnings.warn( + "n_components is too large: it will be set to %s" % n_components + ) + + if self.whiten: + # Centering the features of X + X_mean = XT.mean(axis=-1) + XT -= X_mean[:, np.newaxis] + + # Whitening and preprocessing by PCA + if self.whiten_solver == "eigh": + # Faster when num_samples >> n_features + d, u = linalg.eigh(XT.dot(X)) + sort_indices = np.argsort(d)[::-1] + eps = np.finfo(d.dtype).eps + degenerate_idx = d < eps + if np.any(degenerate_idx): + warnings.warn( + "There are some small singular values, using " + "whiten_solver = 'svd' might lead to more " + "accurate results." 
+ ) + d[degenerate_idx] = eps # For numerical issues + np.sqrt(d, out=d) + d, u = d[sort_indices], u[:, sort_indices] + elif self.whiten_solver == "svd": + u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2] + + # Give consistent eigenvectors for both svd solvers + u *= np.sign(u[0]) + + K = (u / d).T[:n_components] # see (6.33) p.140 + del u, d + X1 = np.dot(K, XT) + # see (13.6) p.267 Here X1 is white and data + # in X has been projected onto a subspace by PCA + X1 *= np.sqrt(n_samples) + else: + # X must be casted to floats to avoid typing issues with numpy + # 2.0 and the line below + X1 = as_float_array(XT, copy=False) # copy has been taken care of + + w_init = self.w_init + if w_init is None: + w_init = np.asarray( + random_state.normal(size=(n_components, n_components)), dtype=X1.dtype + ) + + else: + w_init = np.asarray(w_init) + if w_init.shape != (n_components, n_components): + raise ValueError( + "w_init has invalid shape -- should be %(shape)s" + % {"shape": (n_components, n_components)} + ) + + kwargs = { + "tol": self.tol, + "g": g, + "fun_args": fun_args, + "max_iter": self.max_iter, + "w_init": w_init, + } + + if self.algorithm == "parallel": + W, n_iter = _ica_par(X1, **kwargs) + elif self.algorithm == "deflation": + W, n_iter = _ica_def(X1, **kwargs) + del X1 + + self.n_iter_ = n_iter + + if compute_sources: + if self.whiten: + S = np.linalg.multi_dot([W, K, XT]).T + else: + S = np.dot(W, XT).T + else: + S = None + + if self.whiten: + if self.whiten == "unit-variance": + if not compute_sources: + S = np.linalg.multi_dot([W, K, XT]).T + S_std = np.std(S, axis=0, keepdims=True) + S /= S_std + W /= S_std.T + + self.components_ = np.dot(W, K) + self.mean_ = X_mean + self.whitening_ = K + else: + self.components_ = W + + self.mixing_ = linalg.pinv(self.components_, check_finite=False) + self._unmixing = W + + return S + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model and recover the sources from X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Estimated sources obtained by transforming the data with the + estimated unmixing matrix. + """ + return self._fit_transform(X, compute_sources=True) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + self._fit_transform(X, compute_sources=False) + return self + + def transform(self, X, copy=True): + """Recover the sources from X (apply the unmixing matrix). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to transform, where `n_samples` is the number of samples + and `n_features` is the number of features. + + copy : bool, default=True + If False, data passed to fit can be overwritten. Defaults to True. 
+ + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Estimated sources obtained by transforming the data with the + estimated unmixing matrix. + """ + check_is_fitted(self) + + X = self._validate_data( + X, copy=(copy and self.whiten), dtype=[np.float64, np.float32], reset=False + ) + if self.whiten: + X -= self.mean_ + + return np.dot(X, self.components_.T) + + def inverse_transform(self, X, copy=True): + """Transform the sources back to the mixed data (apply mixing matrix). + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + Sources, where `n_samples` is the number of samples + and `n_components` is the number of components. + copy : bool, default=True + If False, data passed to fit are overwritten. Defaults to True. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Reconstructed data obtained with the mixing matrix. + """ + check_is_fitted(self) + + X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32]) + X = np.dot(X, self.mixing_.T) + if self.whiten: + X += self.mean_ + + return X + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return {"preserves_dtype": [np.float32, np.float64]} diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..1089b2c54e086a79314d59b63532ff4842d9ccc8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py @@ -0,0 +1,409 @@ +"""Incremental Principal Components Analysis.""" + +# Author: Kyle Kastner +# Giorgio Patrini +# License: BSD 3 clause + +from numbers import Integral + +import numpy as np +from scipy import linalg, sparse + +from ..base import _fit_context +from ..utils import gen_batches +from ..utils._param_validation import Interval +from ..utils.extmath import _incremental_mean_and_var, svd_flip +from ._base import _BasePCA + + +class IncrementalPCA(_BasePCA): + """Incremental principal components analysis (IPCA). + + Linear dimensionality reduction using Singular Value Decomposition of + the data, keeping only the most significant singular vectors to + project the data to a lower dimensional space. The input data is centered + but not scaled for each feature before applying the SVD. + + Depending on the size of the input data, this algorithm can be much more + memory efficient than a PCA, and allows sparse input. + + This algorithm has constant memory complexity, on the order + of ``batch_size * n_features``, enabling use of np.memmap files without + loading the entire file into memory. For sparse matrices, the input + is converted to dense in batches (in order to be able to subtract the + mean) which avoids storing the entire dense matrix at any one time. + + The computational overhead of each SVD is + ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples + remain in memory at a time. There will be ``n_samples / batch_size`` SVD + computations to get the principal components, versus 1 large SVD of + complexity ``O(n_samples * n_features ** 2)`` for PCA. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_incremental_pca.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.16 + + Parameters + ---------- + n_components : int, default=None + Number of components to keep. 
If ``n_components`` is ``None``, + then ``n_components`` is set to ``min(n_samples, n_features)``. + + whiten : bool, default=False + When True (False by default) the ``components_`` vectors are divided + by ``n_samples`` times ``components_`` to ensure uncorrelated outputs + with unit component-wise variances. + + Whitening will remove some information from the transformed signal + (the relative variance scales of the components) but can sometimes + improve the predictive accuracy of the downstream estimators by + making data respect some hard-wired assumptions. + + copy : bool, default=True + If False, X will be overwritten. ``copy=False`` can be used to + save memory but is unsafe for general use. + + batch_size : int, default=None + The number of samples to use for each batch. Only used when calling + ``fit``. If ``batch_size`` is ``None``, then ``batch_size`` + is inferred from the data and set to ``5 * n_features``, to provide a + balance between approximation accuracy and memory consumption. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Principal axes in feature space, representing the directions of + maximum variance in the data. Equivalently, the right singular + vectors of the centered input data, parallel to its eigenvectors. + The components are sorted by decreasing ``explained_variance_``. + + explained_variance_ : ndarray of shape (n_components,) + Variance explained by each of the selected components. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + If all components are stored, the sum of explained variances is equal + to 1.0. + + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, aggregate over calls to ``partial_fit``. + + var_ : ndarray of shape (n_features,) + Per-feature empirical variance, aggregate over calls to + ``partial_fit``. + + noise_variance_ : float + The estimated noise covariance following the Probabilistic PCA model + from Tipping and Bishop 1999. See "Pattern Recognition and + Machine Learning" by C. Bishop, 12.2.1 p. 574 or + http://www.miketipping.com/papers/met-mppca.pdf. + + n_components_ : int + The estimated number of components. Relevant when + ``n_components=None``. + + n_samples_seen_ : int + The number of samples processed by the estimator. Will be reset on + new calls to fit, but increments across ``partial_fit`` calls. + + batch_size_ : int + Inferred batch size from ``batch_size``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA : Principal component analysis (PCA). + KernelPCA : Kernel Principal component analysis (KPCA). + SparsePCA : Sparse Principal Components Analysis (SparsePCA). + TruncatedSVD : Dimensionality reduction using truncated SVD. + + Notes + ----- + Implements the incremental PCA model from: + *D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual + Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3, + pp. 
125-141, May 2008.* + See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf + + This model is an extension of the Sequential Karhunen-Loeve Transform from: + :doi:`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and + its Application to Images, IEEE Transactions on Image Processing, Volume 9, + Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>` + + We have specifically abstained from an optimization used by authors of both + papers, a QR decomposition used in specific situations to reduce the + algorithmic complexity of the SVD. The source for this technique is + *Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5, + section 5.4.4, pp 252-253.*. This technique has been omitted because it is + advantageous only when decomposing a matrix with ``n_samples`` (rows) + >= 5/3 * ``n_features`` (columns), and hurts the readability of the + implemented algorithm. This would be a good opportunity for future + optimization, if it is deemed necessary. + + References + ---------- + D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual + Tracking, International Journal of Computer Vision, Volume 77, + Issue 1-3, pp. 125-141, May 2008. + + G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5, + Section 5.4.4, pp. 252-253. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import IncrementalPCA + >>> from scipy import sparse + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = IncrementalPCA(n_components=7, batch_size=200) + >>> # either partially fit on smaller batches of data + >>> transformer.partial_fit(X[:100, :]) + IncrementalPCA(batch_size=200, n_components=7) + >>> # or let the fit function itself divide the data into batches + >>> X_sparse = sparse.csr_matrix(X) + >>> X_transformed = transformer.fit_transform(X_sparse) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "whiten": ["boolean"], + "copy": ["boolean"], + "batch_size": [Interval(Integral, 1, None, closed="left"), None], + } + + def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None): + self.n_components = n_components + self.whiten = whiten + self.copy = copy + self.batch_size = batch_size + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X, using minibatches of size batch_size. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
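As a rough out-of-core style sketch of the constant-memory behaviour described above, the estimator can be fed fixed-size chunks through `partial_fit`; the chunk size, array shape, and random data here are illustrative assumptions only:

    import numpy as np
    from sklearn.decomposition import IncrementalPCA

    rng = np.random.RandomState(0)
    X = rng.standard_normal((1000, 20))     # stand-in for data that may not fit in memory

    ipca = IncrementalPCA(n_components=5)
    for start in range(0, X.shape[0], 200): # feed 200 rows at a time
        ipca.partial_fit(X[start:start + 200])

    X_reduced = ipca.transform(X)
    print(X_reduced.shape)                            # (1000, 5)
    print(ipca.explained_variance_ratio_.sum())       # fraction of variance kept

Calling `fit` on the full array instead would perform the same batching internally, using `batch_size` (or the inferred `5 * n_features`) to split the data.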
+ """ + self.components_ = None + self.n_samples_seen_ = 0 + self.mean_ = 0.0 + self.var_ = 0.0 + self.singular_values_ = None + self.explained_variance_ = None + self.explained_variance_ratio_ = None + self.noise_variance_ = None + + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "lil"], + copy=self.copy, + dtype=[np.float64, np.float32], + ) + n_samples, n_features = X.shape + + if self.batch_size is None: + self.batch_size_ = 5 * n_features + else: + self.batch_size_ = self.batch_size + + for batch in gen_batches( + n_samples, self.batch_size_, min_batch_size=self.n_components or 0 + ): + X_batch = X[batch] + if sparse.issparse(X_batch): + X_batch = X_batch.toarray() + self.partial_fit(X_batch, check_input=False) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, check_input=True): + """Incremental fit with X. All of X is processed as a single batch. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + check_input : bool, default=True + Run check_array on X. + + Returns + ------- + self : object + Returns the instance itself. + """ + first_pass = not hasattr(self, "components_") + + if check_input: + if sparse.issparse(X): + raise TypeError( + "IncrementalPCA.partial_fit does not support " + "sparse input. Either convert data to dense " + "or use IncrementalPCA.fit to do so in batches." + ) + X = self._validate_data( + X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass + ) + n_samples, n_features = X.shape + if first_pass: + self.components_ = None + + if self.n_components is None: + if self.components_ is None: + self.n_components_ = min(n_samples, n_features) + else: + self.n_components_ = self.components_.shape[0] + elif not self.n_components <= n_features: + raise ValueError( + "n_components=%r invalid for n_features=%d, need " + "more rows than columns for IncrementalPCA " + "processing" % (self.n_components, n_features) + ) + elif not self.n_components <= n_samples: + raise ValueError( + "n_components=%r must be less or equal to " + "the batch number of samples " + "%d." % (self.n_components, n_samples) + ) + else: + self.n_components_ = self.n_components + + if (self.components_ is not None) and ( + self.components_.shape[0] != self.n_components_ + ): + raise ValueError( + "Number of input features has changed from %i " + "to %i between calls to partial_fit! Try " + "setting n_components to a fixed value." 
+ % (self.components_.shape[0], self.n_components_) + ) + + # This is the first partial_fit + if not hasattr(self, "n_samples_seen_"): + self.n_samples_seen_ = 0 + self.mean_ = 0.0 + self.var_ = 0.0 + + # Update stats - they are 0 if this is the first step + col_mean, col_var, n_total_samples = _incremental_mean_and_var( + X, + last_mean=self.mean_, + last_variance=self.var_, + last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]), + ) + n_total_samples = n_total_samples[0] + + # Whitening + if self.n_samples_seen_ == 0: + # If it is the first step, simply whiten X + X -= col_mean + else: + col_batch_mean = np.mean(X, axis=0) + X -= col_batch_mean + # Build matrix of combined previous basis and new data + mean_correction = np.sqrt( + (self.n_samples_seen_ / n_total_samples) * n_samples + ) * (self.mean_ - col_batch_mean) + X = np.vstack( + ( + self.singular_values_.reshape((-1, 1)) * self.components_, + X, + mean_correction, + ) + ) + + U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False) + U, Vt = svd_flip(U, Vt, u_based_decision=False) + explained_variance = S**2 / (n_total_samples - 1) + explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples) + + self.n_samples_seen_ = n_total_samples + self.components_ = Vt[: self.n_components_] + self.singular_values_ = S[: self.n_components_] + self.mean_ = col_mean + self.var_ = col_var + self.explained_variance_ = explained_variance[: self.n_components_] + self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_] + # we already checked `self.n_components <= n_samples` above + if self.n_components_ not in (n_samples, n_features): + self.noise_variance_ = explained_variance[self.n_components_ :].mean() + else: + self.noise_variance_ = 0.0 + return self + + def transform(self, X): + """Apply dimensionality reduction to X. + + X is projected on the first principal components previously extracted + from a training set, using minibatches of size batch_size if X is + sparse. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Projection of X in the first principal components. + + Examples + -------- + + >>> import numpy as np + >>> from sklearn.decomposition import IncrementalPCA + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], + ... 
[1, 1], [2, 1], [3, 2]]) + >>> ipca = IncrementalPCA(n_components=2, batch_size=3) + >>> ipca.fit(X) + IncrementalPCA(batch_size=3, n_components=2) + >>> ipca.transform(X) # doctest: +SKIP + """ + if sparse.issparse(X): + n_samples = X.shape[0] + output = [] + for batch in gen_batches( + n_samples, self.batch_size_, min_batch_size=self.n_components or 0 + ): + output.append(super().transform(X[batch].toarray())) + return np.vstack(output) + else: + return super().transform(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..8fc4aa26a6dfb87428ce59c58d18632cffdc2ad6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py @@ -0,0 +1,572 @@ +"""Kernel Principal Components Analysis.""" + +# Author: Mathieu Blondel +# Sylvain Marie +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.linalg import eigh +from scipy.sparse.linalg import eigsh + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import NotFittedError +from ..metrics.pairwise import pairwise_kernels +from ..preprocessing import KernelCenterer +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import _randomized_eigsh, svd_flip +from ..utils.validation import ( + _check_psd_eigenvalues, + check_is_fitted, +) + + +class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Kernel Principal component analysis (KPCA) [1]_. + + Non-linear dimensionality reduction through the use of kernels (see + :ref:`metrics`). + + It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD + or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the + truncated SVD, depending on the shape of the input data and the number of + components to extract. It can also use a randomized truncated SVD by the + method proposed in [3]_, see `eigen_solver`. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_kernel_pca.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of components. If None, all non-zero components are kept. + + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \ + or callable, default='linear' + Kernel used for PCA. + + gamma : float, default=None + Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other + kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``. + + degree : float, default=3 + Degree for poly kernels. Ignored by other kernels. + + coef0 : float, default=1 + Independent term in poly and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict, default=None + Parameters (keyword arguments) and + values for kernel passed as callable object. + Ignored by other kernels. + + alpha : float, default=1.0 + Hyperparameter of the ridge regression that learns the + inverse transform (when fit_inverse_transform=True). + + fit_inverse_transform : bool, default=False + Learn the inverse transform for non-precomputed kernels + (i.e. learn to find the pre-image of a point). This method is based + on [2]_. 
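A hedged sketch of the pre-image learning mentioned above: setting `fit_inverse_transform=True` makes `inverse_transform` available via the ridge regression described in [2]_. The dataset, kernel, `gamma`, and `alpha` values are illustrative choices, not defaults implied by this diff:

    from sklearn.datasets import make_circles
    from sklearn.decomposition import KernelPCA

    X, _ = make_circles(n_samples=300, factor=0.3, noise=0.05, random_state=0)

    kpca = KernelPCA(
        n_components=2,
        kernel="rbf",
        gamma=10,
        fit_inverse_transform=True,   # learn the ridge-regression pre-image map
        alpha=0.1,
        random_state=0,
    )
    X_kpca = kpca.fit_transform(X)
    X_back = kpca.inverse_transform(X_kpca)   # approximate reconstruction in input space
    print(X_kpca.shape, X_back.shape)         # (300, 2) (300, 2)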
+ + eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \ + default='auto' + Select eigensolver to use. If `n_components` is much + less than the number of training samples, randomized (or arpack to a + smaller extent) may be more efficient than the dense eigensolver. + Randomized SVD is performed according to the method of Halko et al + [3]_. + + auto : + the solver is selected by a default policy based on n_samples + (the number of training samples) and `n_components`: + if the number of components to extract is less than 10 (strict) and + the number of samples is more than 200 (strict), the 'arpack' + method is enabled. Otherwise the exact full eigenvalue + decomposition is computed and optionally truncated afterwards + ('dense' method). + dense : + run exact full eigenvalue decomposition calling the standard + LAPACK solver via `scipy.linalg.eigh`, and select the components + by postprocessing + arpack : + run SVD truncated to n_components calling ARPACK solver using + `scipy.sparse.linalg.eigsh`. It requires strictly + 0 < n_components < n_samples + randomized : + run randomized SVD by the method of Halko et al. [3]_. The current + implementation selects eigenvalues based on their module; therefore + using this method can lead to unexpected results if the kernel is + not positive semi-definite. See also [4]_. + + .. versionchanged:: 1.0 + `'randomized'` was added. + + tol : float, default=0 + Convergence tolerance for arpack. + If 0, optimal value will be chosen by arpack. + + max_iter : int, default=None + Maximum number of iterations for arpack. + If None, optimal value will be chosen by arpack. + + iterated_power : int >= 0, or 'auto', default='auto' + Number of iterations for the power method computed by + svd_solver == 'randomized'. When 'auto', it is set to 7 when + `n_components < 0.1 * min(X.shape)`, other it is set to 4. + + .. versionadded:: 1.0 + + remove_zero_eig : bool, default=False + If True, then all components with zero eigenvalues are removed, so + that the number of components in the output may be < n_components + (and sometimes even zero due to numerical instability). + When n_components is None, this parameter is ignored and components + with zero eigenvalues are removed regardless. + + random_state : int, RandomState instance or None, default=None + Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + .. versionadded:: 0.18 + + copy_X : bool, default=True + If True, input X is copied and stored by the model in the `X_fit_` + attribute. If no further changes will be done to X, setting + `copy_X=False` saves memory by storing a reference. + + .. versionadded:: 0.18 + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.18 + + Attributes + ---------- + eigenvalues_ : ndarray of shape (n_components,) + Eigenvalues of the centered kernel matrix in decreasing order. + If `n_components` and `remove_zero_eig` are not set, + then all values are stored. + + eigenvectors_ : ndarray of shape (n_samples, n_components) + Eigenvectors of the centered kernel matrix. If `n_components` and + `remove_zero_eig` are not set, then all components are stored. + + dual_coef_ : ndarray of shape (n_samples, n_features) + Inverse transform matrix. 
Only available when + ``fit_inverse_transform`` is True. + + X_transformed_fit_ : ndarray of shape (n_samples, n_components) + Projection of the fitted data on the kernel principal components. + Only available when ``fit_inverse_transform`` is True. + + X_fit_ : ndarray of shape (n_samples, n_features) + The data used to fit the model. If `copy_X=False`, then `X_fit_` is + a reference. This attribute is used for the calls to transform. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + gamma_ : float + Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma` + is explicitly provided, this is just the same as `gamma`. When `gamma` + is `None`, this is the actual value of kernel coefficient. + + .. versionadded:: 1.3 + + See Also + -------- + FastICA : A fast algorithm for Independent Component Analysis. + IncrementalPCA : Incremental Principal Component Analysis. + NMF : Non-Negative Matrix Factorization. + PCA : Principal Component Analysis. + SparsePCA : Sparse Principal Component Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + + References + ---------- + .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller. + "Kernel principal component analysis." + International conference on artificial neural networks. + Springer, Berlin, Heidelberg, 1997. + `_ + + .. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. + "Learning to find pre-images." + Advances in neural information processing systems 16 (2004): 449-456. + `_ + + .. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp. + "Finding structure with randomness: Probabilistic algorithms for + constructing approximate matrix decompositions." + SIAM review 53.2 (2011): 217-288. <0909.4061>` + + .. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert. + "A randomized algorithm for the decomposition of matrices." + Applied and Computational Harmonic Analysis 30.1 (2011): 47-68. 
+ `_ + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import KernelPCA + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = KernelPCA(n_components=7, kernel='linear') + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "kernel": [ + StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}), + callable, + ], + "gamma": [ + Interval(Real, 0, None, closed="left"), + None, + ], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + "alpha": [Interval(Real, 0, None, closed="left")], + "fit_inverse_transform": ["boolean"], + "eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "iterated_power": [ + Interval(Integral, 0, None, closed="left"), + StrOptions({"auto"}), + ], + "remove_zero_eig": ["boolean"], + "random_state": ["random_state"], + "copy_X": ["boolean"], + "n_jobs": [None, Integral], + } + + def __init__( + self, + n_components=None, + *, + kernel="linear", + gamma=None, + degree=3, + coef0=1, + kernel_params=None, + alpha=1.0, + fit_inverse_transform=False, + eigen_solver="auto", + tol=0, + max_iter=None, + iterated_power="auto", + remove_zero_eig=False, + random_state=None, + copy_X=True, + n_jobs=None, + ): + self.n_components = n_components + self.kernel = kernel + self.kernel_params = kernel_params + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.alpha = alpha + self.fit_inverse_transform = fit_inverse_transform + self.eigen_solver = eigen_solver + self.tol = tol + self.max_iter = max_iter + self.iterated_power = iterated_power + self.remove_zero_eig = remove_zero_eig + self.random_state = random_state + self.n_jobs = n_jobs + self.copy_X = copy_X + + def _get_kernel(self, X, Y=None): + if callable(self.kernel): + params = self.kernel_params or {} + else: + params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0} + return pairwise_kernels( + X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params + ) + + def _fit_transform(self, K): + """Fit's using kernel K""" + # center kernel + K = self._centerer.fit_transform(K) + + # adjust n_components according to user inputs + if self.n_components is None: + n_components = K.shape[0] # use all dimensions + else: + n_components = min(K.shape[0], self.n_components) + + # compute eigenvectors + if self.eigen_solver == "auto": + if K.shape[0] > 200 and n_components < 10: + eigen_solver = "arpack" + else: + eigen_solver = "dense" + else: + eigen_solver = self.eigen_solver + + if eigen_solver == "dense": + # Note: subset_by_index specifies the indices of smallest/largest to return + self.eigenvalues_, self.eigenvectors_ = eigh( + K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1) + ) + elif eigen_solver == "arpack": + v0 = _init_arpack_v0(K.shape[0], self.random_state) + self.eigenvalues_, self.eigenvectors_ = eigsh( + K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0 + ) + elif eigen_solver == "randomized": + self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh( + K, + n_components=n_components, + n_iter=self.iterated_power, + random_state=self.random_state, + 
selection="module", + ) + + # make sure that the eigenvalues are ok and fix numerical issues + self.eigenvalues_ = _check_psd_eigenvalues( + self.eigenvalues_, enable_warnings=False + ) + + # flip eigenvectors' sign to enforce deterministic output + self.eigenvectors_, _ = svd_flip( + self.eigenvectors_, np.zeros_like(self.eigenvectors_).T + ) + + # sort eigenvectors in descending order + indices = self.eigenvalues_.argsort()[::-1] + self.eigenvalues_ = self.eigenvalues_[indices] + self.eigenvectors_ = self.eigenvectors_[:, indices] + + # remove eigenvectors with a zero eigenvalue (null space) if required + if self.remove_zero_eig or self.n_components is None: + self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0] + self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0] + + # Maintenance note on Eigenvectors normalization + # ---------------------------------------------- + # there is a link between + # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)' + # if v is an eigenvector of K + # then Phi(X)v is an eigenvector of Phi(X)Phi(X)' + # if u is an eigenvector of Phi(X)Phi(X)' + # then Phi(X)'u is an eigenvector of Phi(X)'Phi(X) + # + # At this stage our self.eigenvectors_ (the v) have norm 1, we need to scale + # them so that eigenvectors in kernel feature space (the u) have norm=1 + # instead + # + # We COULD scale them here: + # self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_) + # + # But choose to perform that LATER when needed, in `fit()` and in + # `transform()`. + + return K + + def _fit_inverse_transform(self, X_transformed, X): + if hasattr(X, "tocsr"): + raise NotImplementedError( + "Inverse transform not implemented for sparse matrices!" + ) + + n_samples = X_transformed.shape[0] + K = self._get_kernel(X_transformed) + K.flat[:: n_samples + 1] += self.alpha + self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True) + self.X_transformed_fit_ = X_transformed + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + if self.fit_inverse_transform and self.kernel == "precomputed": + raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.") + X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X) + self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma + self._centerer = KernelCenterer().set_output(transform="default") + K = self._get_kernel(X) + self._fit_transform(K) + + if self.fit_inverse_transform: + # no need to use the kernel to transform X, use shortcut expression + X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_) + + self._fit_inverse_transform(X_transformed, X) + + self.X_fit_ = X + return self + + def fit_transform(self, X, y=None, **params): + """Fit the model from data in X and transform X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. 
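A rough illustration of the `eigen_solver` dispatch documented and implemented above: with many samples and few components the truncated solvers can be requested explicitly, while `'auto'` picks `'arpack'` under the `n_samples > 200` and `n_components < 10` policy. The array sizes below are arbitrary assumptions:

    import numpy as np
    from sklearn.decomposition import KernelPCA

    rng = np.random.RandomState(0)
    X = rng.standard_normal((500, 30))      # n_samples > 200, n_components < 10

    for solver in ("auto", "dense", "arpack", "randomized"):
        kpca = KernelPCA(n_components=5, kernel="rbf",
                         eigen_solver=solver, random_state=0)
        Z = kpca.fit_transform(X)
        print(solver, Z.shape, kpca.eigenvalues_[:3].round(3))

Up to numerical noise, the four solvers should agree on the leading eigenvalues; `'randomized'` selects by modulus, which is only safe for (near) positive semi-definite kernels.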
+ + **params : kwargs + Parameters (keyword arguments) and values passed to + the fit_transform instance. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + """ + self.fit(X, **params) + + # no need to use the kernel to transform X, use shortcut expression + X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_) + + if self.fit_inverse_transform: + self._fit_inverse_transform(X_transformed, X) + + return X_transformed + + def transform(self, X): + """Transform X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + + # Compute centered gram matrix between X and training data X_fit_ + K = self._centerer.transform(self._get_kernel(X, self.X_fit_)) + + # scale eigenvectors (properly account for null-space for dot product) + non_zeros = np.flatnonzero(self.eigenvalues_) + scaled_alphas = np.zeros_like(self.eigenvectors_) + scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt( + self.eigenvalues_[non_zeros] + ) + + # Project with a scalar product between K and the scaled eigenvectors + return np.dot(K, scaled_alphas) + + def inverse_transform(self, X): + """Transform X back to original space. + + ``inverse_transform`` approximates the inverse transformation using + a learned pre-image. The pre-image is learned by kernel ridge + regression of the original data on their low-dimensional representation + vectors. + + .. note: + :meth:`~sklearn.decomposition.fit` internally uses a centered + kernel. As the centered kernel no longer contains the information + of the mean of kernel features, such information is not taken into + account in reconstruction. + + .. note:: + When users want to compute inverse transformation for 'linear' + kernel, it is recommended that they use + :class:`~sklearn.decomposition.PCA` instead. Unlike + :class:`~sklearn.decomposition.PCA`, + :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform`` + does not reconstruct the mean of data when 'linear' kernel is used + due to the use of centered kernel. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_components) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Returns the instance itself. + + References + ---------- + `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. + "Learning to find pre-images." + Advances in neural information processing systems 16 (2004): 449-456. + `_ + """ + if not self.fit_inverse_transform: + raise NotFittedError( + "The fit_inverse_transform parameter was not" + " set to True when instantiating and hence " + "the inverse transform is not available." 
+ ) + + K = self._get_kernel(X, self.X_transformed_fit_) + return np.dot(K, self.dual_coef_) + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + "pairwise": self.kernel == "precomputed", + } + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.eigenvalues_.shape[0] diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_lda.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_lda.py new file mode 100644 index 0000000000000000000000000000000000000000..9e161c178b9e327e4a5e6f6f0c0b3ed9c1cbd57f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_lda.py @@ -0,0 +1,929 @@ +""" + +============================================================= +Online Latent Dirichlet Allocation with variational inference +============================================================= + +This implementation is modified from Matthew D. Hoffman's onlineldavb code +Link: https://github.com/blei-lab/onlineldavb +""" + +# Author: Chyi-Kwei Yau +# Author: Matthew D. Hoffman (original onlineldavb implementation) +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from joblib import effective_n_jobs +from scipy.special import gammaln, logsumexp + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..utils import check_random_state, gen_batches, gen_even_slices +from ..utils._param_validation import Interval, StrOptions +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted, check_non_negative +from ._online_lda_fast import ( + _dirichlet_expectation_1d as cy_dirichlet_expectation_1d, +) +from ._online_lda_fast import ( + _dirichlet_expectation_2d, +) +from ._online_lda_fast import ( + mean_change as cy_mean_change, +) + +EPS = np.finfo(float).eps + + +def _update_doc_distribution( + X, + exp_topic_word_distr, + doc_topic_prior, + max_doc_update_iter, + mean_change_tol, + cal_sstats, + random_state, +): + """E-step: update document-topic distribution. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + exp_topic_word_distr : ndarray of shape (n_topics, n_features) + Exponential value of expectation of log topic word distribution. + In the literature, this is `exp(E[log(beta)])`. + + doc_topic_prior : float + Prior of document topic distribution `theta`. + + max_doc_update_iter : int + Max number of iterations for updating document topic distribution in + the E-step. + + mean_change_tol : float + Stopping tolerance for updating document topic distribution in E-step. + + cal_sstats : bool + Parameter that indicate to calculate sufficient statistics or not. + Set `cal_sstats` to `True` when we need to run M-step. + + random_state : RandomState instance or None + Parameter that indicate how to initialize document topic distribution. + Set `random_state` to None will initialize document topic distribution + to a constant number. + + Returns + ------- + (doc_topic_distr, suff_stats) : + `doc_topic_distr` is unnormalized topic distribution for each document. + In the literature, this is `gamma`. we can calculate `E[log(theta)]` + from it. + `suff_stats` is expected sufficient statistics for the M-step. + When `cal_sstats == False`, this will be None. 
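The following is an assumption-laden NumPy re-derivation, not the vendored Cython path, of the single-document E-step just described: `gamma_d` is iterated as `gamma_dk = alpha + sum_w n_dw * phi_dwk`, with `phi_dwk` proportional to `exp(E[log theta_dk]) * exp(E[log beta_kw])`. All sizes, priors, and the convergence tolerance are illustrative:

    import numpy as np
    from scipy.special import digamma

    rng = np.random.RandomState(0)
    n_topics, n_words = 3, 8
    counts = rng.poisson(2.0, size=n_words).astype(float)   # word counts of one document
    lam = rng.gamma(100.0, 0.01, (n_topics, n_words))        # stand-in for lambda
    exp_Elog_beta = np.exp(digamma(lam) - digamma(lam.sum(axis=1, keepdims=True)))
    alpha = 1.0 / n_topics                                   # doc_topic_prior

    gamma_d = rng.gamma(100.0, 0.01, n_topics)               # random init, as in the E-step
    for _ in range(100):
        exp_Elog_theta = np.exp(digamma(gamma_d) - digamma(gamma_d.sum()))
        norm_phi = exp_Elog_theta @ exp_Elog_beta + 1e-100   # per-word phi normaliser
        new_gamma = alpha + exp_Elog_theta * ((counts / norm_phi) @ exp_Elog_beta.T)
        converged = np.mean(np.abs(new_gamma - gamma_d)) < 1e-3
        gamma_d = new_gamma
        if converged:
            break

    print(gamma_d / gamma_d.sum())                           # approximate topic proportions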
+ + """ + is_sparse_x = sp.issparse(X) + n_samples, n_features = X.shape + n_topics = exp_topic_word_distr.shape[0] + + if random_state: + doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype( + X.dtype, copy=False + ) + else: + doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype) + + # In the literature, this is `exp(E[log(theta)])` + exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr)) + + # diff on `component_` (only calculate it when `cal_diff` is True) + suff_stats = ( + np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None + ) + + if is_sparse_x: + X_data = X.data + X_indices = X.indices + X_indptr = X.indptr + + # These cython functions are called in a nested loop on usually very small arrays + # (length=n_topics). In that case, finding the appropriate signature of the + # fused-typed function can be more costly than its execution, hence the dispatch + # is done outside of the loop. + ctype = "float" if X.dtype == np.float32 else "double" + mean_change = cy_mean_change[ctype] + dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype] + eps = np.finfo(X.dtype).eps + + for idx_d in range(n_samples): + if is_sparse_x: + ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]] + cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]] + else: + ids = np.nonzero(X[idx_d, :])[0] + cnts = X[idx_d, ids] + + doc_topic_d = doc_topic_distr[idx_d, :] + # The next one is a copy, since the inner loop overwrites it. + exp_doc_topic_d = exp_doc_topic[idx_d, :].copy() + exp_topic_word_d = exp_topic_word_distr[:, ids] + + # Iterate between `doc_topic_d` and `norm_phi` until convergence + for _ in range(0, max_doc_update_iter): + last_d = doc_topic_d + + # The optimal phi_{dwk} is proportional to + # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]). + norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps + + doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T) + # Note: adds doc_topic_prior to doc_topic_d, in-place. + dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d) + + if mean_change(last_d, doc_topic_d) < mean_change_tol: + break + doc_topic_distr[idx_d, :] = doc_topic_d + + # Contribution of document d to the expected sufficient + # statistics for the M step. + if cal_sstats: + norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps + suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi) + + return (doc_topic_distr, suff_stats) + + +class LatentDirichletAllocation( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Latent Dirichlet Allocation with online variational Bayes algorithm. + + The implementation is based on [1]_ and [2]_. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=10 + Number of topics. + + .. versionchanged:: 0.19 + ``n_topics`` was renamed to ``n_components`` + + doc_topic_prior : float, default=None + Prior of document topic distribution `theta`. If the value is None, + defaults to `1 / n_components`. + In [1]_, this is called `alpha`. + + topic_word_prior : float, default=None + Prior of topic word distribution `beta`. If the value is None, defaults + to `1 / n_components`. + In [1]_, this is called `eta`. + + learning_method : {'batch', 'online'}, default='batch' + Method used to update `_component`. Only used in :meth:`fit` method. + In general, if the data size is large, the online update will be much + faster than the batch update. 
+ + Valid options:: + + 'batch': Batch variational Bayes method. Use all training data in + each EM update. + Old `components_` will be overwritten in each iteration. + 'online': Online variational Bayes method. In each EM update, use + mini-batch of training data to update the ``components_`` + variable incrementally. The learning rate is controlled by the + ``learning_decay`` and the ``learning_offset`` parameters. + + .. versionchanged:: 0.20 + The default learning method is now ``"batch"``. + + learning_decay : float, default=0.7 + It is a parameter that control learning rate in the online learning + method. The value should be set between (0.5, 1.0] to guarantee + asymptotic convergence. When the value is 0.0 and batch_size is + ``n_samples``, the update method is same as batch learning. In the + literature, this is called kappa. + + learning_offset : float, default=10.0 + A (positive) parameter that downweights early iterations in online + learning. It should be greater than 1.0. In the literature, this is + called tau_0. + + max_iter : int, default=10 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the :meth:`fit` method, and not the + :meth:`partial_fit` method. + + batch_size : int, default=128 + Number of documents to use in each EM iteration. Only used in online + learning. + + evaluate_every : int, default=-1 + How often to evaluate perplexity. Only used in `fit` method. + set it to 0 or negative number to not evaluate perplexity in + training at all. Evaluating perplexity can help you check convergence + in training process, but it will also increase total training time. + Evaluating perplexity in every iteration might increase training time + up to two-fold. + + total_samples : int, default=1e6 + Total number of documents. Only used in the :meth:`partial_fit` method. + + perp_tol : float, default=1e-1 + Perplexity tolerance in batch learning. Only used when + ``evaluate_every`` is greater than 0. + + mean_change_tol : float, default=1e-3 + Stopping tolerance for updating document topic distribution in E-step. + + max_doc_update_iter : int, default=100 + Max number of iterations for updating document topic distribution in + the E-step. + + n_jobs : int, default=None + The number of jobs to use in the E-step. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + Verbosity level. + + random_state : int, RandomState instance or None, default=None + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Variational parameters for topic word distribution. Since the complete + conditional for topic word distribution is a Dirichlet, + ``components_[i, j]`` can be viewed as pseudocount that represents the + number of times word `j` was assigned to topic `i`. + It can also be viewed as distribution over the words for each topic + after normalization: + ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``. + + exp_dirichlet_component_ : ndarray of shape (n_components, n_features) + Exponential value of expectation of log topic word distribution. + In the literature, this is `exp(E[log(beta)])`. + + n_batch_iter_ : int + Number of iterations of the EM step. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of passes over the dataset. + + bound_ : float + Final perplexity score on training set. + + doc_topic_prior_ : float + Prior of document topic distribution `theta`. If the value is None, + it is `1 / n_components`. + + random_state_ : RandomState instance + RandomState instance that is generated either from a seed, the random + number generator or by `np.random`. + + topic_word_prior_ : float + Prior of topic word distribution `beta`. If the value is None, it is + `1 / n_components`. + + See Also + -------- + sklearn.discriminant_analysis.LinearDiscriminantAnalysis: + A classifier with a linear decision boundary, generated by fitting + class conditional densities to the data and using Bayes' rule. + + References + ---------- + .. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D. + Hoffman, David M. Blei, Francis Bach, 2010 + https://github.com/blei-lab/onlineldavb + + .. [2] "Stochastic Variational Inference", Matthew D. Hoffman, + David M. Blei, Chong Wang, John Paisley, 2013 + + Examples + -------- + >>> from sklearn.decomposition import LatentDirichletAllocation + >>> from sklearn.datasets import make_multilabel_classification + >>> # This produces a feature matrix of token counts, similar to what + >>> # CountVectorizer would produce on text. + >>> X, _ = make_multilabel_classification(random_state=0) + >>> lda = LatentDirichletAllocation(n_components=5, + ... random_state=0) + >>> lda.fit(X) + LatentDirichletAllocation(...) + >>> # get topics for some given samples: + >>> lda.transform(X[-2:]) + array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846], + [0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]]) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 0, None, closed="neither")], + "doc_topic_prior": [None, Interval(Real, 0, 1, closed="both")], + "topic_word_prior": [None, Interval(Real, 0, 1, closed="both")], + "learning_method": [StrOptions({"batch", "online"})], + "learning_decay": [Interval(Real, 0, 1, closed="both")], + "learning_offset": [Interval(Real, 1.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "batch_size": [Interval(Integral, 0, None, closed="neither")], + "evaluate_every": [Interval(Integral, None, None, closed="neither")], + "total_samples": [Interval(Real, 0, None, closed="neither")], + "perp_tol": [Interval(Real, 0, None, closed="left")], + "mean_change_tol": [Interval(Real, 0, None, closed="left")], + "max_doc_update_iter": [Interval(Integral, 0, None, closed="left")], + "n_jobs": [None, Integral], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=10, + *, + doc_topic_prior=None, + topic_word_prior=None, + learning_method="batch", + learning_decay=0.7, + learning_offset=10.0, + max_iter=10, + batch_size=128, + evaluate_every=-1, + total_samples=1e6, + perp_tol=1e-1, + mean_change_tol=1e-3, + max_doc_update_iter=100, + n_jobs=None, + verbose=0, + random_state=None, + ): + self.n_components = n_components + self.doc_topic_prior = doc_topic_prior + self.topic_word_prior = topic_word_prior + self.learning_method = learning_method + self.learning_decay = learning_decay + self.learning_offset = learning_offset + self.max_iter = max_iter + self.batch_size = 
batch_size + self.evaluate_every = evaluate_every + self.total_samples = total_samples + self.perp_tol = perp_tol + self.mean_change_tol = mean_change_tol + self.max_doc_update_iter = max_doc_update_iter + self.n_jobs = n_jobs + self.verbose = verbose + self.random_state = random_state + + def _init_latent_vars(self, n_features, dtype=np.float64): + """Initialize latent variables.""" + + self.random_state_ = check_random_state(self.random_state) + self.n_batch_iter_ = 1 + self.n_iter_ = 0 + + if self.doc_topic_prior is None: + self.doc_topic_prior_ = 1.0 / self.n_components + else: + self.doc_topic_prior_ = self.doc_topic_prior + + if self.topic_word_prior is None: + self.topic_word_prior_ = 1.0 / self.n_components + else: + self.topic_word_prior_ = self.topic_word_prior + + init_gamma = 100.0 + init_var = 1.0 / init_gamma + # In the literature, this is called `lambda` + self.components_ = self.random_state_.gamma( + init_gamma, init_var, (self.n_components, n_features) + ).astype(dtype, copy=False) + + # In the literature, this is `exp(E[log(beta)])` + self.exp_dirichlet_component_ = np.exp( + _dirichlet_expectation_2d(self.components_) + ) + + def _e_step(self, X, cal_sstats, random_init, parallel=None): + """E-step in EM update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + cal_sstats : bool + Parameter that indicate whether to calculate sufficient statistics + or not. Set ``cal_sstats`` to True when we need to run M-step. + + random_init : bool + Parameter that indicate whether to initialize document topic + distribution randomly in the E-step. Set it to True in training + steps. + + parallel : joblib.Parallel, default=None + Pre-initialized instance of joblib.Parallel. + + Returns + ------- + (doc_topic_distr, suff_stats) : + `doc_topic_distr` is unnormalized topic distribution for each + document. In the literature, this is called `gamma`. + `suff_stats` is expected sufficient statistics for the M-step. + When `cal_sstats == False`, it will be None. + + """ + + # Run e-step in parallel + random_state = self.random_state_ if random_init else None + + # TODO: make Parallel._effective_n_jobs public instead? + n_jobs = effective_n_jobs(self.n_jobs) + if parallel is None: + parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) + results = parallel( + delayed(_update_doc_distribution)( + X[idx_slice, :], + self.exp_dirichlet_component_, + self.doc_topic_prior_, + self.max_doc_update_iter, + self.mean_change_tol, + cal_sstats, + random_state, + ) + for idx_slice in gen_even_slices(X.shape[0], n_jobs) + ) + + # merge result + doc_topics, sstats_list = zip(*results) + doc_topic_distr = np.vstack(doc_topics) + + if cal_sstats: + # This step finishes computing the sufficient statistics for the + # M-step. + suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype) + for sstats in sstats_list: + suff_stats += sstats + suff_stats *= self.exp_dirichlet_component_ + else: + suff_stats = None + + return (doc_topic_distr, suff_stats) + + def _em_step(self, X, total_samples, batch_update, parallel=None): + """EM update for 1 iteration. + + update `_component` by batch VB or online VB. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + total_samples : int + Total number of documents. It is only used when + batch_update is `False`. + + batch_update : bool + Parameter that controls updating method. 
+ `True` for batch learning, `False` for online learning. + + parallel : joblib.Parallel, default=None + Pre-initialized instance of joblib.Parallel + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Unnormalized document topic distribution. + """ + + # E-step + _, suff_stats = self._e_step( + X, cal_sstats=True, random_init=True, parallel=parallel + ) + + # M-step + if batch_update: + self.components_ = self.topic_word_prior_ + suff_stats + else: + # online update + # In the literature, the weight is `rho` + weight = np.power( + self.learning_offset + self.n_batch_iter_, -self.learning_decay + ) + doc_ratio = float(total_samples) / X.shape[0] + self.components_ *= 1 - weight + self.components_ += weight * ( + self.topic_word_prior_ + doc_ratio * suff_stats + ) + + # update `component_` related variables + self.exp_dirichlet_component_ = np.exp( + _dirichlet_expectation_2d(self.components_) + ) + self.n_batch_iter_ += 1 + return + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + "requires_positive_X": True, + } + + def _check_non_neg_array(self, X, reset_n_features, whom): + """check X format + + check X format and make sure no negative value in X. + + Parameters + ---------- + X : array-like or sparse matrix + + """ + dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype + + X = self._validate_data( + X, + reset=reset_n_features, + accept_sparse="csr", + dtype=dtype, + ) + check_non_negative(X, whom) + + return X + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Online VB with Mini-Batch update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Partially fitted estimator. + """ + first_time = not hasattr(self, "components_") + + X = self._check_non_neg_array( + X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit" + ) + n_samples, n_features = X.shape + batch_size = self.batch_size + + # initialize parameters or check + if first_time: + self._init_latent_vars(n_features, dtype=X.dtype) + + if n_features != self.components_.shape[1]: + raise ValueError( + "The provided data has %d dimensions while " + "the model was trained with feature size %d." + % (n_features, self.components_.shape[1]) + ) + + n_jobs = effective_n_jobs(self.n_jobs) + with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: + for idx_slice in gen_batches(n_samples, batch_size): + self._em_step( + X[idx_slice, :], + total_samples=self.total_samples, + batch_update=False, + parallel=parallel, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn model for the data X with variational Bayes method. + + When `learning_method` is 'online', use mini-batch update. + Otherwise, use batch update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Fitted estimator. 
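A hedged end-to-end sketch contrasting the batch and online paths described above; the synthetic count matrix and every hyperparameter value are illustrative choices rather than recommendations from this source:

    import numpy as np
    from sklearn.decomposition import LatentDirichletAllocation

    rng = np.random.RandomState(0)
    X = rng.poisson(0.5, size=(400, 50))    # stand-in document-word count matrix

    # Batch variational Bayes: every EM update sees the whole corpus.
    lda_batch = LatentDirichletAllocation(
        n_components=5, learning_method="batch", max_iter=10, random_state=0
    ).fit(X)

    # Online variational Bayes: mini-batches, here fed through partial_fit.
    lda_online = LatentDirichletAllocation(
        n_components=5, learning_method="online",
        batch_size=64, total_samples=400, random_state=0
    )
    for start in range(0, X.shape[0], 64):
        lda_online.partial_fit(X[start:start + 64])

    print(lda_batch.perplexity(X), lda_online.perplexity(X))
    print(lda_online.transform(X[:2]).round(3))  # normalised topic proportions per document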
+ """ + X = self._check_non_neg_array( + X, reset_n_features=True, whom="LatentDirichletAllocation.fit" + ) + n_samples, n_features = X.shape + max_iter = self.max_iter + evaluate_every = self.evaluate_every + learning_method = self.learning_method + + batch_size = self.batch_size + + # initialize parameters + self._init_latent_vars(n_features, dtype=X.dtype) + # change to perplexity later + last_bound = None + n_jobs = effective_n_jobs(self.n_jobs) + with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: + for i in range(max_iter): + if learning_method == "online": + for idx_slice in gen_batches(n_samples, batch_size): + self._em_step( + X[idx_slice, :], + total_samples=n_samples, + batch_update=False, + parallel=parallel, + ) + else: + # batch update + self._em_step( + X, total_samples=n_samples, batch_update=True, parallel=parallel + ) + + # check perplexity + if evaluate_every > 0 and (i + 1) % evaluate_every == 0: + doc_topics_distr, _ = self._e_step( + X, cal_sstats=False, random_init=False, parallel=parallel + ) + bound = self._perplexity_precomp_distr( + X, doc_topics_distr, sub_sampling=False + ) + if self.verbose: + print( + "iteration: %d of max_iter: %d, perplexity: %.4f" + % (i + 1, max_iter, bound) + ) + + if last_bound and abs(last_bound - bound) < self.perp_tol: + break + last_bound = bound + + elif self.verbose: + print("iteration: %d of max_iter: %d" % (i + 1, max_iter)) + self.n_iter_ += 1 + + # calculate final perplexity value on train set + doc_topics_distr, _ = self._e_step( + X, cal_sstats=False, random_init=False, parallel=parallel + ) + self.bound_ = self._perplexity_precomp_distr( + X, doc_topics_distr, sub_sampling=False + ) + + return self + + def _unnormalized_transform(self, X): + """Transform data X according to fitted model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution for X. + """ + doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False) + + return doc_topic_distr + + def transform(self, X): + """Transform data X according to the fitted model. + + .. versionchanged:: 0.18 + *doc_topic_distr* is now normalized + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution for X. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=False, whom="LatentDirichletAllocation.transform" + ) + doc_topic_distr = self._unnormalized_transform(X) + doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis] + return doc_topic_distr + + def _approx_bound(self, X, doc_topic_distr, sub_sampling): + """Estimate the variational bound. + + Estimate the variational bound over "all documents" using only the + documents passed in as X. Since log-likelihood of each word cannot + be computed directly, we use this bound to estimate it. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution. In the literature, this is called + gamma. + + sub_sampling : bool, default=False + Compensate for subsampling of documents. + It is used in calculate bound in online learning. 
+ + Returns + ------- + score : float + + """ + + def _loglikelihood(prior, distr, dirichlet_distr, size): + # calculate log-likelihood + score = np.sum((prior - distr) * dirichlet_distr) + score += np.sum(gammaln(distr) - gammaln(prior)) + score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1))) + return score + + is_sparse_x = sp.issparse(X) + n_samples, n_components = doc_topic_distr.shape + n_features = self.components_.shape[1] + score = 0 + + dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr) + dirichlet_component_ = _dirichlet_expectation_2d(self.components_) + doc_topic_prior = self.doc_topic_prior_ + topic_word_prior = self.topic_word_prior_ + + if is_sparse_x: + X_data = X.data + X_indices = X.indices + X_indptr = X.indptr + + # E[log p(docs | theta, beta)] + for idx_d in range(0, n_samples): + if is_sparse_x: + ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]] + cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]] + else: + ids = np.nonzero(X[idx_d, :])[0] + cnts = X[idx_d, ids] + temp = ( + dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids] + ) + norm_phi = logsumexp(temp, axis=0) + score += np.dot(cnts, norm_phi) + + # compute E[log p(theta | alpha) - log q(theta | gamma)] + score += _loglikelihood( + doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components + ) + + # Compensate for the subsampling of the population of documents + if sub_sampling: + doc_ratio = float(self.total_samples) / n_samples + score *= doc_ratio + + # E[log p(beta | eta) - log q (beta | lambda)] + score += _loglikelihood( + topic_word_prior, self.components_, dirichlet_component_, n_features + ) + + return score + + def score(self, X, y=None): + """Calculate approximate log-likelihood as score. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + score : float + Use approximate bound as score. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=False, whom="LatentDirichletAllocation.score" + ) + + doc_topic_distr = self._unnormalized_transform(X) + score = self._approx_bound(X, doc_topic_distr, sub_sampling=False) + return score + + def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False): + """Calculate approximate perplexity for data X with ability to accept + precomputed doc_topic_distr + + Perplexity is defined as exp(-1. * log-likelihood per word) + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + doc_topic_distr : ndarray of shape (n_samples, n_components), \ + default=None + Document topic distribution. + If it is None, it will be generated by applying transform on X. + + Returns + ------- + score : float + Perplexity score. + """ + if doc_topic_distr is None: + doc_topic_distr = self._unnormalized_transform(X) + else: + n_samples, n_components = doc_topic_distr.shape + if n_samples != X.shape[0]: + raise ValueError( + "Number of samples in X and doc_topic_distr do not match." 
+ ) + + if n_components != self.n_components: + raise ValueError("Number of topics does not match.") + + current_samples = X.shape[0] + bound = self._approx_bound(X, doc_topic_distr, sub_sampling) + + if sub_sampling: + word_cnt = X.sum() * (float(self.total_samples) / current_samples) + else: + word_cnt = X.sum() + perword_bound = bound / word_cnt + + return np.exp(-1.0 * perword_bound) + + def perplexity(self, X, sub_sampling=False): + """Calculate approximate perplexity for data X. + + Perplexity is defined as exp(-1. * log-likelihood per word) + + .. versionchanged:: 0.19 + *doc_topic_distr* argument has been deprecated and is ignored + because user no longer has access to unnormalized distribution + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + sub_sampling : bool + Do sub-sampling or not. + + Returns + ------- + score : float + Perplexity score. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity" + ) + return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling) + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py new file mode 100644 index 0000000000000000000000000000000000000000..db46540e26708b897ed8389efa669c5312e729f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py @@ -0,0 +1,2443 @@ +""" Non-negative matrix factorization. +""" +# Author: Vlad Niculae +# Lars Buitinck +# Mathieu Blondel +# Tom Dupre la Tour +# License: BSD 3 clause + +import itertools +import time +import warnings +from abc import ABC +from math import sqrt +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy import linalg + +from .._config import config_context +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_array, check_random_state, gen_batches, metadata_routing +from ..utils._param_validation import ( + Hidden, + Interval, + StrOptions, + validate_params, +) +from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm +from ..utils.validation import ( + check_is_fitted, + check_non_negative, +) +from ._cdnmf_fast import _update_cdnmf_fast + +EPSILON = np.finfo(np.float32).eps + + +def norm(x): + """Dot product-based Euclidean norm implementation. + + See: http://fa.bianp.net/blog/2011/computing-the-vector-norm/ + + Parameters + ---------- + x : array-like + Vector for which to compute the norm. + """ + return sqrt(squared_norm(x)) + + +def trace_dot(X, Y): + """Trace of np.dot(X, Y.T). + + Parameters + ---------- + X : array-like + First matrix. + Y : array-like + Second matrix. + """ + return np.dot(X.ravel(), Y.ravel()) + + +def _check_init(A, shape, whom): + A = check_array(A) + if shape[0] != "auto" and A.shape[0] != shape[0]: + raise ValueError( + f"Array with wrong first dimension passed to {whom}. Expected {shape[0]}, " + f"but got {A.shape[0]}." + ) + if shape[1] != "auto" and A.shape[1] != shape[1]: + raise ValueError( + f"Array with wrong second dimension passed to {whom}. Expected {shape[1]}, " + f"but got {A.shape[1]}." 
+ ) + check_non_negative(A, whom) + if np.max(A) == 0: + raise ValueError(f"Array passed to {whom} is full of zeros.") + + +def _beta_divergence(X, W, H, beta, square_root=False): + """Compute the beta-divergence of X and dot(W, H). + + Parameters + ---------- + X : float or array-like of shape (n_samples, n_features) + + W : float or array-like of shape (n_samples, n_components) + + H : float or array-like of shape (n_components, n_features) + + beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'} + Parameter of the beta-divergence. + If beta == 2, this is half the Frobenius *squared* norm. + If beta == 1, this is the generalized Kullback-Leibler divergence. + If beta == 0, this is the Itakura-Saito divergence. + Else, this is the general beta-divergence. + + square_root : bool, default=False + If True, return np.sqrt(2 * res) + For beta == 2, it corresponds to the Frobenius norm. + + Returns + ------- + res : float + Beta divergence of X and np.dot(X, H). + """ + beta = _beta_loss_to_float(beta) + + # The method can be called with scalars + if not sp.issparse(X): + X = np.atleast_2d(X) + W = np.atleast_2d(W) + H = np.atleast_2d(H) + + # Frobenius norm + if beta == 2: + # Avoid the creation of the dense np.dot(W, H) if X is sparse. + if sp.issparse(X): + norm_X = np.dot(X.data, X.data) + norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H) + cross_prod = trace_dot((X @ H.T), W) + res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0 + else: + res = squared_norm(X - np.dot(W, H)) / 2.0 + + if square_root: + return np.sqrt(res * 2) + else: + return res + + if sp.issparse(X): + # compute np.dot(W, H) only where X is nonzero + WH_data = _special_sparse_dot(W, H, X).data + X_data = X.data + else: + WH = np.dot(W, H) + WH_data = WH.ravel() + X_data = X.ravel() + + # do not affect the zeros: here 0 ** (-1) = 0 and not infinity + indices = X_data > EPSILON + WH_data = WH_data[indices] + X_data = X_data[indices] + + # used to avoid division by zero + WH_data[WH_data < EPSILON] = EPSILON + + # generalized Kullback-Leibler divergence + if beta == 1: + # fast and memory efficient computation of np.sum(np.dot(W, H)) + sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) + # computes np.sum(X * log(X / WH)) only where X is nonzero + div = X_data / WH_data + res = np.dot(X_data, np.log(div)) + # add full np.sum(np.dot(W, H)) - np.sum(X) + res += sum_WH - X_data.sum() + + # Itakura-Saito divergence + elif beta == 0: + div = X_data / WH_data + res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div)) + + # beta-divergence, beta not in (0, 1, 2) + else: + if sp.issparse(X): + # slow loop, but memory efficient computation of : + # np.sum(np.dot(W, H) ** beta) + sum_WH_beta = 0 + for i in range(X.shape[1]): + sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) + + else: + sum_WH_beta = np.sum(WH**beta) + + sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) + res = (X_data**beta).sum() - beta * sum_X_WH + res += sum_WH_beta * (beta - 1) + res /= beta * (beta - 1) + + if square_root: + res = max(res, 0) # avoid negative number due to rounding errors + return np.sqrt(2 * res) + else: + return res + + +def _special_sparse_dot(W, H, X): + """Computes np.dot(W, H), only where X is non zero.""" + if sp.issparse(X): + ii, jj = X.nonzero() + n_vals = ii.shape[0] + dot_vals = np.empty(n_vals) + n_components = W.shape[1] + + batch_size = max(n_components, n_vals // n_components) + for start in range(0, n_vals, batch_size): + batch = slice(start, start + batch_size) + dot_vals[batch] = 
np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum( + axis=1 + ) + + WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape) + return WH.tocsr() + else: + return np.dot(W, H) + + +def _beta_loss_to_float(beta_loss): + """Convert string beta_loss to float.""" + beta_loss_map = {"frobenius": 2, "kullback-leibler": 1, "itakura-saito": 0} + if isinstance(beta_loss, str): + beta_loss = beta_loss_map[beta_loss] + return beta_loss + + +def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None): + """Algorithms for NMF initialization. + + Computes an initial guess for the non-negative + rank k matrix approximation for X: X = WH. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix to be decomposed. + + n_components : int + The number of components desired in the approximation. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None + Method used to initialize the procedure. + Valid options: + + - None: 'nndsvda' if n_components <= min(n_samples, n_features), + otherwise 'random'. + + - 'random': non-negative random matrices, scaled with: + sqrt(X.mean() / n_components) + + - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + + - 'nndsvda': NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + + - 'nndsvdar': NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + + - 'custom': use custom matrices W and H + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + eps : float, default=1e-6 + Truncate all values less then this in output to zero. + + random_state : int, RandomState instance or None, default=None + Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + W : array-like of shape (n_samples, n_components) + Initial guesses for solving X ~= WH. + + H : array-like of shape (n_components, n_features) + Initial guesses for solving X ~= WH. + + References + ---------- + C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for + nonnegative matrix factorization - Pattern Recognition, 2008 + http://tinyurl.com/nndsvd + """ + check_non_negative(X, "NMF initialization") + n_samples, n_features = X.shape + + if ( + init is not None + and init != "random" + and n_components > min(n_samples, n_features) + ): + raise ValueError( + "init = '{}' can only be used when " + "n_components <= min(n_samples, n_features)".format(init) + ) + + if init is None: + if n_components <= min(n_samples, n_features): + init = "nndsvda" + else: + init = "random" + + # Random initialization + if init == "random": + avg = np.sqrt(X.mean() / n_components) + rng = check_random_state(random_state) + H = avg * rng.standard_normal(size=(n_components, n_features)).astype( + X.dtype, copy=False + ) + W = avg * rng.standard_normal(size=(n_samples, n_components)).astype( + X.dtype, copy=False + ) + np.abs(H, out=H) + np.abs(W, out=W) + return W, H + + # NNDSVD initialization + U, S, V = randomized_svd(X, n_components, random_state=random_state) + W = np.zeros_like(U) + H = np.zeros_like(V) + + # The leading singular triplet is non-negative + # so it can be used as is for initialization. 
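+    # For the remaining components (built in the loop below), each singular
+    # vector pair (U[:, j], V[j, :]) is split into its positive and negative
+    # parts; the side with the larger product of norms is kept, normalized,
+    # and rescaled by sqrt(S[j] * sigma), so W and H stay non-negative.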
+ W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0]) + H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :]) + + for j in range(1, n_components): + x, y = U[:, j], V[j, :] + + # extract positive and negative parts of column vectors + x_p, y_p = np.maximum(x, 0), np.maximum(y, 0) + x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0)) + + # and their norms + x_p_nrm, y_p_nrm = norm(x_p), norm(y_p) + x_n_nrm, y_n_nrm = norm(x_n), norm(y_n) + + m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm + + # choose update + if m_p > m_n: + u = x_p / x_p_nrm + v = y_p / y_p_nrm + sigma = m_p + else: + u = x_n / x_n_nrm + v = y_n / y_n_nrm + sigma = m_n + + lbd = np.sqrt(S[j] * sigma) + W[:, j] = lbd * u + H[j, :] = lbd * v + + W[W < eps] = 0 + H[H < eps] = 0 + + if init == "nndsvd": + pass + elif init == "nndsvda": + avg = X.mean() + W[W == 0] = avg + H[H == 0] = avg + elif init == "nndsvdar": + rng = check_random_state(random_state) + avg = X.mean() + W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100) + H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100) + else: + raise ValueError( + "Invalid init parameter: got %r instead of one of %r" + % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar")) + ) + + return W, H + + +def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state): + """Helper function for _fit_coordinate_descent. + + Update W to minimize the objective function, iterating once over all + coordinates. By symmetry, to update H, one can call + _update_coordinate_descent(X.T, Ht, W, ...). + + """ + n_components = Ht.shape[1] + + HHt = np.dot(Ht.T, Ht) + XHt = safe_sparse_dot(X, Ht) + + # L2 regularization corresponds to increase of the diagonal of HHt + if l2_reg != 0.0: + # adds l2_reg only on the diagonal + HHt.flat[:: n_components + 1] += l2_reg + # L1 regularization corresponds to decrease of each element of XHt + if l1_reg != 0.0: + XHt -= l1_reg + + if shuffle: + permutation = random_state.permutation(n_components) + else: + permutation = np.arange(n_components) + # The following seems to be required on 64-bit Windows w/ Python 3.5. + permutation = np.asarray(permutation, dtype=np.intp) + return _update_cdnmf_fast(W, HHt, XHt, permutation) + + +def _fit_coordinate_descent( + X, + W, + H, + tol=1e-4, + max_iter=200, + l1_reg_W=0, + l1_reg_H=0, + l2_reg_W=0, + l2_reg_H=0, + update_H=True, + verbose=0, + shuffle=False, + random_state=None, +): + """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent + + The objective function is minimized with an alternating minimization of W + and H. Each minimization is done with a cyclic (up to a permutation of the + features) Coordinate Descent. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Constant matrix. + + W : array-like of shape (n_samples, n_components) + Initial guess for the solution. + + H : array-like of shape (n_components, n_features) + Initial guess for the solution. + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + l1_reg_W : float, default=0. + L1 regularization parameter for W. + + l1_reg_H : float, default=0. + L1 regularization parameter for H. + + l2_reg_W : float, default=0. + L2 regularization parameter for W. + + l2_reg_H : float, default=0. + L2 regularization parameter for H. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. 
+ + verbose : int, default=0 + The verbosity level. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + random_state : int, RandomState instance or None, default=None + Used to randomize the coordinates in the CD solver, when + ``shuffle`` is set to ``True``. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + The number of iterations done by the algorithm. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + """ + # so W and Ht are both in C order in memory + Ht = check_array(H.T, order="C") + X = check_array(X, accept_sparse="csr") + + rng = check_random_state(random_state) + + for n_iter in range(1, max_iter + 1): + violation = 0.0 + + # Update W + violation += _update_coordinate_descent( + X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng + ) + # Update H + if update_H: + violation += _update_coordinate_descent( + X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng + ) + + if n_iter == 1: + violation_init = violation + + if violation_init == 0: + break + + if verbose: + print("violation:", violation / violation_init) + + if violation / violation_init <= tol: + if verbose: + print("Converged at iteration", n_iter + 1) + break + + return W, Ht.T, n_iter + + +def _multiplicative_update_w( + X, + W, + H, + beta_loss, + l1_reg_W, + l2_reg_W, + gamma, + H_sum=None, + HHt=None, + XHt=None, + update_H=True, +): + """Update W in Multiplicative Update NMF.""" + if beta_loss == 2: + # Numerator + if XHt is None: + XHt = safe_sparse_dot(X, H.T) + if update_H: + # avoid a copy of XHt, which will be re-computed (update_H=True) + numerator = XHt + else: + # preserve the XHt, which is not re-computed (update_H=False) + numerator = XHt.copy() + + # Denominator + if HHt is None: + HHt = np.dot(H, H.T) + denominator = np.dot(W, HHt) + + else: + # Numerator + # if X is sparse, compute WH only where X is non zero + WH_safe_X = _special_sparse_dot(W, H, X) + if sp.issparse(X): + WH_safe_X_data = WH_safe_X.data + X_data = X.data + else: + WH_safe_X_data = WH_safe_X + X_data = X + # copy used in the Denominator + WH = WH_safe_X.copy() + if beta_loss - 1.0 < 0: + WH[WH < EPSILON] = EPSILON + + # to avoid taking a negative power of zero + if beta_loss - 2.0 < 0: + WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON + + if beta_loss == 1: + np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) + elif beta_loss == 0: + # speeds up computation time + # refer to /numpy/numpy/issues/9363 + WH_safe_X_data **= -1 + WH_safe_X_data **= 2 + # element-wise multiplication + WH_safe_X_data *= X_data + else: + WH_safe_X_data **= beta_loss - 2 + # element-wise multiplication + WH_safe_X_data *= X_data + + # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T) + numerator = safe_sparse_dot(WH_safe_X, H.T) + + # Denominator + if beta_loss == 1: + if H_sum is None: + H_sum = np.sum(H, axis=1) # shape(n_components, ) + denominator = H_sum[np.newaxis, :] + + else: + # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T) + if 
sp.issparse(X): + # memory efficient computation + # (compute row by row, avoiding the dense matrix WH) + WHHt = np.empty(W.shape) + for i in range(X.shape[0]): + WHi = np.dot(W[i, :], H) + if beta_loss - 1 < 0: + WHi[WHi < EPSILON] = EPSILON + WHi **= beta_loss - 1 + WHHt[i, :] = np.dot(WHi, H.T) + else: + WH **= beta_loss - 1 + WHHt = np.dot(WH, H.T) + denominator = WHHt + + # Add L1 and L2 regularization + if l1_reg_W > 0: + denominator += l1_reg_W + if l2_reg_W > 0: + denominator = denominator + l2_reg_W * W + denominator[denominator == 0] = EPSILON + + numerator /= denominator + delta_W = numerator + + # gamma is in ]0, 1] + if gamma != 1: + delta_W **= gamma + + W *= delta_W + + return W, H_sum, HHt, XHt + + +def _multiplicative_update_h( + X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None +): + """update H in Multiplicative Update NMF.""" + if beta_loss == 2: + numerator = safe_sparse_dot(W.T, X) + denominator = np.linalg.multi_dot([W.T, W, H]) + + else: + # Numerator + WH_safe_X = _special_sparse_dot(W, H, X) + if sp.issparse(X): + WH_safe_X_data = WH_safe_X.data + X_data = X.data + else: + WH_safe_X_data = WH_safe_X + X_data = X + # copy used in the Denominator + WH = WH_safe_X.copy() + if beta_loss - 1.0 < 0: + WH[WH < EPSILON] = EPSILON + + # to avoid division by zero + if beta_loss - 2.0 < 0: + WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON + + if beta_loss == 1: + np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) + elif beta_loss == 0: + # speeds up computation time + # refer to /numpy/numpy/issues/9363 + WH_safe_X_data **= -1 + WH_safe_X_data **= 2 + # element-wise multiplication + WH_safe_X_data *= X_data + else: + WH_safe_X_data **= beta_loss - 2 + # element-wise multiplication + WH_safe_X_data *= X_data + + # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X) + numerator = safe_sparse_dot(W.T, WH_safe_X) + + # Denominator + if beta_loss == 1: + W_sum = np.sum(W, axis=0) # shape(n_components, ) + W_sum[W_sum == 0] = 1.0 + denominator = W_sum[:, np.newaxis] + + # beta_loss not in (1, 2) + else: + # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1) + if sp.issparse(X): + # memory efficient computation + # (compute column by column, avoiding the dense matrix WH) + WtWH = np.empty(H.shape) + for i in range(X.shape[1]): + WHi = np.dot(W, H[:, i]) + if beta_loss - 1 < 0: + WHi[WHi < EPSILON] = EPSILON + WHi **= beta_loss - 1 + WtWH[:, i] = np.dot(W.T, WHi) + else: + WH **= beta_loss - 1 + WtWH = np.dot(W.T, WH) + denominator = WtWH + + # Add L1 and L2 regularization + if l1_reg_H > 0: + denominator += l1_reg_H + if l2_reg_H > 0: + denominator = denominator + l2_reg_H * H + denominator[denominator == 0] = EPSILON + + if A is not None and B is not None: + # Updates for the online nmf + if gamma != 1: + H **= 1 / gamma + numerator *= H + A *= rho + B *= rho + A += numerator + B += denominator + H = A / B + + if gamma != 1: + H **= gamma + else: + delta_H = numerator + delta_H /= denominator + if gamma != 1: + delta_H **= gamma + H *= delta_H + + return H + + +def _fit_multiplicative_update( + X, + W, + H, + beta_loss="frobenius", + max_iter=200, + tol=1e-4, + l1_reg_W=0, + l1_reg_H=0, + l2_reg_W=0, + l2_reg_H=0, + update_H=True, + verbose=0, +): + """Compute Non-negative Matrix Factorization with Multiplicative Update. + + The objective function is _beta_divergence(X, WH) and is minimized with an + alternating minimization of W and H. Each minimization is done with a + Multiplicative Update. 
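+
+    For the Frobenius loss (``beta_loss=2``) with no regularization, each
+    iteration applies the classical multiplicative updates
+    ``W <- W * (X @ H.T) / (W @ H @ H.T)`` and
+    ``H <- H * (W.T @ X) / (W.T @ W @ H)``, where the multiplication and
+    division are element-wise. Other beta-divergences use the generalized
+    updates of Fevotte & Idier (2011), raised to a Maximization-Minimization
+    exponent ``gamma`` when ``beta_loss`` lies outside [1, 2].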
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Constant input matrix. + + W : array-like of shape (n_samples, n_components) + Initial guess for the solution. + + H : array-like of shape (n_components, n_features) + Initial guess for the solution. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. + + max_iter : int, default=200 + Number of iterations. + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + l1_reg_W : float, default=0. + L1 regularization parameter for W. + + l1_reg_H : float, default=0. + L1 regularization parameter for H. + + l2_reg_W : float, default=0. + L2 regularization parameter for W. + + l2_reg_H : float, default=0. + L2 regularization parameter for H. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. + + verbose : int, default=0 + The verbosity level. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + The number of iterations done by the algorithm. + + References + ---------- + Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix + Factorization. Adv. Neural Inform. Process. Syst.. 13. + Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix + factorization with the beta-divergence. Neural Computation, 23(9). 
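+
+    Examples
+    --------
+    A minimal sketch on a tiny random non-negative matrix; the shapes and
+    parameter values below are arbitrary and chosen only to keep the example
+    small (these are private helpers):
+
+    >>> import numpy as np
+    >>> from sklearn.decomposition._nmf import _initialize_nmf
+    >>> from sklearn.decomposition._nmf import _fit_multiplicative_update
+    >>> rng = np.random.RandomState(0)
+    >>> X = np.abs(rng.randn(6, 5))
+    >>> W0, H0 = _initialize_nmf(X, n_components=2, init='random',
+    ...                          random_state=0)
+    >>> W, H, n_iter = _fit_multiplicative_update(X, W0, H0,
+    ...                                           beta_loss='frobenius',
+    ...                                           max_iter=50, tol=1e-4)
+    >>> W.shape, H.shape
+    ((6, 2), (2, 5))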
+ """ + start_time = time.time() + + beta_loss = _beta_loss_to_float(beta_loss) + + # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] + if beta_loss < 1: + gamma = 1.0 / (2.0 - beta_loss) + elif beta_loss > 2: + gamma = 1.0 / (beta_loss - 1.0) + else: + gamma = 1.0 + + # used for the convergence criterion + error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True) + previous_error = error_at_init + + H_sum, HHt, XHt = None, None, None + for n_iter in range(1, max_iter + 1): + # update W + # H_sum, HHt and XHt are saved and reused if not update_H + W, H_sum, HHt, XHt = _multiplicative_update_w( + X, + W, + H, + beta_loss=beta_loss, + l1_reg_W=l1_reg_W, + l2_reg_W=l2_reg_W, + gamma=gamma, + H_sum=H_sum, + HHt=HHt, + XHt=XHt, + update_H=update_H, + ) + + # necessary for stability with beta_loss < 1 + if beta_loss < 1: + W[W < np.finfo(np.float64).eps] = 0.0 + + # update H (only at fit or fit_transform) + if update_H: + H = _multiplicative_update_h( + X, + W, + H, + beta_loss=beta_loss, + l1_reg_H=l1_reg_H, + l2_reg_H=l2_reg_H, + gamma=gamma, + ) + + # These values will be recomputed since H changed + H_sum, HHt, XHt = None, None, None + + # necessary for stability with beta_loss < 1 + if beta_loss <= 1: + H[H < np.finfo(np.float64).eps] = 0.0 + + # test convergence criterion every 10 iterations + if tol > 0 and n_iter % 10 == 0: + error = _beta_divergence(X, W, H, beta_loss, square_root=True) + + if verbose: + iter_time = time.time() + print( + "Epoch %02d reached after %.3f seconds, error: %f" + % (n_iter, iter_time - start_time, error) + ) + + if (previous_error - error) / error_at_init < tol: + break + previous_error = error + + # do not print if we have already printed in the convergence test + if verbose and (tol == 0 or n_iter % 10 != 0): + end_time = time.time() + print( + "Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time) + ) + + return W, H, n_iter + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "W": ["array-like", None], + "H": ["array-like", None], + "update_H": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def non_negative_factorization( + X, + W=None, + H=None, + n_components="warn", + *, + init=None, + update_H=True, + solver="cd", + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + random_state=None, + verbose=0, + shuffle=False, +): + """Compute Non-negative Matrix Factorization (NMF). + + Find two non-negative matrices (W, H) whose product approximates the non- + negative matrix X. This factorization can be used for example for + dimensionality reduction, source separation or topic extraction. + + The objective function is: + + .. math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}^2` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. 
+ + The regularization terms are scaled by `n_features` for `W` and by `n_samples` for + `H` to keep their impact balanced with respect to one another and to the data fit + term as independent as possible of the size `n_samples` of the training set. + + The objective function is minimized with an alternating minimization of W + and H. If H is given and update_H=False, it solves for W only. + + Note that the transformed data is named W and the components matrix is named H. In + the NMF literature, the naming convention is usually the opposite since the data + matrix X is transposed. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Constant matrix. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + n_components : int or {'auto'} or None, default=None + Number of components, if n_components is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from `W` or `H` shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + + Valid options: + + - None: 'nndsvda' if n_components < n_features, otherwise 'random'. + - 'random': non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + - 'nndsvda': NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + - 'nndsvdar': NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + - 'custom': If `update_H=True`, use custom matrices W and H which must both + be provided. If `update_H=False`, then only custom matrix H is used. + + .. versionchanged:: 0.23 + The default value of `init` changed from 'random' to None in 0.23. + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. + + solver : {'cd', 'mu'}, default='cd' + Numerical solver to use: + + - 'cd' is a Coordinate Descent solver that uses Fast Hierarchical + Alternating Least Squares (Fast HALS). + - 'mu' is a Multiplicative Update solver. + + .. versionadded:: 0.17 + Coordinate Descent solver. + + .. versionadded:: 0.19 + Multiplicative Update solver. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. 
Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. Used only in 'mu' solver. + + .. versionadded:: 0.19 + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + .. versionadded:: 1.0 + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + .. versionadded:: 1.0 + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. + For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + random_state : int, RandomState instance or None, default=None + Used for NMF initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + verbose : int, default=0 + The verbosity level. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + Actual number of iterations. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). + + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import non_negative_factorization + >>> W, H, n_iter = non_negative_factorization( + ... X, n_components=2, init='random', random_state=0) + """ + est = NMF( + n_components=n_components, + init=init, + solver=solver, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + shuffle=shuffle, + ) + est._validate_params() + + X = check_array(X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]) + + with config_context(assume_finite=True): + W, H, n_iter = est._fit_transform(X, W=W, H=H, update_H=update_H) + + return W, H, n_iter + + +class _BaseNMF(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, ABC): + """Base class for NMF and MiniBatchNMF.""" + + # This prevents ``set_split_inverse_transform`` to be generated for the + # non-standard ``W`` arg on ``inverse_transform``. 
+ # TODO: remove when W is removed in v1.5 for inverse_transform + __metadata_request__inverse_transform = {"W": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + StrOptions({"auto"}), + Hidden(StrOptions({"warn"})), + ], + "init": [ + StrOptions({"random", "nndsvd", "nndsvda", "nndsvdar", "custom"}), + None, + ], + "beta_loss": [ + StrOptions({"frobenius", "kullback-leibler", "itakura-saito"}), + Real, + ], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "alpha_W": [Interval(Real, 0, None, closed="left")], + "alpha_H": [Interval(Real, 0, None, closed="left"), StrOptions({"same"})], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "verbose": ["verbose"], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + random_state=None, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + verbose=0, + ): + self.n_components = n_components + self.init = init + self.beta_loss = beta_loss + self.tol = tol + self.max_iter = max_iter + self.random_state = random_state + self.alpha_W = alpha_W + self.alpha_H = alpha_H + self.l1_ratio = l1_ratio + self.verbose = verbose + + def _check_params(self, X): + # n_components + self._n_components = self.n_components + if self.n_components == "warn": + warnings.warn( + ( + "The default value of `n_components` will change from `None` to" + " `'auto'` in 1.6. Set the value of `n_components` to `None`" + " explicitly to suppress the warning." + ), + FutureWarning, + ) + self._n_components = None # Keeping the old default value + if self._n_components is None: + self._n_components = X.shape[1] + + # beta_loss + self._beta_loss = _beta_loss_to_float(self.beta_loss) + + def _check_w_h(self, X, W, H, update_H): + """Check W and H, or initialize them.""" + n_samples, n_features = X.shape + + if self.init == "custom" and update_H: + _check_init(H, (self._n_components, n_features), "NMF (input H)") + _check_init(W, (n_samples, self._n_components), "NMF (input W)") + if self._n_components == "auto": + self._n_components = H.shape[0] + + if H.dtype != X.dtype or W.dtype != X.dtype: + raise TypeError( + "H and W should have the same dtype as X. Got " + "H.dtype = {} and W.dtype = {}.".format(H.dtype, W.dtype) + ) + + elif not update_H: + if W is not None: + warnings.warn( + "When update_H=False, the provided initial W is not used.", + RuntimeWarning, + ) + + _check_init(H, (self._n_components, n_features), "NMF (input H)") + if self._n_components == "auto": + self._n_components = H.shape[0] + + if H.dtype != X.dtype: + raise TypeError( + "H should have the same dtype as X. Got H.dtype = {}.".format( + H.dtype + ) + ) + + # 'mu' solver should not be initialized by zeros + if self.solver == "mu": + avg = np.sqrt(X.mean() / self._n_components) + W = np.full((n_samples, self._n_components), avg, dtype=X.dtype) + else: + W = np.zeros((n_samples, self._n_components), dtype=X.dtype) + + else: + if W is not None or H is not None: + warnings.warn( + ( + "When init!='custom', provided W or H are ignored. Set " + " init='custom' to use them as initialization." 
+ ), + RuntimeWarning, + ) + + if self._n_components == "auto": + self._n_components = X.shape[1] + + W, H = _initialize_nmf( + X, self._n_components, init=self.init, random_state=self.random_state + ) + + return W, H + + def _compute_regularization(self, X): + """Compute scaled regularization terms.""" + n_samples, n_features = X.shape + alpha_W = self.alpha_W + alpha_H = self.alpha_W if self.alpha_H == "same" else self.alpha_H + + l1_reg_W = n_features * alpha_W * self.l1_ratio + l1_reg_H = n_samples * alpha_H * self.l1_ratio + l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio) + l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio) + + return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H + + def fit(self, X, y=None, **params): + """Learn a NMF model for the data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + **params : kwargs + Parameters (keyword arguments) and values passed to + the fit_transform instance. + + Returns + ------- + self : object + Returns the instance itself. + """ + # param validation is done in fit_transform + + self.fit_transform(X, **params) + return self + + def inverse_transform(self, Xt=None, W=None): + """Transform data back to its original space. + + .. versionadded:: 0.18 + + Parameters + ---------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_components) + Transformed data matrix. + + W : deprecated + Use `Xt` instead. + + .. deprecated:: 1.3 + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + Returns a data matrix of the original shape. + """ + if Xt is None and W is None: + raise TypeError("Missing required positional argument: Xt") + + if W is not None and Xt is not None: + raise ValueError("Please provide only `Xt`, and not `W`.") + + if W is not None: + warnings.warn( + ( + "Input argument `W` was renamed to `Xt` in v1.3 and will be removed" + " in v1.5." + ), + FutureWarning, + ) + Xt = W + + check_is_fitted(self) + return Xt @ self.components_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "requires_positive_X": True, + "preserves_dtype": [np.float64, np.float32], + } + + +class NMF(_BaseNMF): + """Non-Negative Matrix Factorization (NMF). + + Find two non-negative matrices, i.e. matrices with all non-negative elements, (W, H) + whose product approximates the non-negative matrix X. This factorization can be used + for example for dimensionality reduction, source separation or topic extraction. + + The objective function is: + + .. math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. 
+ + The regularization terms are scaled by `n_features` for `W` and by `n_samples` for + `H` to keep their impact balanced with respect to one another and to the data fit + term as independent as possible of the size `n_samples` of the training set. + + The objective function is minimized with an alternating minimization of W + and H. + + Note that the transformed data is named W and the components matrix is named H. In + the NMF literature, the naming convention is usually the opposite since the data + matrix X is transposed. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int or {'auto'} or None, default=None + Number of components, if n_components is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from W or H shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + Valid options: + + - `None`: 'nndsvda' if n_components <= min(n_samples, n_features), + otherwise random. + + - `'random'`: non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + + - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + + - `'nndsvda'`: NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + + - `'nndsvdar'` NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + + - `'custom'`: Use custom matrices `W` and `H` which must both be provided. + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + solver : {'cd', 'mu'}, default='cd' + Numerical solver to use: + + - 'cd' is a Coordinate Descent solver. + - 'mu' is a Multiplicative Update solver. + + .. versionadded:: 0.17 + Coordinate Descent solver. + + .. versionadded:: 0.19 + Multiplicative Update solver. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. Used only in 'mu' solver. + + .. versionadded:: 0.19 + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + random_state : int, RandomState instance or None, default=None + Used for initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + .. versionadded:: 1.0 + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + .. versionadded:: 1.0 + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. 
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + .. versionadded:: 0.17 + Regularization parameter *l1_ratio* used in the Coordinate Descent + solver. + + verbose : int, default=0 + Whether to be verbose. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + .. versionadded:: 0.17 + *shuffle* parameter used in the Coordinate Descent solver. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_components_ : int + The number of components. It is same as the `n_components` parameter + if it was given. Otherwise, it will be same as the number of + features. + + reconstruction_err_ : float + Frobenius norm of the matrix difference, or beta-divergence, between + the training data ``X`` and the reconstructed data ``WH`` from + the fitted model. + + n_iter_ : int + Actual number of iterations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + PCA : Principal component analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). 
+ + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import NMF + >>> model = NMF(n_components=2, init='random', random_state=0) + >>> W = model.fit_transform(X) + >>> H = model.components_ + """ + + _parameter_constraints: dict = { + **_BaseNMF._parameter_constraints, + "solver": [StrOptions({"mu", "cd"})], + "shuffle": ["boolean"], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + solver="cd", + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + random_state=None, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + verbose=0, + shuffle=False, + ): + super().__init__( + n_components=n_components, + init=init, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + ) + + self.solver = solver + self.shuffle = shuffle + + def _check_params(self, X): + super()._check_params(X) + + # solver + if self.solver != "mu" and self.beta_loss not in (2, "frobenius"): + # 'mu' is the only solver that handles other beta losses than 'frobenius' + raise ValueError( + f"Invalid beta_loss parameter: solver {self.solver!r} does not handle " + f"beta_loss = {self.beta_loss!r}" + ) + if self.solver == "mu" and self.init == "nndsvd": + warnings.warn( + ( + "The multiplicative update ('mu') solver cannot update " + "zeros present in the initialization, and so leads to " + "poorer results when used jointly with init='nndsvd'. " + "You may try init='nndsvda' or init='nndsvdar' instead." + ), + UserWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, W=None, H=None): + """Learn a NMF model for the data X and returns the transformed data. + + This is more efficient than calling fit followed by transform. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32] + ) + + with config_context(assume_finite=True): + W, H, n_iter = self._fit_transform(X, W=W, H=H) + + self.reconstruction_err_ = _beta_divergence( + X, W, H, self._beta_loss, square_root=True + ) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_iter_ = n_iter + + return W + + def _fit_transform(self, X, y=None, W=None, H=None, update_H=True): + """Learn a NMF model for the data X and returns the transformed data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed + + y : Ignored + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. 
+ If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + update_H : bool, default=True + If True, both W and H will be estimated from initial guesses, + this corresponds to a call to the 'fit_transform' method. + If False, only W will be estimated, this corresponds to a call + to the 'transform' method. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + + H : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_iter_ : int + Actual number of iterations. + """ + check_non_negative(X, "NMF (input X)") + + # check parameters + self._check_params(X) + + if X.min() == 0 and self._beta_loss <= 0: + raise ValueError( + "When beta_loss <= 0 and X contains zeros, " + "the solver may diverge. Please add small values " + "to X, or use a positive beta_loss." + ) + + # initialize or check W and H + W, H = self._check_w_h(X, W, H, update_H) + + # scale the regularization terms + l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X) + + if self.solver == "cd": + W, H, n_iter = _fit_coordinate_descent( + X, + W, + H, + self.tol, + self.max_iter, + l1_reg_W, + l1_reg_H, + l2_reg_W, + l2_reg_H, + update_H=update_H, + verbose=self.verbose, + shuffle=self.shuffle, + random_state=self.random_state, + ) + elif self.solver == "mu": + W, H, n_iter, *_ = _fit_multiplicative_update( + X, + W, + H, + self._beta_loss, + self.max_iter, + self.tol, + l1_reg_W, + l1_reg_H, + l2_reg_W, + l2_reg_H, + update_H, + self.verbose, + ) + else: + raise ValueError("Invalid solver parameter '%s'." % self.solver) + + if n_iter == self.max_iter and self.tol > 0: + warnings.warn( + "Maximum number of iterations %d reached. Increase " + "it to improve convergence." + % self.max_iter, + ConvergenceWarning, + ) + + return W, H, n_iter + + def transform(self, X): + """Transform the data X according to the fitted NMF model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False + ) + + with config_context(assume_finite=True): + W, *_ = self._fit_transform(X, H=self.components_, update_H=False) + + return W + + +class MiniBatchNMF(_BaseNMF): + """Mini-Batch Non-Negative Matrix Factorization (NMF). + + .. versionadded:: 1.1 + + Find two non-negative matrices, i.e. matrices with all non-negative elements, + (`W`, `H`) whose product approximates the non-negative matrix `X`. This + factorization can be used for example for dimensionality reduction, source + separation or topic extraction. + + The objective function is: + + .. 
math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}^2` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. + + The objective function is minimized with an alternating minimization of `W` + and `H`. + + Note that the transformed data is named `W` and the components matrix is + named `H`. In the NMF literature, the naming convention is usually the opposite + since the data matrix `X` is transposed. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int or {'auto'} or None, default=None + Number of components, if `n_components` is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from W or H shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + Valid options: + + - `None`: 'nndsvda' if `n_components <= min(n_samples, n_features)`, + otherwise random. + + - `'random'`: non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + + - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness). + + - `'nndsvda'`: NNDSVD with zeros filled with the average of X + (better when sparsity is not desired). + + - `'nndsvdar'` NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired). + + - `'custom'`: Use custom matrices `W` and `H` which must both be provided. + + batch_size : int, default=1024 + Number of samples in each mini-batch. Large batch sizes + give better long-term convergence at the cost of a slower start. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between `X` + and the dot product `WH`. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for `beta_loss <= 0` (or 'itakura-saito'), the input + matrix `X` cannot contain zeros. + + tol : float, default=1e-4 + Control early stopping based on the norm of the differences in `H` + between 2 steps. To disable early stopping based on changes in `H`, set + `tol` to 0.0. + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + max_iter : int, default=200 + Maximum number of iterations over the complete dataset before + timing out. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. 
Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. + For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + forget_factor : float, default=0.7 + Amount of rescaling of past information. Its value could be 1 with + finite datasets. Choosing values < 1 is recommended with online + learning as more recent batches will weight more than past batches. + + fresh_restarts : bool, default=False + Whether to completely solve for W at each step. Doing fresh restarts will likely + lead to a better solution for a same number of iterations but it is much slower. + + fresh_restarts_max_iter : int, default=30 + Maximum number of iterations when solving for W at each step. Only used when + doing fresh restarts. These iterations may be stopped early based on a small + change of W controlled by `tol`. + + transform_max_iter : int, default=None + Maximum number of iterations when solving for W at transform time. + If None, it defaults to `max_iter`. + + random_state : int, RandomState instance or None, default=None + Used for initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + verbose : bool, default=False + Whether to be verbose. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_components_ : int + The number of components. It is same as the `n_components` parameter + if it was given. Otherwise, it will be same as the number of + features. + + reconstruction_err_ : float + Frobenius norm of the matrix difference, or beta-divergence, between + the training data `X` and the reconstructed data `WH` from + the fitted model. + + n_iter_ : int + Actual number of started iterations over the whole dataset. + + n_steps_ : int + Number of mini-batches processed. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + See Also + -------- + NMF : Non-negative matrix factorization. + MiniBatchDictionaryLearning : Finds a dictionary that can best be used to represent + data using a sparse code. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). + + .. [3] :doi:`"Online algorithms for nonnegative matrix factorization with the + Itakura-Saito divergence" <10.1109/ASPAA.2011.6082314>` + Lefevre, A., Bach, F., Fevotte, C. (2011). WASPA. 
+ + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import MiniBatchNMF + >>> model = MiniBatchNMF(n_components=2, init='random', random_state=0) + >>> W = model.fit_transform(X) + >>> H = model.components_ + """ + + _parameter_constraints: dict = { + **_BaseNMF._parameter_constraints, + "max_no_improvement": [Interval(Integral, 1, None, closed="left"), None], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "forget_factor": [Interval(Real, 0, 1, closed="both")], + "fresh_restarts": ["boolean"], + "fresh_restarts_max_iter": [Interval(Integral, 1, None, closed="left")], + "transform_max_iter": [Interval(Integral, 1, None, closed="left"), None], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + batch_size=1024, + beta_loss="frobenius", + tol=1e-4, + max_no_improvement=10, + max_iter=200, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + forget_factor=0.7, + fresh_restarts=False, + fresh_restarts_max_iter=30, + transform_max_iter=None, + random_state=None, + verbose=0, + ): + super().__init__( + n_components=n_components, + init=init, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + ) + + self.max_no_improvement = max_no_improvement + self.batch_size = batch_size + self.forget_factor = forget_factor + self.fresh_restarts = fresh_restarts + self.fresh_restarts_max_iter = fresh_restarts_max_iter + self.transform_max_iter = transform_max_iter + + def _check_params(self, X): + super()._check_params(X) + + # batch_size + self._batch_size = min(self.batch_size, X.shape[0]) + + # forget_factor + self._rho = self.forget_factor ** (self._batch_size / X.shape[0]) + + # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] + if self._beta_loss < 1: + self._gamma = 1.0 / (2.0 - self._beta_loss) + elif self._beta_loss > 2: + self._gamma = 1.0 / (self._beta_loss - 1.0) + else: + self._gamma = 1.0 + + # transform_max_iter + self._transform_max_iter = ( + self.max_iter + if self.transform_max_iter is None + else self.transform_max_iter + ) + + return self + + def _solve_W(self, X, H, max_iter): + """Minimize the objective function w.r.t W. + + Update W with H being fixed, until convergence. This is the heart + of `transform` but it's also used during `fit` when doing fresh restarts. + """ + avg = np.sqrt(X.mean() / self._n_components) + W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype) + W_buffer = W.copy() + + # Get scaled regularization terms. Done for each minibatch to take into account + # variable sizes of minibatches. + l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X) + + for _ in range(max_iter): + W, *_ = _multiplicative_update_w( + X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma + ) + + W_diff = linalg.norm(W - W_buffer) / linalg.norm(W) + if self.tol > 0 and W_diff <= self.tol: + break + + W_buffer[:] = W + + return W + + def _minibatch_step(self, X, W, H, update_H): + """Perform the update of W and H for one minibatch.""" + batch_size = X.shape[0] + + # get scaled regularization terms. Done for each minibatch to take into account + # variable sizes of minibatches. 
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X) + + # update W + if self.fresh_restarts or W is None: + W = self._solve_W(X, H, self.fresh_restarts_max_iter) + else: + W, *_ = _multiplicative_update_w( + X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma + ) + + # necessary for stability with beta_loss < 1 + if self._beta_loss < 1: + W[W < np.finfo(np.float64).eps] = 0.0 + + batch_cost = ( + _beta_divergence(X, W, H, self._beta_loss) + + l1_reg_W * W.sum() + + l1_reg_H * H.sum() + + l2_reg_W * (W**2).sum() + + l2_reg_H * (H**2).sum() + ) / batch_size + + # update H (only at fit or fit_transform) + if update_H: + H[:] = _multiplicative_update_h( + X, + W, + H, + beta_loss=self._beta_loss, + l1_reg_H=l1_reg_H, + l2_reg_H=l2_reg_H, + gamma=self._gamma, + A=self._components_numerator, + B=self._components_denominator, + rho=self._rho, + ) + + # necessary for stability with beta_loss < 1 + if self._beta_loss <= 1: + H[H < np.finfo(np.float64).eps] = 0.0 + + return batch_cost + + def _minibatch_convergence( + self, X, batch_cost, H, H_buffer, n_samples, step, n_steps + ): + """Helper function to encapsulate the early stopping logic""" + batch_size = X.shape[0] + + # counts steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore first iteration because H is not updated yet. + if step == 1: + if self.verbose: + print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}") + return False + + # Compute an Exponentially Weighted Average of the cost function to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_cost is None: + self._ewa_cost = batch_cost + else: + alpha = batch_size / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha + + # Log progress to be able to monitor convergence + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch cost: " + f"{batch_cost}, ewa cost: {self._ewa_cost}" + ) + + # Early stopping based on change of H + H_diff = linalg.norm(H - H_buffer) / linalg.norm(H) + if self.tol > 0 and H_diff <= self.tol: + if self.verbose: + print(f"Converged (small H change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # cost function + if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min: + self._no_improvement = 0 + self._ewa_cost_min = self._ewa_cost + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in objective function) " + f"at step {step}/{n_steps}" + ) + return True + + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, W=None, H=None): + """Learn a NMF model for the data X and returns the transformed data. + + This is more efficient than calling fit followed by transform. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + y : Ignored + Not used, present here for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. 
+ + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32] + ) + + with config_context(assume_finite=True): + W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H) + + self.reconstruction_err_ = _beta_divergence( + X, W, H, self._beta_loss, square_root=True + ) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_iter_ = n_iter + self.n_steps_ = n_steps + + return W + + def _fit_transform(self, X, W=None, H=None, update_H=True): + """Learn a NMF model for the data X and returns the transformed data. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + update_H : bool, default=True + If True, both W and H will be estimated from initial guesses, + this corresponds to a call to the `fit_transform` method. + If False, only W will be estimated, this corresponds to a call + to the `transform` method. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + + H : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_iter : int + Actual number of started iterations over the whole dataset. + + n_steps : int + Number of mini-batches processed. + """ + check_non_negative(X, "MiniBatchNMF (input X)") + self._check_params(X) + + if X.min() == 0 and self._beta_loss <= 0: + raise ValueError( + "When beta_loss <= 0 and X contains zeros, " + "the solver may diverge. Please add small values " + "to X, or use a positive beta_loss." 
+ ) + + n_samples = X.shape[0] + + # initialize or check W and H + W, H = self._check_w_h(X, W, H, update_H) + H_buffer = H.copy() + + # Initialize auxiliary matrices + self._components_numerator = H.copy() + self._components_denominator = np.ones(H.shape, dtype=H.dtype) + + # Attributes to monitor the convergence + self._ewa_cost = None + self._ewa_cost_min = None + self._no_improvement = 0 + + batches = gen_batches(n_samples, self._batch_size) + batches = itertools.cycle(batches) + n_steps_per_iter = int(np.ceil(n_samples / self._batch_size)) + n_steps = self.max_iter * n_steps_per_iter + + for i, batch in zip(range(n_steps), batches): + batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H) + + if update_H and self._minibatch_convergence( + X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps + ): + break + + H_buffer[:] = H + + if self.fresh_restarts: + W = self._solve_W(X, H, self._transform_max_iter) + + n_steps = i + 1 + n_iter = int(np.ceil(n_steps / n_steps_per_iter)) + + if n_iter == self.max_iter and self.tol > 0: + warnings.warn( + ( + f"Maximum number of iterations {self.max_iter} reached. " + "Increase it to improve convergence." + ), + ConvergenceWarning, + ) + + return W, H, n_iter, n_steps + + def transform(self, X): + """Transform the data X according to the fitted MiniBatchNMF model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be transformed by the model. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False + ) + + W = self._solve_W(X, self.components_, self._transform_max_iter) + + return W + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, W=None, H=None): + """Update the model using the data in `X` as a mini-batch. + + This method is expected to be called several times consecutively + on different chunks of a dataset so as to implement out-of-core + or online learning. + + This is especially useful when the whole dataset is too big to fit in + memory at once (see :ref:`scaling_strategies`). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + y : Ignored + Not used, present here for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + Only used for the first call to `partial_fit`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + Only used for the first call to `partial_fit`. + + Returns + ------- + self + Returns the instance itself. 
+ """ + has_components = hasattr(self, "components_") + + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + dtype=[np.float64, np.float32], + reset=not has_components, + ) + + if not has_components: + # This instance has not been fitted yet (fit or partial_fit) + self._check_params(X) + _, H = self._check_w_h(X, W=W, H=H, update_H=True) + + self._components_numerator = H.copy() + self._components_denominator = np.ones(H.shape, dtype=H.dtype) + self.n_steps_ = 0 + else: + H = self.components_ + + self._minibatch_step(X, None, H, update_H=True) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_steps_ += 1 + + return self diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c95f066f9667b7156ceeda679229829efaec9ec8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..d121c5e5c186fbfda337b203603ea69c7c2c1451 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_pca.py @@ -0,0 +1,747 @@ +""" Principal Component Analysis. +""" + +# Author: Alexandre Gramfort +# Olivier Grisel +# Mathieu Blondel +# Denis A. Engemann +# Michael Eickenberg +# Giorgio Patrini +# +# License: BSD 3 clause + +from math import log, sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.sparse import issparse +from scipy.sparse.linalg import svds +from scipy.special import gammaln + +from ..base import _fit_context +from ..utils import check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._array_api import _convert_to_numpy, get_namespace +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils.extmath import fast_logdet, randomized_svd, stable_cumsum, svd_flip +from ..utils.sparsefuncs import _implicit_column_offset, mean_variance_axis +from ..utils.validation import check_is_fitted +from ._base import _BasePCA + + +def _assess_dimension(spectrum, rank, n_samples): + """Compute the log-likelihood of a rank ``rank`` dataset. + + The dataset is assumed to be embedded in gaussian noise of shape(n, + dimf) having spectrum ``spectrum``. This implements the method of + T. P. Minka. + + Parameters + ---------- + spectrum : ndarray of shape (n_features,) + Data spectrum. + rank : int + Tested rank value. It should be strictly lower than n_features, + otherwise the method isn't specified (division by zero in equation + (31) from the paper). + n_samples : int + Number of samples. + + Returns + ------- + ll : float + The log-likelihood. + + References + ---------- + This implements the method of `Thomas P. Minka: + Automatic Choice of Dimensionality for PCA. 
NIPS 2000: 598-604 + `_ + """ + xp, _ = get_namespace(spectrum) + + n_features = spectrum.shape[0] + if not 1 <= rank < n_features: + raise ValueError("the tested rank should be in [1, n_features - 1]") + + eps = 1e-15 + + if spectrum[rank - 1] < eps: + # When the tested rank is associated with a small eigenvalue, there's + # no point in computing the log-likelihood: it's going to be very + # small and won't be the max anyway. Also, it can lead to numerical + # issues below when computing pa, in particular in log((spectrum[i] - + # spectrum[j]) because this will take the log of something very small. + return -xp.inf + + pu = -rank * log(2.0) + for i in range(1, rank + 1): + pu += ( + gammaln((n_features - i + 1) / 2.0) + - log(xp.pi) * (n_features - i + 1) / 2.0 + ) + + pl = xp.sum(xp.log(spectrum[:rank])) + pl = -pl * n_samples / 2.0 + + v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank)) + pv = -log(v) * n_samples * (n_features - rank) / 2.0 + + m = n_features * rank - rank * (rank + 1.0) / 2.0 + pp = log(2.0 * xp.pi) * (m + rank) / 2.0 + + pa = 0.0 + spectrum_ = xp.asarray(spectrum, copy=True) + spectrum_[rank:n_features] = v + for i in range(rank): + for j in range(i + 1, spectrum.shape[0]): + pa += log( + (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i]) + ) + log(n_samples) + + ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0 + + return ll + + +def _infer_dimension(spectrum, n_samples): + """Infers the dimension of a dataset with a given spectrum. + + The returned value will be in [1, n_features - 1]. + """ + xp, _ = get_namespace(spectrum) + + ll = xp.empty_like(spectrum) + ll[0] = -xp.inf # we don't want to return n_components = 0 + for rank in range(1, spectrum.shape[0]): + ll[rank] = _assess_dimension(spectrum, rank, n_samples) + return xp.argmax(ll) + + +class PCA(_BasePCA): + """Principal component analysis (PCA). + + Linear dimensionality reduction using Singular Value Decomposition of the + data to project it to a lower dimensional space. The input data is centered + but not scaled for each feature before applying the SVD. + + It uses the LAPACK implementation of the full SVD or a randomized truncated + SVD by the method of Halko et al. 2009, depending on the shape of the input + data and the number of components to extract. + + It can also use the scipy.sparse.linalg ARPACK implementation of the + truncated SVD. + + Notice that this class does not support sparse input. See + :class:`TruncatedSVD` for an alternative with sparse data. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py` + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, float or 'mle', default=None + Number of components to keep. + if n_components is not set all components are kept:: + + n_components == min(n_samples, n_features) + + If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's + MLE is used to guess the dimension. Use of ``n_components == 'mle'`` + will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``. + + If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the + number of components such that the amount of variance that needs to be + explained is greater than the percentage specified by n_components. + + If ``svd_solver == 'arpack'``, the number of components must be + strictly less than the minimum of n_features and n_samples. 
+ + Hence, the None case results in:: + + n_components == min(n_samples, n_features) - 1 + + copy : bool, default=True + If False, data passed to fit are overwritten and running + fit(X).transform(X) will not yield the expected results, + use fit_transform(X) instead. + + whiten : bool, default=False + When True (False by default) the `components_` vectors are multiplied + by the square root of n_samples and then divided by the singular values + to ensure uncorrelated outputs with unit component-wise variances. + + Whitening will remove some information from the transformed signal + (the relative variance scales of the components) but can sometime + improve the predictive accuracy of the downstream estimators by + making their data respect some hard-wired assumptions. + + svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto' + If auto : + The solver is selected by a default policy based on `X.shape` and + `n_components`: if the input data is larger than 500x500 and the + number of components to extract is lower than 80% of the smallest + dimension of the data, then the more efficient 'randomized' + method is enabled. Otherwise the exact full SVD is computed and + optionally truncated afterwards. + If full : + run exact full SVD calling the standard LAPACK solver via + `scipy.linalg.svd` and select the components by postprocessing + If arpack : + run SVD truncated to n_components calling ARPACK solver via + `scipy.sparse.linalg.svds`. It requires strictly + 0 < n_components < min(X.shape) + If randomized : + run randomized SVD by the method of Halko et al. + + .. versionadded:: 0.18.0 + + tol : float, default=0.0 + Tolerance for singular values computed by svd_solver == 'arpack'. + Must be of range [0.0, infinity). + + .. versionadded:: 0.18.0 + + iterated_power : int or 'auto', default='auto' + Number of iterations for the power method computed by + svd_solver == 'randomized'. + Must be of range [0, infinity). + + .. versionadded:: 0.18.0 + + n_oversamples : int, default=10 + This parameter is only relevant when `svd_solver="randomized"`. + It corresponds to the additional number of random vectors to sample the + range of `X` so as to ensure proper conditioning. See + :func:`~sklearn.utils.extmath.randomized_svd` for more details. + + .. versionadded:: 1.1 + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Power iteration normalizer for randomized SVD solver. + Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` + for more details. + + .. versionadded:: 1.1 + + random_state : int, RandomState instance or None, default=None + Used when the 'arpack' or 'randomized' solvers are used. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + .. versionadded:: 0.18.0 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Principal axes in feature space, representing the directions of + maximum variance in the data. Equivalently, the right singular + vectors of the centered input data, parallel to its eigenvectors. + The components are sorted by decreasing ``explained_variance_``. + + explained_variance_ : ndarray of shape (n_components,) + The amount of variance explained by each of the selected components. + The variance estimation uses `n_samples - 1` degrees of freedom. + + Equal to n_components largest eigenvalues + of the covariance matrix of X. + + .. 
versionadded:: 0.18 + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + + If ``n_components`` is not set then all components are stored and the + sum of the ratios is equal to 1.0. + + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + .. versionadded:: 0.19 + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + + Equal to `X.mean(axis=0)`. + + n_components_ : int + The estimated number of components. When n_components is set + to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this + number is estimated from input data. Otherwise it equals the parameter + n_components, or the lesser value of n_features and n_samples + if n_components is None. + + n_samples_ : int + Number of samples in the training data. + + noise_variance_ : float + The estimated noise covariance following the Probabilistic PCA model + from Tipping and Bishop 1999. See "Pattern Recognition and + Machine Learning" by C. Bishop, 12.2.1 p. 574 or + http://www.miketipping.com/papers/met-mppca.pdf. It is required to + compute the estimated data covariance and score samples. + + Equal to the average of (min(n_features, n_samples) - n_components) + smallest eigenvalues of the covariance matrix of X. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + KernelPCA : Kernel Principal Component Analysis. + SparsePCA : Sparse Principal Component Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + IncrementalPCA : Incremental Principal Component Analysis. + + References + ---------- + For n_components == 'mle', this class uses the method from: + `Minka, T. P.. "Automatic choice of dimensionality for PCA". + In NIPS, pp. 598-604 `_ + + Implements the probabilistic PCA model from: + `Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal + component analysis". Journal of the Royal Statistical Society: + Series B (Statistical Methodology), 61(3), 611-622. + `_ + via the score and score_samples methods. + + For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`. + + For svd_solver == 'randomized', see: + :doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011). + "Finding structure with randomness: Probabilistic algorithms for + constructing approximate matrix decompositions". + SIAM review, 53(2), 217-288. + <10.1137/090771806>` + and also + :doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011). + "A randomized algorithm for the decomposition of matrices". + Applied and Computational Harmonic Analysis, 30(1), 47-68. + <10.1016/j.acha.2010.02.003>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import PCA + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> pca = PCA(n_components=2) + >>> pca.fit(X) + PCA(n_components=2) + >>> print(pca.explained_variance_ratio_) + [0.9924... 0.0075...] + >>> print(pca.singular_values_) + [6.30061... 0.54980...] 
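    >>> # Supplementary check (illustrative, not in the original example): the
    >>> # explained variances equal the squared singular values divided by
    >>> # n_samples - 1, as computed in `_fit_full` below.
    >>> np.allclose(pca.explained_variance_,
    ...             pca.singular_values_ ** 2 / (X.shape[0] - 1))
    True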
+ + >>> pca = PCA(n_components=2, svd_solver='full') + >>> pca.fit(X) + PCA(n_components=2, svd_solver='full') + >>> print(pca.explained_variance_ratio_) + [0.9924... 0.00755...] + >>> print(pca.singular_values_) + [6.30061... 0.54980...] + + >>> pca = PCA(n_components=1, svd_solver='arpack') + >>> pca.fit(X) + PCA(n_components=1, svd_solver='arpack') + >>> print(pca.explained_variance_ratio_) + [0.99244...] + >>> print(pca.singular_values_) + [6.30061...] + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 0, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="neither"), + StrOptions({"mle"}), + None, + ], + "copy": ["boolean"], + "whiten": ["boolean"], + "svd_solver": [StrOptions({"auto", "full", "arpack", "randomized"})], + "tol": [Interval(Real, 0, None, closed="left")], + "iterated_power": [ + StrOptions({"auto"}), + Interval(Integral, 0, None, closed="left"), + ], + "n_oversamples": [Interval(Integral, 1, None, closed="left")], + "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + copy=True, + whiten=False, + svd_solver="auto", + tol=0.0, + iterated_power="auto", + n_oversamples=10, + power_iteration_normalizer="auto", + random_state=None, + ): + self.n_components = n_components + self.copy = copy + self.whiten = whiten + self.svd_solver = svd_solver + self.tol = tol + self.iterated_power = iterated_power + self.n_oversamples = n_oversamples + self.power_iteration_normalizer = power_iteration_normalizer + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Ignored. + + Returns + ------- + self : object + Returns the instance itself. + """ + self._fit(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model with X and apply the dimensionality reduction on X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Ignored. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed values. + + Notes + ----- + This method returns a Fortran-ordered array. To convert it to a + C-ordered array, use 'np.ascontiguousarray'. + """ + U, S, Vt = self._fit(X) + U = U[:, : self.n_components_] + + if self.whiten: + # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) + U *= sqrt(X.shape[0] - 1) + else: + # X_new = X * V = U * S * Vt * V = U * S + U *= S[: self.n_components_] + + return U + + def _fit(self, X): + """Dispatch to the right submethod depending on the chosen solver.""" + xp, is_array_api_compliant = get_namespace(X) + + # Raise an error for sparse input and unsupported svd_solver + if issparse(X) and self.svd_solver != "arpack": + raise TypeError( + 'PCA only support sparse inputs with the "arpack" solver, while ' + f'"{self.svd_solver}" was passed. See TruncatedSVD for a possible' + " alternative." + ) + # Raise an error for non-Numpy input and arpack solver. 
+ if self.svd_solver == "arpack" and is_array_api_compliant: + raise ValueError( + "PCA with svd_solver='arpack' is not supported for Array API inputs." + ) + + X = self._validate_data( + X, + dtype=[xp.float64, xp.float32], + accept_sparse=("csr", "csc"), + ensure_2d=True, + copy=self.copy, + ) + + # Handle n_components==None + if self.n_components is None: + if self.svd_solver != "arpack": + n_components = min(X.shape) + else: + n_components = min(X.shape) - 1 + else: + n_components = self.n_components + + # Handle svd_solver + self._fit_svd_solver = self.svd_solver + if self._fit_svd_solver == "auto": + # Small problem or n_components == 'mle', just call full PCA + if max(X.shape) <= 500 or n_components == "mle": + self._fit_svd_solver = "full" + elif 1 <= n_components < 0.8 * min(X.shape): + self._fit_svd_solver = "randomized" + # This is also the case of n_components in (0,1) + else: + self._fit_svd_solver = "full" + + # Call different fits for either full or truncated SVD + if self._fit_svd_solver == "full": + return self._fit_full(X, n_components) + elif self._fit_svd_solver in ["arpack", "randomized"]: + return self._fit_truncated(X, n_components, self._fit_svd_solver) + + def _fit_full(self, X, n_components): + """Fit the model by computing full SVD on X.""" + xp, is_array_api_compliant = get_namespace(X) + + n_samples, n_features = X.shape + + if n_components == "mle": + if n_samples < n_features: + raise ValueError( + "n_components='mle' is only supported if n_samples >= n_features" + ) + elif not 0 <= n_components <= min(n_samples, n_features): + raise ValueError( + "n_components=%r must be between 0 and " + "min(n_samples, n_features)=%r with " + "svd_solver='full'" % (n_components, min(n_samples, n_features)) + ) + + # Center data + self.mean_ = xp.mean(X, axis=0) + X -= self.mean_ + + if not is_array_api_compliant: + # Use scipy.linalg with NumPy/SciPy inputs for the sake of not + # introducing unanticipated behavior changes. In the long run we + # could instead decide to always use xp.linalg.svd for all inputs, + # but that would make this code rely on numpy's SVD instead of + # scipy's. It's not 100% clear whether they use the same LAPACK + # solver by default though (assuming both are built against the + # same BLAS). + U, S, Vt = linalg.svd(X, full_matrices=False) + else: + U, S, Vt = xp.linalg.svd(X, full_matrices=False) + # flip eigenvectors' sign to enforce deterministic output + U, Vt = svd_flip(U, Vt) + + components_ = Vt + + # Get variance explained by singular values + explained_variance_ = (S**2) / (n_samples - 1) + total_var = xp.sum(explained_variance_) + explained_variance_ratio_ = explained_variance_ / total_var + singular_values_ = xp.asarray(S, copy=True) # Store the singular values. + + # Postprocess the number of components required + if n_components == "mle": + n_components = _infer_dimension(explained_variance_, n_samples) + elif 0 < n_components < 1.0: + # number of components for which the cumulated explained + # variance percentage is superior to the desired threshold + # side='right' ensures that number of features selected + # their variance is always greater than n_components float + # passed. 
More discussion in issue: #15669 + if is_array_api_compliant: + # Convert to numpy as xp.cumsum and xp.searchsorted are not + # part of the Array API standard yet: + # + # https://github.com/data-apis/array-api/issues/597 + # https://github.com/data-apis/array-api/issues/688 + # + # Furthermore, it's not always safe to call them for namespaces + # that already implement them: for instance as + # cupy.searchsorted does not accept a float as second argument. + explained_variance_ratio_np = _convert_to_numpy( + explained_variance_ratio_, xp=xp + ) + else: + explained_variance_ratio_np = explained_variance_ratio_ + ratio_cumsum = stable_cumsum(explained_variance_ratio_np) + n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1 + + # Compute noise covariance using Probabilistic PCA model + # The sigma2 maximum likelihood (cf. eq. 12.46) + if n_components < min(n_features, n_samples): + self.noise_variance_ = xp.mean(explained_variance_[n_components:]) + else: + self.noise_variance_ = 0.0 + + self.n_samples_ = n_samples + self.components_ = components_[:n_components, :] + self.n_components_ = n_components + self.explained_variance_ = explained_variance_[:n_components] + self.explained_variance_ratio_ = explained_variance_ratio_[:n_components] + self.singular_values_ = singular_values_[:n_components] + + return U, S, Vt + + def _fit_truncated(self, X, n_components, svd_solver): + """Fit the model by computing truncated SVD (by ARPACK or randomized) + on X. + """ + xp, _ = get_namespace(X) + + n_samples, n_features = X.shape + + if isinstance(n_components, str): + raise ValueError( + "n_components=%r cannot be a string with svd_solver='%s'" + % (n_components, svd_solver) + ) + elif not 1 <= n_components <= min(n_samples, n_features): + raise ValueError( + "n_components=%r must be between 1 and " + "min(n_samples, n_features)=%r with " + "svd_solver='%s'" + % (n_components, min(n_samples, n_features), svd_solver) + ) + elif svd_solver == "arpack" and n_components == min(n_samples, n_features): + raise ValueError( + "n_components=%r must be strictly less than " + "min(n_samples, n_features)=%r with " + "svd_solver='%s'" + % (n_components, min(n_samples, n_features), svd_solver) + ) + + random_state = check_random_state(self.random_state) + + # Center data + total_var = None + if issparse(X): + self.mean_, var = mean_variance_axis(X, axis=0) + total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1 + X = _implicit_column_offset(X, self.mean_) + else: + self.mean_ = xp.mean(X, axis=0) + X -= self.mean_ + + if svd_solver == "arpack": + v0 = _init_arpack_v0(min(X.shape), random_state) + U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0) + # svds doesn't abide by scipy.linalg.svd/randomized_svd + # conventions, so reverse its outputs. + S = S[::-1] + # flip eigenvectors' sign to enforce deterministic output + U, Vt = svd_flip(U[:, ::-1], Vt[::-1]) + + elif svd_solver == "randomized": + # sign flipping is done inside + U, S, Vt = randomized_svd( + X, + n_components=n_components, + n_oversamples=self.n_oversamples, + n_iter=self.iterated_power, + power_iteration_normalizer=self.power_iteration_normalizer, + flip_sign=True, + random_state=random_state, + ) + + self.n_samples_ = n_samples + self.components_ = Vt + self.n_components_ = n_components + + # Get variance explained by singular values + self.explained_variance_ = (S**2) / (n_samples - 1) + + # Workaround in-place variance calculation since at the time numpy + # did not have a way to calculate variance in-place. 
+ # + # TODO: update this code to either: + # * Use the array-api variance calculation, unless memory usage suffers + # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api + # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991 + if total_var is None: + N = X.shape[0] - 1 + X **= 2 + total_var = xp.sum(X) / N + + self.explained_variance_ratio_ = self.explained_variance_ / total_var + self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values. + + if self.n_components_ < min(n_features, n_samples): + self.noise_variance_ = total_var - xp.sum(self.explained_variance_) + self.noise_variance_ /= min(n_features, n_samples) - n_components + else: + self.noise_variance_ = 0.0 + + return U, S, Vt + + def score_samples(self, X): + """Return the log-likelihood of each sample. + + See. "Pattern Recognition and Machine Learning" + by C. Bishop, 12.2.1 p. 574 + or http://www.miketipping.com/papers/met-mppca.pdf + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + Returns + ------- + ll : ndarray of shape (n_samples,) + Log-likelihood of each sample under the current model. + """ + check_is_fitted(self) + xp, _ = get_namespace(X) + X = self._validate_data(X, dtype=[xp.float64, xp.float32], reset=False) + Xr = X - self.mean_ + n_features = X.shape[1] + precision = self.get_precision() + log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision)) + return log_like + + def score(self, X, y=None): + """Return the average log-likelihood of all samples. + + See. "Pattern Recognition and Machine Learning" + by C. Bishop, 12.2.1 p. 574 + or http://www.miketipping.com/papers/met-mppca.pdf + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + y : Ignored + Ignored. + + Returns + ------- + ll : float + Average log-likelihood of the samples under the current model. 
+ """ + xp, _ = get_namespace(X) + return float(xp.mean(self.score_samples(X))) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32], "array_api_support": True} diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..b14df8c5f4d222a2750be1fc413a671dfbc558e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py @@ -0,0 +1,551 @@ +"""Matrix factorization with Sparse PCA.""" +# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..linear_model import ridge_regression +from ..utils import check_random_state +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import svd_flip +from ..utils.validation import check_array, check_is_fitted +from ._dict_learning import MiniBatchDictionaryLearning, dict_learning + + +class _BaseSparsePCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Base class for SparsePCA and MiniBatchSparsePCA""" + + _parameter_constraints: dict = { + "n_components": [None, Interval(Integral, 1, None, closed="left")], + "alpha": [Interval(Real, 0.0, None, closed="left")], + "ridge_alpha": [Interval(Real, 0.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "method": [StrOptions({"lars", "cd"})], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1000, + tol=1e-8, + method="lars", + n_jobs=None, + verbose=False, + random_state=None, + ): + self.n_components = n_components + self.alpha = alpha + self.ridge_alpha = ridge_alpha + self.max_iter = max_iter + self.tol = tol + self.method = method + self.n_jobs = n_jobs + self.verbose = verbose + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + random_state = check_random_state(self.random_state) + X = self._validate_data(X) + + self.mean_ = X.mean(axis=0) + X = X - self.mean_ + + if self.n_components is None: + n_components = X.shape[1] + else: + n_components = self.n_components + + return self._fit(X, n_components, random_state) + + def transform(self, X): + """Least Squares projection of the data onto the sparse components. + + To avoid instability issues in case the system is under-determined, + regularization can be applied (Ridge regression) via the + `ridge_alpha` parameter. + + Note that Sparse PCA components orthogonality is not enforced as in PCA + hence one cannot use a simple linear projection. 
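        Since this projection is a regularized least-squares fit rather than a
        plain matrix product, it can be sketched directly with NumPy
        (``sparse_pca_transform`` is a hypothetical helper, not this module's
        code path, and ``spca`` denotes an already fitted estimator):

        import numpy as np

        def sparse_pca_transform(spca, X, ridge_alpha=0.01):
            # Solve min_U ||Xc - U @ C||_F^2 + ridge_alpha * ||U||_F^2 in
            # closed form, where C holds the (non-orthogonal) sparse components.
            Xc = np.asarray(X, dtype=float) - spca.mean_
            C = spca.components_                   # (n_components, n_features)
            G = C @ C.T + ridge_alpha * np.eye(C.shape[0])
            return np.linalg.solve(G, C @ Xc.T).T  # (n_samples, n_components)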
+ + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Test data to be transformed, must have the same number of + features as the data used to train the model. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + X = X - self.mean_ + + U = ridge_regression( + self.components_.T, X.T, self.ridge_alpha, solver="cholesky" + ) + + return U + + def inverse_transform(self, X): + """Transform data from the latent space to the original space. + + This inversion is an approximation due to the loss of information + induced by the forward decomposition. + + .. versionadded:: 1.2 + + Parameters + ---------- + X : ndarray of shape (n_samples, n_components) + Data in the latent space. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Reconstructed data in the original space. + """ + check_is_fitted(self) + X = check_array(X) + + return (X @ self.components_) + self.mean_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } + + +class SparsePCA(_BaseSparsePCA): + """Sparse Principal Components Analysis (SparsePCA). + + Finds the set of sparse components that can optimally reconstruct + the data. The amount of sparseness is controllable by the coefficient + of the L1 penalty, given by the parameter alpha. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of sparse atoms to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1 + Sparsity controlling parameter. Higher values lead to sparser + components. + + ridge_alpha : float, default=0.01 + Amount of ridge shrinkage to apply in order to improve + conditioning when calling the transform method. + + max_iter : int, default=1000 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for the stopping condition. + + method : {'lars', 'cd'}, default='lars' + Method to be used for optimization. + lars: uses the least angle regression method to solve the lasso problem + (linear_model.lars_path) + cd: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + U_init : ndarray of shape (n_samples, n_components), default=None + Initial values for the loadings for warm restart scenarios. Only used + if `U_init` and `V_init` are not None. + + V_init : ndarray of shape (n_components, n_features), default=None + Initial values for the components for warm restart scenarios. Only used + if `U_init` and `V_init` are not None. + + verbose : int or bool, default=False + Controls the verbosity; the higher, the more messages. Defaults to 0. + + random_state : int, RandomState instance or None, default=None + Used during dictionary learning. Pass an int for reproducible results + across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Sparse components extracted from the data. 
+ + error_ : ndarray + Vector of errors at each iteration. + + n_components_ : int + Estimated number of components. + + .. versionadded:: 0.23 + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + Equal to ``X.mean(axis=0)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA : Principal Component Analysis implementation. + MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less + accurate. + DictionaryLearning : Generic dictionary learning problem using a sparse code. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.decomposition import SparsePCA + >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0) + >>> transformer = SparsePCA(n_components=5, random_state=0) + >>> transformer.fit(X) + SparsePCA(...) + >>> X_transformed = transformer.transform(X) + >>> X_transformed.shape + (200, 5) + >>> # most values in the components_ are zero (sparsity) + >>> np.mean(transformer.components_ == 0) + 0.9666... + """ + + _parameter_constraints: dict = { + **_BaseSparsePCA._parameter_constraints, + "U_init": [None, np.ndarray], + "V_init": [None, np.ndarray], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1000, + tol=1e-8, + method="lars", + n_jobs=None, + U_init=None, + V_init=None, + verbose=False, + random_state=None, + ): + super().__init__( + n_components=n_components, + alpha=alpha, + ridge_alpha=ridge_alpha, + max_iter=max_iter, + tol=tol, + method=method, + n_jobs=n_jobs, + verbose=verbose, + random_state=random_state, + ) + self.U_init = U_init + self.V_init = V_init + + def _fit(self, X, n_components, random_state): + """Specialized `fit` for SparsePCA.""" + + code_init = self.V_init.T if self.V_init is not None else None + dict_init = self.U_init.T if self.U_init is not None else None + code, dictionary, E, self.n_iter_ = dict_learning( + X.T, + n_components, + alpha=self.alpha, + tol=self.tol, + max_iter=self.max_iter, + method=self.method, + n_jobs=self.n_jobs, + verbose=self.verbose, + random_state=random_state, + code_init=code_init, + dict_init=dict_init, + return_n_iter=True, + ) + # flip eigenvectors' sign to enforce deterministic output + code, dictionary = svd_flip(code, dictionary, u_based_decision=False) + self.components_ = code.T + components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis] + components_norm[components_norm == 0] = 1 + self.components_ /= components_norm + self.n_components_ = len(self.components_) + + self.error_ = E + return self + + +class MiniBatchSparsePCA(_BaseSparsePCA): + """Mini-batch Sparse Principal Components Analysis. + + Finds the set of sparse components that can optimally reconstruct + the data. The amount of sparseness is controllable by the coefficient + of the L1 penalty, given by the parameter alpha. + + For an example comparing sparse PCA to PCA, see + :ref:`sphx_glr_auto_examples_decomposition_plot_faces_decomposition.py` + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of sparse atoms to extract. 
If None, then ``n_components`` + is set to ``n_features``. + + alpha : int, default=1 + Sparsity controlling parameter. Higher values lead to sparser + components. + + ridge_alpha : float, default=0.01 + Amount of ridge shrinkage to apply in order to improve + conditioning when calling the transform method. + + max_iter : int, default=1_000 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.2 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `100`) instead. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + batch_size : int, default=3 + The number of features to take in each mini batch. + + verbose : int or bool, default=False + Controls the verbosity; the higher, the more messages. Defaults to 0. + + shuffle : bool, default=True + Whether to shuffle the data before splitting it in batches. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + method : {'lars', 'cd'}, default='lars' + Method to be used for optimization. + lars: uses the least angle regression method to solve the lasso problem + (linear_model.lars_path) + cd: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). Lars will be faster if + the estimated components are sparse. + + random_state : int, RandomState instance or None, default=None + Used for random shuffling when ``shuffle`` is set to ``True``, + during online dictionary learning. Pass an int for reproducible results + across multiple function calls. + See :term:`Glossary `. + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int or None, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to `None`. + + .. versionadded:: 1.1 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Sparse components extracted from the data. + + n_components_ : int + Estimated number of components. + + .. versionadded:: 0.23 + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + Equal to ``X.mean(axis=0)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + IncrementalPCA : Incremental principal components analysis. + PCA : Principal component analysis. + SparsePCA : Sparse Principal Components Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.decomposition import MiniBatchSparsePCA + >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0) + >>> transformer = MiniBatchSparsePCA(n_components=5, batch_size=50, + ... max_iter=10, random_state=0) + >>> transformer.fit(X) + MiniBatchSparsePCA(...) + >>> X_transformed = transformer.transform(X) + >>> X_transformed.shape + (200, 5) + >>> # most values in the components_ are zero (sparsity) + >>> np.mean(transformer.components_ == 0) + 0.9... + """ + + _parameter_constraints: dict = { + **_BaseSparsePCA._parameter_constraints, + "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)], + "callback": [None, callable], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1_000, + callback=None, + batch_size=3, + verbose=False, + shuffle=True, + n_jobs=None, + method="lars", + random_state=None, + tol=1e-3, + max_no_improvement=10, + ): + super().__init__( + n_components=n_components, + alpha=alpha, + ridge_alpha=ridge_alpha, + max_iter=max_iter, + tol=tol, + method=method, + n_jobs=n_jobs, + verbose=verbose, + random_state=random_state, + ) + self.callback = callback + self.batch_size = batch_size + self.shuffle = shuffle + self.max_no_improvement = max_no_improvement + + def _fit(self, X, n_components, random_state): + """Specialized `fit` for MiniBatchSparsePCA.""" + + transform_algorithm = "lasso_" + self.method + est = MiniBatchDictionaryLearning( + n_components=n_components, + alpha=self.alpha, + max_iter=self.max_iter, + dict_init=None, + batch_size=self.batch_size, + shuffle=self.shuffle, + n_jobs=self.n_jobs, + fit_algorithm=self.method, + random_state=random_state, + transform_algorithm=transform_algorithm, + transform_alpha=self.alpha, + verbose=self.verbose, + callback=self.callback, + tol=self.tol, + max_no_improvement=self.max_no_improvement, + ) + est.set_output(transform="default") + est.fit(X.T) + + self.components_, self.n_iter_ = est.transform(X.T).T, est.n_iter_ + + components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis] + components_norm[components_norm == 0] = 1 + self.components_ /= components_norm + self.n_components_ = len(self.components_) + + return self diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py new file mode 100644 index 0000000000000000000000000000000000000000..725683e8d46c6eef2c7fc53780c65f91e51122cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py @@ -0,0 +1,319 @@ +"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). +""" + +# Author: Lars Buitinck +# Olivier Grisel +# Michael Becker +# License: 3-clause BSD. 
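# Supplementary usage sketch (illustrative only, not part of this module): the
# latent semantic analysis (LSA) setting described in the class docstring
# below, i.e. TruncatedSVD applied to a sparse tf-idf document-term matrix.
#
#     from sklearn.decomposition import TruncatedSVD
#     from sklearn.feature_extraction.text import TfidfVectorizer
#
#     docs = ["the cat sat on the mat", "the dog sat on the log"]
#     X_tfidf = TfidfVectorizer().fit_transform(docs)  # sparse (n_docs, n_terms)
#     lsa = TruncatedSVD(n_components=2, random_state=0).fit(X_tfidf)
#     doc_topics = lsa.transform(X_tfidf)              # dense (n_docs, 2)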
+ +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy.sparse.linalg import svds + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..utils import check_array, check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import check_is_fitted + +__all__ = ["TruncatedSVD"] + + +class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Dimensionality reduction using truncated SVD (aka LSA). + + This transformer performs linear dimensionality reduction by means of + truncated singular value decomposition (SVD). Contrary to PCA, this + estimator does not center the data before computing the singular value + decomposition. This means it can work with sparse matrices + efficiently. + + In particular, truncated SVD works on term count/tf-idf matrices as + returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In + that context, it is known as latent semantic analysis (LSA). + + This estimator supports two algorithms: a fast randomized SVD solver, and + a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or + `X.T * X`, whichever is more efficient. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=2 + Desired dimensionality of output data. + If algorithm='arpack', must be strictly less than the number of features. + If algorithm='randomized', must be less than or equal to the number of features. + The default value is useful for visualisation. For LSA, a value of + 100 is recommended. + + algorithm : {'arpack', 'randomized'}, default='randomized' + SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy + (scipy.sparse.linalg.svds), or "randomized" for the randomized + algorithm due to Halko (2009). + + n_iter : int, default=5 + Number of iterations for randomized SVD solver. Not used by ARPACK. The + default is larger than the default in + :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse + matrices that may have large slowly decaying spectrum. + + n_oversamples : int, default=10 + Number of oversamples for randomized SVD solver. Not used by ARPACK. + See :func:`~sklearn.utils.extmath.randomized_svd` for a complete + description. + + .. versionadded:: 1.1 + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Power iteration normalizer for randomized SVD solver. + Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` + for more details. + + .. versionadded:: 1.1 + + random_state : int, RandomState instance or None, default=None + Used during randomized svd. Pass an int for reproducible results across + multiple function calls. + See :term:`Glossary `. + + tol : float, default=0.0 + Tolerance for ARPACK. 0 means machine precision. Ignored by randomized + SVD solver. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The right singular vectors of the input data. + + explained_variance_ : ndarray of shape (n_components,) + The variance of the training samples transformed by a projection to + each component. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. 
+ + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + FactorAnalysis : A simple linear generative model with + Gaussian latent variables. + IncrementalPCA : Incremental principal components analysis. + KernelPCA : Kernel Principal component analysis. + NMF : Non-Negative Matrix Factorization. + PCA : Principal component analysis. + + Notes + ----- + SVD suffers from a problem called "sign indeterminacy", which means the + sign of the ``components_`` and the output from transform depend on the + algorithm and random state. To work around this, fit instances of this + class to data once, then keep the instance around to do transformations. + + References + ---------- + :arxiv:`Halko, et al. (2009). "Finding structure with randomness: + Stochastic algorithms for constructing approximate matrix decompositions" + <0909.4061>` + + Examples + -------- + >>> from sklearn.decomposition import TruncatedSVD + >>> from scipy.sparse import csr_matrix + >>> import numpy as np + >>> np.random.seed(0) + >>> X_dense = np.random.rand(100, 100) + >>> X_dense[:, 2 * np.arange(50)] = 0 + >>> X = csr_matrix(X_dense) + >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42) + >>> svd.fit(X) + TruncatedSVD(n_components=5, n_iter=7, random_state=42) + >>> print(svd.explained_variance_ratio_) + [0.0157... 0.0512... 0.0499... 0.0479... 0.0453...] + >>> print(svd.explained_variance_ratio_.sum()) + 0.2102... + >>> print(svd.singular_values_) + [35.2410... 4.5981... 4.5420... 4.4486... 4.3288...] + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "algorithm": [StrOptions({"arpack", "randomized"})], + "n_iter": [Interval(Integral, 0, None, closed="left")], + "n_oversamples": [Interval(Integral, 1, None, closed="left")], + "power_iteration_normalizer": [StrOptions({"auto", "OR", "LU", "none"})], + "random_state": ["random_state"], + "tol": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + n_components=2, + *, + algorithm="randomized", + n_iter=5, + n_oversamples=10, + power_iteration_normalizer="auto", + random_state=None, + tol=0.0, + ): + self.algorithm = algorithm + self.n_components = n_components + self.n_iter = n_iter + self.n_oversamples = n_oversamples + self.power_iteration_normalizer = power_iteration_normalizer + self.random_state = random_state + self.tol = tol + + def fit(self, X, y=None): + """Fit model on training data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the transformer object. + """ + self.fit_transform(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit model to X and perform dimensionality reduction on X. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Reduced version of X. This will always be a dense array. + """ + X = self._validate_data(X, accept_sparse=["csr", "csc"], ensure_min_features=2) + random_state = check_random_state(self.random_state) + + if self.algorithm == "arpack": + v0 = _init_arpack_v0(min(X.shape), random_state) + U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0) + # svds doesn't abide by scipy.linalg.svd/randomized_svd + # conventions, so reverse its outputs. + Sigma = Sigma[::-1] + U, VT = svd_flip(U[:, ::-1], VT[::-1]) + + elif self.algorithm == "randomized": + if self.n_components > X.shape[1]: + raise ValueError( + f"n_components({self.n_components}) must be <=" + f" n_features({X.shape[1]})." + ) + U, Sigma, VT = randomized_svd( + X, + self.n_components, + n_iter=self.n_iter, + n_oversamples=self.n_oversamples, + power_iteration_normalizer=self.power_iteration_normalizer, + random_state=random_state, + ) + + self.components_ = VT + + # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T, + # X @ V is not the same as U @ Sigma + if self.algorithm == "randomized" or ( + self.algorithm == "arpack" and self.tol > 0 + ): + X_transformed = safe_sparse_dot(X, self.components_.T) + else: + X_transformed = U * Sigma + + # Calculate explained variance & explained variance ratio + self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) + if sp.issparse(X): + _, full_var = mean_variance_axis(X, axis=0) + full_var = full_var.sum() + else: + full_var = np.var(X, axis=0).sum() + self.explained_variance_ratio_ = exp_var / full_var + self.singular_values_ = Sigma # Store the singular values. + + return X_transformed + + def transform(self, X): + """Perform dimensionality reduction on X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Reduced version of X. This will always be a dense array. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False) + return safe_sparse_dot(X, self.components_.T) + + def inverse_transform(self, X): + """Transform X back to its original space. + + Returns an array X_original whose transform would be X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + New data. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Note that this is always a dense array. 
+ """ + X = check_array(X) + return np.dot(X, self.components_) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8522bc6b97994b9745dd1087e0210f884a3c90d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91fa1e21991b9309ddd5649fb352636814c004f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3d0f3cd4f486c83541f62c1484ca37b8ef0aded Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6a33e026b4bf5c3060e8fd3b6d5c2a434cb1160 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6520293ba9ac30e3b0474a0734b8f984d712b62a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bce88f9adc97b4cf1a764feaafa1fc6ecb7b41c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..446c8c900f1f5ebc7fc9d9023820dce64071dc62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a992ae1dfbf034208ca643153b0dc2ccff4720b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..470440313b25c6dce6c6ff931d650863c31b09a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12ac8c91fa2dcd86369163f48fa17e03879639c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72b35cc026eb19223eea7541a6f49fdf9fe435fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff14f8d71722463e4cd4f8c815c957ffd7ba9f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py @@ -0,0 +1,116 @@ +# Author: Christian Osendorfer +# Alexandre Gramfort +# License: BSD3 + +from itertools import combinations + +import numpy as np +import pytest + +from sklearn.decomposition import FactorAnalysis +from sklearn.decomposition._factor_analysis import _ortho_rotation +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + ignore_warnings, +) + + +# Ignore warnings from switching to more power iterations in randomized_svd +@ignore_warnings +def test_factor_analysis(): + # Test FactorAnalysis ability to recover the data covariance structure + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 20, 5, 3 + + # Some random settings for the generative model + W = rng.randn(n_components, n_features) + # latent variable of dim 3, 20 of it + h = rng.randn(n_samples, 
n_components) + # using gamma to model different noise variance + # per component + noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features) + + # generate observations + # wlog, mean is 0 + X = np.dot(h, W) + noise + + fas = [] + for method in ["randomized", "lapack"]: + fa = FactorAnalysis(n_components=n_components, svd_method=method) + fa.fit(X) + fas.append(fa) + + X_t = fa.transform(X) + assert X_t.shape == (n_samples, n_components) + + assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum()) + assert_almost_equal(fa.score_samples(X).mean(), fa.score(X)) + + diff = np.all(np.diff(fa.loglike_)) + assert diff > 0.0, "Log likelihood dif not increase" + + # Sample Covariance + scov = np.cov(X, rowvar=0.0, bias=1.0) + + # Model Covariance + mcov = fa.get_covariance() + diff = np.sum(np.abs(scov - mcov)) / W.size + assert diff < 0.1, "Mean absolute difference is %f" % diff + fa = FactorAnalysis( + n_components=n_components, noise_variance_init=np.ones(n_features) + ) + with pytest.raises(ValueError): + fa.fit(X[:, :2]) + + def f(x, y): + return np.abs(getattr(x, y)) # sign will not be equal + + fa1, fa2 = fas + for attr in ["loglike_", "components_", "noise_variance_"]: + assert_almost_equal(f(fa1, attr), f(fa2, attr)) + + fa1.max_iter = 1 + fa1.verbose = True + with pytest.warns(ConvergenceWarning): + fa1.fit(X) + + # Test get_covariance and get_precision with n_components == n_features + # with n_components < n_features and with n_components == 0 + for n_components in [0, 2, X.shape[1]]: + fa.n_components = n_components + fa.fit(X) + cov = fa.get_covariance() + precision = fa.get_precision() + assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12) + + # test rotation + n_components = 2 + + results, projections = {}, {} + for method in (None, "varimax", "quartimax"): + fa_var = FactorAnalysis(n_components=n_components, rotation=method) + results[method] = fa_var.fit_transform(X) + projections[method] = fa_var.get_covariance() + for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2): + assert not np.allclose(results[rot1], results[rot2]) + assert np.allclose(projections[rot1], projections[rot2], atol=3) + + # test against R's psych::principal with rotate="varimax" + # (i.e., the values below stem from rotating the components in R) + # R's factor analysis returns quite different values; therefore, we only + # test the rotation itself + factors = np.array( + [ + [0.89421016, -0.35854928, -0.27770122, 0.03773647], + [-0.45081822, -0.89132754, 0.0932195, -0.01787973], + [0.99500666, -0.02031465, 0.05426497, -0.11539407], + [0.96822861, -0.06299656, 0.24411001, 0.07540887], + ] + ) + r_solution = np.array( + [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]] + ) + rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T + assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py new file mode 100644 index 0000000000000000000000000000000000000000..6a376b01ecb19ab531729307229161eaca34946e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py @@ -0,0 +1,451 @@ +""" +Test the fastica algorithm. 
+""" +import itertools +import os +import warnings + +import numpy as np +import pytest +from scipy import stats + +from sklearn.decomposition import PCA, FastICA, fastica +from sklearn.decomposition._fastica import _gs_decorrelation +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import assert_allclose + + +def center_and_norm(x, axis=-1): + """Centers and norms x **in place** + + Parameters + ----------- + x: ndarray + Array with an axis of observations (statistical units) measured on + random variables. + axis: int, optional + Axis along which the mean and variance are calculated. + """ + x = np.rollaxis(x, axis) + x -= x.mean(axis=0) + x /= x.std(axis=0) + + +def test_gs(): + # Test gram schmidt orthonormalization + # generate a random orthogonal matrix + rng = np.random.RandomState(0) + W, _, _ = np.linalg.svd(rng.randn(10, 10)) + w = rng.randn(10) + _gs_decorrelation(w, W, 10) + assert (w**2).sum() < 1.0e-10 + w = rng.randn(10) + u = _gs_decorrelation(w, W, 5) + tmp = np.dot(u, W.T) + assert (tmp[:5] ** 2).sum() < 1.0e-10 + + +def test_fastica_attributes_dtypes(global_dtype): + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + fica = FastICA( + n_components=5, max_iter=1000, whiten="unit-variance", random_state=0 + ).fit(X) + assert fica.components_.dtype == global_dtype + assert fica.mixing_.dtype == global_dtype + assert fica.mean_.dtype == global_dtype + assert fica.whitening_.dtype == global_dtype + + +def test_fastica_return_dtypes(global_dtype): + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + k_, mixing_, s_ = fastica( + X, max_iter=1000, whiten="unit-variance", random_state=rng + ) + assert k_.dtype == global_dtype + assert mixing_.dtype == global_dtype + assert s_.dtype == global_dtype + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_fastica_simple(add_noise, global_random_seed, global_dtype): + if ( + global_random_seed == 20 + and global_dtype == np.float32 + and not add_noise + and os.getenv("DISTRIB") == "ubuntu" + ): + pytest.xfail( + "FastICA instability with Ubuntu Atlas build with float32 " + "global_dtype. For more details, see " + "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119" # noqa + ) + + # Test the FastICA algorithm on very simple data. 
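+    # The sources constructed below are a square wave (the sign of a sinusoid)
+    # and a heavy-tailed Student-t sample, i.e. two strongly non-Gaussian
+    # signals. ICA can only recover such sources up to permutation and sign,
+    # which is why the checks further down reorder and re-sign the estimates
+    # before comparing them against s1 and s2.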
+ rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + # Generate two sources: + s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 + s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed) + s = np.c_[s1, s2].T + center_and_norm(s) + s = s.astype(global_dtype) + s1, s2 = s + + # Mixing angle + phi = 0.6 + mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) + mixing = mixing.astype(global_dtype) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(2, 1000) + + center_and_norm(m) + + # function as fun arg + def g_test(x): + return x**3, (3 * x**2).mean(axis=-1) + + algos = ["parallel", "deflation"] + nls = ["logcosh", "exp", "cube", g_test] + whitening = ["arbitrary-variance", "unit-variance", False] + for algo, nl, whiten in itertools.product(algos, nls, whitening): + if whiten: + k_, mixing_, s_ = fastica( + m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng + ) + with pytest.raises(ValueError): + fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo) + else: + pca = PCA(n_components=2, whiten=True, random_state=rng) + X = pca.fit_transform(m.T) + k_, mixing_, s_ = fastica( + X, fun=nl, algorithm=algo, whiten=False, random_state=rng + ) + with pytest.raises(ValueError): + fastica(X, fun=np.tanh, algorithm=algo) + s_ = s_.T + # Check that the mixing model described in the docstring holds: + if whiten: + # XXX: exact reconstruction to standard relative tolerance is not + # possible. This is probably expected when add_noise is True but we + # also need a non-trivial atol in float32 when add_noise is False. + # + # Note that the 2 sources are non-Gaussian in this test. + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol) + + center_and_norm(s_) + s1_, s2_ = s_ + # Check to see if the sources have been estimated + # in the wrong order + if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): + s2_, s1_ = s_ + s1_ *= np.sign(np.dot(s1_, s1)) + s2_ *= np.sign(np.dot(s2_, s2)) + + # Check that we have estimated the original sources + if not add_noise: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2) + else: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1) + + # Test FastICA class + _, _, sources_fun = fastica( + m.T, fun=nl, algorithm=algo, random_state=global_random_seed + ) + ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed) + sources = ica.fit_transform(m.T) + assert ica.components_.shape == (2, 2) + assert sources.shape == (1000, 2) + + assert_allclose(sources_fun, sources) + # Set atol to account for the different magnitudes of the elements in sources + # (from 1e-4 to 1e1). + atol = np.max(np.abs(sources)) * (1e-5 if global_dtype == np.float32 else 1e-7) + assert_allclose(sources, ica.transform(m.T), atol=atol) + + assert ica.mixing_.shape == (2, 2) + + ica = FastICA(fun=np.tanh, algorithm=algo) + with pytest.raises(ValueError): + ica.fit(m.T) + + +def test_fastica_nowhiten(): + m = [[0, 1], [1, 0]] + + # test for issue #697 + ica = FastICA(n_components=1, whiten=False, random_state=0) + warn_msg = "Ignoring n_components with whiten=False." + with pytest.warns(UserWarning, match=warn_msg): + ica.fit(m) + assert hasattr(ica, "mixing_") + + +def test_fastica_convergence_fail(): + # Test the FastICA algorithm on very simple data + # (see test_non_square_fastica). 
+ # Ensure a ConvergenceWarning raised if the tolerance is sufficiently low. + rng = np.random.RandomState(0) + + n_samples = 1000 + # Generate two sources: + t = np.linspace(0, 100, n_samples) + s1 = np.sin(t) + s2 = np.ceil(np.sin(np.pi * t)) + s = np.c_[s1, s2].T + center_and_norm(s) + + # Mixing matrix + mixing = rng.randn(6, 2) + m = np.dot(mixing, s) + + # Do fastICA with tolerance 0. to ensure failing convergence + warn_msg = ( + "FastICA did not converge. Consider increasing tolerance " + "or the maximum number of iterations." + ) + with pytest.warns(ConvergenceWarning, match=warn_msg): + ica = FastICA( + algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.0 + ) + ica.fit(m.T) + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_non_square_fastica(add_noise): + # Test the FastICA algorithm on very simple data. + rng = np.random.RandomState(0) + + n_samples = 1000 + # Generate two sources: + t = np.linspace(0, 100, n_samples) + s1 = np.sin(t) + s2 = np.ceil(np.sin(np.pi * t)) + s = np.c_[s1, s2].T + center_and_norm(s) + s1, s2 = s + + # Mixing matrix + mixing = rng.randn(6, 2) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(6, n_samples) + + center_and_norm(m) + + k_, mixing_, s_ = fastica( + m.T, n_components=2, whiten="unit-variance", random_state=rng + ) + s_ = s_.T + + # Check that the mixing model described in the docstring holds: + assert_allclose(s_, np.dot(np.dot(mixing_, k_), m)) + + center_and_norm(s_) + s1_, s2_ = s_ + # Check to see if the sources have been estimated + # in the wrong order + if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): + s2_, s1_ = s_ + s1_ *= np.sign(np.dot(s1_, s1)) + s2_ *= np.sign(np.dot(s2_, s2)) + + # Check that we have estimated the original sources + if not add_noise: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3) + + +def test_fit_transform(global_random_seed, global_dtype): + """Test unit variance of transformed data using FastICA algorithm. + + Check that `fit_transform` gives the same result as applying + `fit` and then `transform`. + + Bug #13056 + """ + # multivariate uniform data in [0, 1] + rng = np.random.RandomState(global_random_seed) + X = rng.random_sample((100, 10)).astype(global_dtype) + max_iter = 300 + for whiten, n_components in [["unit-variance", 5], [False, None]]: + n_components_ = n_components if n_components is not None else X.shape[1] + + ica = FastICA( + n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0 + ) + with warnings.catch_warnings(): + # make sure that numerical errors do not cause sqrt of negative + # values + warnings.simplefilter("error", RuntimeWarning) + # XXX: for some seeds, the model does not converge. + # However this is not what we test here. + warnings.simplefilter("ignore", ConvergenceWarning) + Xt = ica.fit_transform(X) + assert ica.components_.shape == (n_components_, 10) + assert Xt.shape == (X.shape[0], n_components_) + + ica2 = FastICA( + n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0 + ) + with warnings.catch_warnings(): + # make sure that numerical errors do not cause sqrt of negative + # values + warnings.simplefilter("error", RuntimeWarning) + warnings.simplefilter("ignore", ConvergenceWarning) + ica2.fit(X) + assert ica2.components_.shape == (n_components_, 10) + Xt2 = ica2.transform(X) + + # XXX: we have to set atol for this test to pass for all seeds when + # fitting with float32 data. 
Is this revealing a bug? + if global_dtype: + atol = np.abs(Xt2).mean() / 1e6 + else: + atol = 0.0 # the default rtol is enough for float64 data + assert_allclose(Xt, Xt2, atol=atol) + + +@pytest.mark.filterwarnings("ignore:Ignoring n_components with whiten=False.") +@pytest.mark.parametrize( + "whiten, n_components, expected_mixing_shape", + [ + ("arbitrary-variance", 5, (10, 5)), + ("arbitrary-variance", 10, (10, 10)), + ("unit-variance", 5, (10, 5)), + ("unit-variance", 10, (10, 10)), + (False, 5, (10, 10)), + (False, 10, (10, 10)), + ], +) +def test_inverse_transform( + whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype +): + # Test FastICA.inverse_transform + n_samples = 100 + rng = np.random.RandomState(global_random_seed) + X = rng.random_sample((n_samples, 10)).astype(global_dtype) + + ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten) + with warnings.catch_warnings(): + # For some dataset (depending on the value of global_dtype) the model + # can fail to converge but this should not impact the definition of + # a valid inverse transform. + warnings.simplefilter("ignore", ConvergenceWarning) + Xt = ica.fit_transform(X) + assert ica.mixing_.shape == expected_mixing_shape + X2 = ica.inverse_transform(Xt) + assert X.shape == X2.shape + + # reversibility test in non-reduction case + if n_components == X.shape[1]: + # XXX: we have to set atol for this test to pass for all seeds when + # fitting with float32 data. Is this revealing a bug? + if global_dtype: + # XXX: dividing by a smaller number makes + # tests fail for some seeds. + atol = np.abs(X2).mean() / 1e5 + else: + atol = 0.0 # the default rtol is enough for float64 data + assert_allclose(X, X2, atol=atol) + + +def test_fastica_errors(): + n_features = 3 + n_samples = 10 + rng = np.random.RandomState(0) + X = rng.random_sample((n_samples, n_features)) + w_init = rng.randn(n_features + 1, n_features + 1) + with pytest.raises(ValueError, match=r"alpha must be in \[1,2\]"): + fastica(X, fun_args={"alpha": 0}) + with pytest.raises( + ValueError, match="w_init has invalid shape.+" r"should be \(3L?, 3L?\)" + ): + fastica(X, w_init=w_init) + + +def test_fastica_whiten_unit_variance(): + """Test unit variance of transformed data using FastICA algorithm. 
+ + Bug #13056 + """ + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)) + n_components = X.shape[1] + ica = FastICA(n_components=n_components, whiten="unit-variance", random_state=0) + Xt = ica.fit_transform(X) + + assert np.var(Xt) == pytest.approx(1.0) + + +@pytest.mark.parametrize("whiten", ["arbitrary-variance", "unit-variance", False]) +@pytest.mark.parametrize("return_X_mean", [True, False]) +@pytest.mark.parametrize("return_n_iter", [True, False]) +def test_fastica_output_shape(whiten, return_X_mean, return_n_iter): + n_features = 3 + n_samples = 10 + rng = np.random.RandomState(0) + X = rng.random_sample((n_samples, n_features)) + + expected_len = 3 + return_X_mean + return_n_iter + + out = fastica( + X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean + ) + + assert len(out) == expected_len + if not whiten: + assert out[0] is None + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_fastica_simple_different_solvers(add_noise, global_random_seed): + """Test FastICA is consistent between whiten_solvers.""" + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + # Generate two sources: + s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 + s2 = stats.t.rvs(1, size=n_samples, random_state=rng) + s = np.c_[s1, s2].T + center_and_norm(s) + s1, s2 = s + + # Mixing angle + phi = rng.rand() * 2 * np.pi + mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(2, 1000) + + center_and_norm(m) + + outs = {} + for solver in ("svd", "eigh"): + ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver=solver) + sources = ica.fit_transform(m.T) + outs[solver] = sources + assert ica.components_.shape == (2, 2) + assert sources.shape == (1000, 2) + + # compared numbers are not all on the same magnitude. Using a small atol to + # make the test less brittle + assert_allclose(outs["eigh"], outs["svd"], atol=1e-12) + + +def test_fastica_eigh_low_rank_warning(global_random_seed): + """Test FastICA eigh solver raises warning for low-rank data.""" + rng = np.random.RandomState(global_random_seed) + A = rng.randn(10, 2) + X = A @ A.T + ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh") + msg = "There are some small singular values" + with pytest.warns(UserWarning, match=msg): + ica.fit(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..5d7c8aa03f174ca6b372cd6c42de15cb4a43d15a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py @@ -0,0 +1,452 @@ +"""Tests for Incremental PCA.""" +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn import datasets +from sklearn.decomposition import PCA, IncrementalPCA +from sklearn.utils._testing import ( + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_almost_equal, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS + +iris = datasets.load_iris() + + +def test_incremental_pca(): + # Incremental PCA on dense arrays. 
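+    # IncrementalPCA processes the data in minibatches (roughly a third of the
+    # samples per batch here) instead of a single SVD of the full matrix, so
+    # its explained variance only approximately matches PCA; the comparison
+    # below therefore uses a relative tolerance instead of exact equality.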
+ X = iris.data + batch_size = X.shape[0] // 3 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size) + pca = PCA(n_components=2) + pca.fit_transform(X) + + X_transformed = ipca.fit_transform(X) + + assert X_transformed.shape == (X.shape[0], 2) + np.testing.assert_allclose( + ipca.explained_variance_ratio_.sum(), + pca.explained_variance_ratio_.sum(), + rtol=1e-3, + ) + + for n_components in [1, 2, X.shape[1]]: + ipca = IncrementalPCA(n_components, batch_size=batch_size) + ipca.fit(X) + cov = ipca.get_covariance() + precision = ipca.get_precision() + np.testing.assert_allclose( + np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13 + ) + + +@pytest.mark.parametrize( + "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS +) +def test_incremental_pca_sparse(sparse_container): + # Incremental PCA on sparse arrays. + X = iris.data + pca = PCA(n_components=2) + pca.fit_transform(X) + X_sparse = sparse_container(X) + batch_size = X_sparse.shape[0] // 3 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size) + + X_transformed = ipca.fit_transform(X_sparse) + + assert X_transformed.shape == (X_sparse.shape[0], 2) + np.testing.assert_allclose( + ipca.explained_variance_ratio_.sum(), + pca.explained_variance_ratio_.sum(), + rtol=1e-3, + ) + + for n_components in [1, 2, X.shape[1]]: + ipca = IncrementalPCA(n_components, batch_size=batch_size) + ipca.fit(X_sparse) + cov = ipca.get_covariance() + precision = ipca.get_precision() + np.testing.assert_allclose( + np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13 + ) + + with pytest.raises( + TypeError, + match=( + "IncrementalPCA.partial_fit does not support " + "sparse input. Either convert data to dense " + "or use IncrementalPCA.fit to do so in batches." + ), + ): + ipca.partial_fit(X_sparse) + + +def test_incremental_pca_check_projection(): + # Test that the projection of data is correct. + rng = np.random.RandomState(1999) + n, p = 100, 3 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5]) + Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) + + # Get the reconstruction of the generated data X + # Note that Xt has the same "components" as X, just separated + # This is what we want to ensure is recreated correctly + Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt) + + # Normalize + Yt /= np.sqrt((Yt**2).sum()) + + # Make sure that the first element of Yt is ~1, this means + # the reconstruction worked as expected + assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1) + + +def test_incremental_pca_inverse(): + # Test that the projection of data can be inverted. + rng = np.random.RandomState(1999) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X) + Y = ipca.transform(X) + Y_inverse = ipca.inverse_transform(Y) + assert_almost_equal(X, Y_inverse, decimal=3) + + +def test_incremental_pca_validation(): + # Test that n_components is <= n_features. 
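+    # With a 2 x 3 input, n_components=4 exceeds n_features and must raise;
+    # the second check further down uses n_components=3 > n_samples to
+    # exercise the complementary constraint that each (partial_fit) batch must
+    # contain at least n_components samples.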
+ X = np.array([[0, 1, 0], [1, 0, 0]]) + n_samples, n_features = X.shape + n_components = 4 + with pytest.raises( + ValueError, + match=( + "n_components={} invalid" + " for n_features={}, need more rows than" + " columns for IncrementalPCA" + " processing".format(n_components, n_features) + ), + ): + IncrementalPCA(n_components, batch_size=10).fit(X) + + # Tests that n_components is also <= n_samples. + n_components = 3 + with pytest.raises( + ValueError, + match=( + "n_components={} must be" + " less or equal to the batch number of" + " samples {}".format(n_components, n_samples) + ), + ): + IncrementalPCA(n_components=n_components).partial_fit(X) + + +def test_n_samples_equal_n_components(): + # Ensures no warning is raised when n_samples==n_components + # Non-regression test for gh-19050 + ipca = IncrementalPCA(n_components=5) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + ipca.partial_fit(np.random.randn(5, 7)) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + ipca.fit(np.random.randn(5, 7)) + + +def test_n_components_none(): + # Ensures that n_components == None is handled correctly + rng = np.random.RandomState(1999) + for n_samples, n_features in [(50, 10), (10, 50)]: + X = rng.rand(n_samples, n_features) + ipca = IncrementalPCA(n_components=None) + + # First partial_fit call, ipca.n_components_ is inferred from + # min(X.shape) + ipca.partial_fit(X) + assert ipca.n_components_ == min(X.shape) + + # Second partial_fit call, ipca.n_components_ is inferred from + # ipca.components_ computed from the first partial_fit call + ipca.partial_fit(X) + assert ipca.n_components_ == ipca.components_.shape[0] + + +def test_incremental_pca_set_params(): + # Test that components_ sign is stable over batch sizes. + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 20 + X = rng.randn(n_samples, n_features) + X2 = rng.randn(n_samples, n_features) + X3 = rng.randn(n_samples, n_features) + ipca = IncrementalPCA(n_components=20) + ipca.fit(X) + # Decreasing number of components + ipca.set_params(n_components=10) + with pytest.raises(ValueError): + ipca.partial_fit(X2) + # Increasing number of components + ipca.set_params(n_components=15) + with pytest.raises(ValueError): + ipca.partial_fit(X3) + # Returning to original setting + ipca.set_params(n_components=20) + ipca.partial_fit(X) + + +def test_incremental_pca_num_features_change(): + # Test that changing n_components will raise an error. + rng = np.random.RandomState(1999) + n_samples = 100 + X = rng.randn(n_samples, 20) + X2 = rng.randn(n_samples, 50) + ipca = IncrementalPCA(n_components=None) + ipca.fit(X) + with pytest.raises(ValueError): + ipca.partial_fit(X2) + + +def test_incremental_pca_batch_signs(): + # Test that components_ sign is stable over batch sizes. + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(10, 20) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for i, j in zip(all_components[:-1], all_components[1:]): + assert_almost_equal(np.sign(i), np.sign(j), decimal=6) + + +def test_incremental_pca_batch_values(): + # Test that components_ values are stable over batch sizes. 
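+    # Unlike the sign-stability test above, this compares the component values
+    # themselves across batch sizes; minibatch updates introduce some numerical
+    # drift, so agreement is only asserted to one decimal place.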
+ rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(20, 40, 3) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for i, j in zip(all_components[:-1], all_components[1:]): + assert_almost_equal(i, j, decimal=1) + + +def test_incremental_pca_batch_rank(): + # Test sample size in each batch is always larger or equal to n_components + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 20 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(20, 90, 3) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for components_i, components_j in zip(all_components[:-1], all_components[1:]): + assert_allclose_dense_sparse(components_i, components_j) + + +def test_incremental_pca_partial_fit(): + # Test that fit and partial_fit get equivalent results. + rng = np.random.RandomState(1999) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + batch_size = 10 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X) + pipca = IncrementalPCA(n_components=2, batch_size=batch_size) + # Add one to make sure endpoint is included + batch_itr = np.arange(0, n + 1, batch_size) + for i, j in zip(batch_itr[:-1], batch_itr[1:]): + pipca.partial_fit(X[i:j, :]) + assert_almost_equal(ipca.components_, pipca.components_, decimal=3) + + +def test_incremental_pca_against_pca_iris(): + # Test that IncrementalPCA and PCA are approximate (to a sign flip). + X = iris.data + + Y_pca = PCA(n_components=2).fit_transform(X) + Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X) + + assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) + + +def test_incremental_pca_against_pca_random_data(): + # Test that IncrementalPCA and PCA are approximate (to a sign flip). 
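+    # As in the iris comparison above, principal directions are only defined up
+    # to a sign flip, so absolute values of the projections are compared; the
+    # data gets a non-zero per-feature offset so that centering is exercised.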
+ rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features) + + Y_pca = PCA(n_components=3).fit_transform(X) + Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X) + + assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) + + +def test_explained_variances(): + # Test that PCA and IncrementalPCA calculations match + X = datasets.make_low_rank_matrix( + 1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999 + ) + prec = 3 + n_samples, n_features = X.shape + for nc in [None, 99]: + pca = PCA(n_components=nc).fit(X) + ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X) + assert_almost_equal( + pca.explained_variance_, ipca.explained_variance_, decimal=prec + ) + assert_almost_equal( + pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec + ) + assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec) + + +def test_singular_values(): + # Check that the IncrementalPCA output has the correct singular values + + rng = np.random.RandomState(0) + n_samples = 1000 + n_features = 100 + + X = datasets.make_low_rank_matrix( + n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng + ) + + pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X) + ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X) + assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2) + + # Compare to the Frobenius norm + X_pca = pca.transform(X) + X_ipca = ipca.transform(X) + assert_array_almost_equal( + np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12 + ) + assert_array_almost_equal( + np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2 + ) + + # Compare to the 2-norms of the score vectors + assert_array_almost_equal( + pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12 + ) + assert_array_almost_equal( + ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2 + ) + + # Set the singular values and see what we get back + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 110 + + X = datasets.make_low_rank_matrix( + n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng + ) + + pca = PCA(n_components=3, svd_solver="full", random_state=rng) + ipca = IncrementalPCA(n_components=3, batch_size=100) + + X_pca = pca.fit_transform(X) + X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) + X_pca[:, 0] *= 3.142 + X_pca[:, 1] *= 2.718 + + X_hat = np.dot(X_pca, pca.components_) + pca.fit(X_hat) + ipca.fit(X_hat) + assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14) + assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14) + + +def test_whitening(): + # Test that PCA and IncrementalPCA transforms match to sign flip. 
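+    # With whiten=True both estimators rescale each component to unit variance;
+    # the test compares the whitened projections (up to sign) and the round
+    # trips through inverse_transform, which stay accurate here because the
+    # data is low rank (effective_rank=2) with no noise tail.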
+ X = datasets.make_low_rank_matrix( + 1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999 + ) + prec = 3 + n_samples, n_features = X.shape + for nc in [None, 9]: + pca = PCA(whiten=True, n_components=nc).fit(X) + ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X) + + Xt_pca = pca.transform(X) + Xt_ipca = ipca.transform(X) + assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec) + Xinv_ipca = ipca.inverse_transform(Xt_ipca) + Xinv_pca = pca.inverse_transform(Xt_pca) + assert_almost_equal(X, Xinv_ipca, decimal=prec) + assert_almost_equal(X, Xinv_pca, decimal=prec) + assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec) + + +def test_incremental_pca_partial_fit_float_division(): + # Test to ensure float division is used in all versions of Python + # (non-regression test for issue #9489) + + rng = np.random.RandomState(0) + A = rng.randn(5, 3) + 2 + B = rng.randn(7, 3) + 5 + + pca = IncrementalPCA(n_components=2) + pca.partial_fit(A) + # Set n_samples_seen_ to be a floating point number instead of an int + pca.n_samples_seen_ = float(pca.n_samples_seen_) + pca.partial_fit(B) + singular_vals_float_samples_seen = pca.singular_values_ + + pca2 = IncrementalPCA(n_components=2) + pca2.partial_fit(A) + pca2.partial_fit(B) + singular_vals_int_samples_seen = pca2.singular_values_ + + np.testing.assert_allclose( + singular_vals_float_samples_seen, singular_vals_int_samples_seen + ) + + +def test_incremental_pca_fit_overflow_error(): + # Test for overflow error on Windows OS + # (non-regression test for issue #17693) + rng = np.random.RandomState(0) + A = rng.rand(500000, 2) + + ipca = IncrementalPCA(n_components=2, batch_size=10000) + ipca.fit(A) + + pca = PCA(n_components=2) + pca.fit(A) + + np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_) + + +def test_incremental_pca_feature_names_out(): + """Check feature names out for IncrementalPCA.""" + ipca = IncrementalPCA(n_components=2).fit(iris.data) + + names = ipca.get_feature_names_out() + assert_array_equal([f"incrementalpca{i}" for i in range(2)], names) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..b222cf4e158ff7059c6e0c43fff678d907b82fea --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py @@ -0,0 +1,566 @@ +import warnings + +import numpy as np +import pytest + +import sklearn +from sklearn.datasets import load_iris, make_blobs, make_circles +from sklearn.decomposition import PCA, KernelPCA +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import Perceptron +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import _check_psd_eigenvalues + + +def test_kernel_pca(): + """Nominal test for all solvers and all known kernels + a custom one + + It tests + - that fit_transform is equivalent to fit+transform + - that the shapes of transforms and inverse transforms are correct + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + def 
histogram(x, y, **kwargs): + # Histogram kernel implemented as a callable. + assert kwargs == {} # no kernel_params that we didn't ask for + return np.minimum(x, y).sum() + + for eigen_solver in ("auto", "dense", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly", histogram): + # histogram kernel produces singular matrix inside linalg.solve + # XXX use a least-squares approximation? + inv = not callable(kernel) + + # transform fit data + kpca = KernelPCA( + 4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # non-regression test: previously, gamma would be 0 by default, + # forcing all eigenvalues to 0 under the poly kernel + assert X_fit_transformed.size != 0 + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform + if inv: + X_pred2 = kpca.inverse_transform(X_pred_transformed) + assert X_pred2.shape == X_pred.shape + + +def test_kernel_pca_invalid_parameters(): + """Check that kPCA raises an error if the parameters are invalid + + Tests fitting inverse transform with a precomputed kernel raises a + ValueError. + """ + estimator = KernelPCA( + n_components=10, fit_inverse_transform=True, kernel="precomputed" + ) + err_ms = "Cannot fit_inverse_transform with a precomputed kernel" + with pytest.raises(ValueError, match=err_ms): + estimator.fit(np.random.randn(10, 10)) + + +def test_kernel_pca_consistent_transform(): + """Check robustness to mutations in the original training array + + Test that after fitting a kPCA model, it stays independent of any + mutation of the values of the original data object by relying on an + internal copy. + """ + # X_fit_ needs to retain the old, unmodified copy of X + state = np.random.RandomState(0) + X = state.rand(10, 10) + kpca = KernelPCA(random_state=state).fit(X) + transformed1 = kpca.transform(X) + + X_copy = X.copy() + X[:, 0] = 666 + transformed2 = kpca.transform(X_copy) + assert_array_almost_equal(transformed1, transformed2) + + +def test_kernel_pca_deterministic_output(): + """Test that Kernel PCA produces deterministic output + + Tests that the same inputs and random state produce the same output. + """ + rng = np.random.RandomState(0) + X = rng.rand(10, 10) + eigen_solver = ("arpack", "dense") + + for solver in eigen_solver: + transformed_X = np.zeros((20, 2)) + for i in range(20): + kpca = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng) + transformed_X[i, :] = kpca.fit_transform(X)[0] + assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_kernel_pca_sparse(csr_container): + """Test that kPCA works on a sparse data input. + + Same test as ``test_kernel_pca except inverse_transform`` since it's not + implemented for sparse matrices. 
+ """ + rng = np.random.RandomState(0) + X_fit = csr_container(rng.random_sample((5, 4))) + X_pred = csr_container(rng.random_sample((2, 4))) + + for eigen_solver in ("auto", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly"): + # transform fit data + kpca = KernelPCA( + 4, + kernel=kernel, + eigen_solver=eigen_solver, + fit_inverse_transform=False, + random_state=0, + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform: not available for sparse matrices + # XXX: should we raise another exception type here? For instance: + # NotImplementedError. + with pytest.raises(NotFittedError): + kpca.inverse_transform(X_pred_transformed) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +@pytest.mark.parametrize("n_features", [4, 10]) +def test_kernel_pca_linear_kernel(solver, n_features): + """Test that kPCA with linear kernel is equivalent to PCA for all solvers. + + KernelPCA with linear kernel should produce the same output as PCA. + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, n_features)) + X_pred = rng.random_sample((2, n_features)) + + # for a linear kernel, kernel PCA should find the same projection as PCA + # modulo the sign (direction) + # fit only the first four components: fifth is near zero eigenvalue, so + # can be trimmed due to roundoff error + n_comps = 3 if solver == "arpack" else 4 + assert_array_almost_equal( + np.abs(KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)), + np.abs( + PCA(n_comps, svd_solver=solver if solver != "dense" else "full") + .fit(X_fit) + .transform(X_pred) + ), + ) + + +def test_kernel_pca_n_components(): + """Test that `n_components` is correctly taken into account for projections + + For all solvers this tests that the output has the correct shape depending + on the selected number of components. + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + for c in [1, 2, 4]: + kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver) + shape = kpca.fit(X_fit).transform(X_pred).shape + + assert shape == (2, c) + + +def test_remove_zero_eig(): + """Check that the ``remove_zero_eig`` parameter works correctly. + + Tests that the null-space (Zero) eigenvalues are removed when + remove_zero_eig=True, whereas they are not by default. + """ + X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]]) + + # n_components=None (default) => remove_zero_eig is True + kpca = KernelPCA() + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + kpca = KernelPCA(n_components=2) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 2) + + kpca = KernelPCA(n_components=2, remove_zero_eig=True) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + +def test_leave_zero_eig(): + """Non-regression test for issue #12141 (PR #12143) + + This test checks that fit().transform() returns the same result as + fit_transform() in case of non-removed zero eigenvalue. 
+ """ + X_fit = np.array([[1, 1], [0, 0]]) + + # Assert that even with all np warnings on, there is no div by zero warning + with warnings.catch_warnings(): + # There might be warnings about the kernel being badly conditioned, + # but there should not be warnings about division by zero. + # (Numpy division by zero warning can have many message variants, but + # at least we know that it is a RuntimeWarning so lets check only this) + warnings.simplefilter("error", RuntimeWarning) + with np.errstate(all="warn"): + k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense") + # Fit, then transform + A = k.fit(X_fit).transform(X_fit) + # Do both at once + B = k.fit_transform(X_fit) + # Compare + assert_array_almost_equal(np.abs(A), np.abs(B)) + + +def test_kernel_pca_precomputed(): + """Test that kPCA works with a precomputed kernel, for all solvers""" + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + X_kpca = ( + KernelPCA(4, eigen_solver=eigen_solver, random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + X_kpca2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_pred, X_fit.T)) + ) + + X_kpca_train = KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ).fit_transform(np.dot(X_fit, X_fit.T)) + + X_kpca_train2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_fit, X_fit.T)) + ) + + assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2)) + + assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_kernel_pca_precomputed_non_symmetric(solver): + """Check that the kernel centerer works. + + Tests that a non symmetric precomputed kernel is actually accepted + because the kernel centerer does its job correctly. + """ + + # a non symmetric gram matrix + K = [[1, 2], [3, 40]] + kpca = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca.fit(K) # no error + + # same test with centered kernel + Kc = [[9, -9], [-9, 9]] + kpca_c = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca_c.fit(Kc) + + # comparison between the non-centered and centered versions + assert_array_equal(kpca.eigenvectors_, kpca_c.eigenvectors_) + assert_array_equal(kpca.eigenvalues_, kpca_c.eigenvalues_) + + +def test_gridsearch_pipeline(): + """Check that kPCA works as expected in a grid search pipeline + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="rbf", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(kernel_pca__gamma=2.0 ** np.arange(-2, 2)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + grid_search.fit(X, y) + assert grid_search.best_score_ == 1 + + +def test_gridsearch_pipeline_precomputed(): + """Check that kPCA works as expected in a grid search pipeline (2) + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. This test uses a precomputed kernel. 
+ """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="precomputed", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(Perceptron__max_iter=np.arange(1, 5)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + X_kernel = rbf_kernel(X, gamma=2.0) + grid_search.fit(X_kernel, y) + assert grid_search.best_score_ == 1 + + +def test_nested_circles(): + """Check that kPCA projects in a space where nested circles are separable + + Tests that 2D nested circles become separable with a perceptron when + projected in the first 2 kPCA using an RBF kernel, while raw samples + are not directly separable in the original space. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + + # 2D nested circles are not linearly separable + train_score = Perceptron(max_iter=5).fit(X, y).score(X, y) + assert train_score < 0.8 + + # Project the circles data into the first 2 components of a RBF Kernel + # PCA model. + # Note that the gamma value is data dependent. If this test breaks + # and the gamma value has to be updated, the Kernel PCA example will + # have to be updated too. + kpca = KernelPCA( + kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0 + ) + X_kpca = kpca.fit_transform(X) + + # The data is perfectly linearly separable in that space + train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y) + assert train_score == 1.0 + + +def test_kernel_conditioning(): + """Check that ``_check_psd_eigenvalues`` is correctly called in kPCA + + Non-regression test for issue #12140 (PR #12145). + """ + + # create a pathological X leading to small non-zero eigenvalue + X = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]] + kpca = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True) + kpca.fit(X) + + # check that the small non-zero eigenvalue was correctly set to zero + assert kpca.eigenvalues_.min() == 0 + assert np.all(kpca.eigenvalues_ == _check_psd_eigenvalues(kpca.eigenvalues_)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_precomputed_kernel_not_psd(solver): + """Check how KernelPCA works with non-PSD kernels depending on n_components + + Tests for all methods what happens with a non PSD gram matrix (this + can happen in an isomap scenario, or with custom kernel functions, or + maybe with ill-posed datasets). + + When ``n_component`` is large enough to capture a negative eigenvalue, an + error should be raised. Otherwise, KernelPCA should run without error + since the negative eigenvalues are not selected. + """ + + # a non PSD kernel with large eigenvalues, already centered + # it was captured from an isomap call and multiplied by 100 for compacity + K = [ + [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78], + [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49], + [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23], + [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9], + [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75], + [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46], + ] + # this gram matrix has 5 positive eigenvalues and 3 negative ones + # [ 52.72, 7.65, 7.65, 5.02, 0. , -0. , -6.13, -15.11] + + # 1. 
ask for enough components to get a significant negative one + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7) + # make sure that the appropriate error is raised + with pytest.raises(ValueError, match="There are significant negative eigenvalues"): + kpca.fit(K) + + # 2. ask for a small enough n_components to get only positive ones + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2) + if solver == "randomized": + # the randomized method is still inconsistent with the others on this + # since it selects the eigenvalues based on the largest 2 modules, not + # on the largest 2 values. + # + # At least we can ensure that we return an error instead of returning + # the wrong eigenvalues + with pytest.raises( + ValueError, match="There are significant negative eigenvalues" + ): + kpca.fit(K) + else: + # general case: make sure that it works + kpca.fit(K) + + +@pytest.mark.parametrize("n_components", [4, 10, 20]) +def test_kernel_pca_solvers_equivalence(n_components): + """Check that 'dense' 'arpack' & 'randomized' solvers give similar results""" + + # Generate random data + n_train, n_test = 1_000, 100 + X, _ = make_circles( + n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0 + ) + X_fit, X_pred = X[:n_train, :], X[n_train:, :] + + # reference (full) + ref_pred = ( + KernelPCA(n_components, eigen_solver="dense", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + # arpack + a_pred = ( + KernelPCA(n_components, eigen_solver="arpack", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approx + assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred)) + + # randomized + r_pred = ( + KernelPCA(n_components, eigen_solver="randomized", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approximation + assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred)) + + +def test_kernel_pca_inverse_transform_reconstruction(): + """Test if the reconstruction is a good approximation. + + Note that in general it is not possible to get an arbitrarily good + reconstruction because of kernel centering that does not + preserve all the information of the original data. 
+ """ + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + + kpca = KernelPCA( + n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3 + ) + X_trans = kpca.fit_transform(X) + X_reconst = kpca.inverse_transform(X_trans) + assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1 + + +def test_kernel_pca_raise_not_fitted_error(): + X = np.random.randn(15).reshape(5, 3) + kpca = KernelPCA() + kpca.fit(X) + with pytest.raises(NotFittedError): + kpca.inverse_transform(X) + + +def test_32_64_decomposition_shape(): + """Test that the decomposition is similar for 32 and 64 bits data + + Non regression test for + https://github.com/scikit-learn/scikit-learn/issues/18146 + """ + X, y = make_blobs( + n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1 + ) + X = StandardScaler().fit_transform(X) + X -= X.min() + + # Compare the shapes (corresponds to the number of non-zero eigenvalues) + kpca = KernelPCA() + assert kpca.fit_transform(X).shape == kpca.fit_transform(X.astype(np.float32)).shape + + +def test_kernel_pca_feature_names_out(): + """Check feature names out for KernelPCA.""" + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + kpca = KernelPCA(n_components=2).fit(X) + + names = kpca.get_feature_names_out() + assert_array_equal([f"kernelpca{i}" for i in range(2)], names) + + +def test_kernel_pca_inverse_correct_gamma(): + """Check that gamma is set correctly when not provided. + + Non-regression test for #26280 + """ + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + + kwargs = { + "n_components": 2, + "random_state": rng, + "fit_inverse_transform": True, + "kernel": "rbf", + } + + expected_gamma = 1 / X.shape[1] + kpca1 = KernelPCA(gamma=None, **kwargs).fit(X) + kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X) + + assert kpca1.gamma_ == expected_gamma + assert kpca2.gamma_ == expected_gamma + + X1_recon = kpca1.inverse_transform(kpca1.transform(X)) + X2_recon = kpca2.inverse_transform(kpca1.transform(X)) + + assert_allclose(X1_recon, X2_recon) + + +def test_kernel_pca_pandas_output(): + """Check that KernelPCA works with pandas output when the solver is arpack. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27579 + """ + pytest.importorskip("pandas") + X, _ = load_iris(as_frame=True, return_X_y=True) + with sklearn.config_context(transform_output="pandas"): + KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py new file mode 100644 index 0000000000000000000000000000000000000000..2112b59129e254eea3fffcec23ce08bb974cacd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py @@ -0,0 +1,1062 @@ +import re +import sys +import warnings +from io import StringIO + +import numpy as np +import pytest +from scipy import linalg + +from sklearn.base import clone +from sklearn.decomposition import NMF, MiniBatchNMF, non_negative_factorization +from sklearn.decomposition import _nmf as nmf # For testing internals +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.extmath import squared_norm +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_convergence_warning(Estimator, solver): + convergence_warning = ( + "Maximum number of iterations 1 reached. Increase it to improve convergence." + ) + A = np.ones((2, 2)) + with pytest.warns(ConvergenceWarning, match=convergence_warning): + Estimator(max_iter=1, n_components="auto", **solver).fit(A) + + +def test_initialize_nn_output(): + # Test that initialization does not return negative values + rng = np.random.mtrand.RandomState(42) + data = np.abs(rng.randn(10, 10)) + for init in ("random", "nndsvd", "nndsvda", "nndsvdar"): + W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0) + assert not ((W < 0).any() or (H < 0).any()) + + +# TODO(1.6): remove the warning filter for `n_components` +@pytest.mark.filterwarnings( + r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in" + r" the initialization", + "ignore:The default value of `n_components` will change", +) +def test_parameter_checking(): + # Here we only check for invalid parameter values that are not already + # automatically tested in the common tests. 
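+ # The checks below cover: a beta_loss value that the 'cd' solver does not
+ # support, negative entries in the input data, and the NNDSVD-family inits
+ # combined with n_components > min(n_samples, n_features).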
+ + A = np.ones((2, 2)) + + msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0" + with pytest.raises(ValueError, match=msg): + NMF(solver="cd", beta_loss=1.0).fit(A) + msg = "Negative values in data passed to" + with pytest.raises(ValueError, match=msg): + NMF().fit(-A) + clf = NMF(2, tol=0.1).fit(A) + with pytest.raises(ValueError, match=msg): + clf.transform(-A) + with pytest.raises(ValueError, match=msg): + nmf._initialize_nmf(-A, 2, "nndsvd") + + for init in ["nndsvd", "nndsvda", "nndsvdar"]: + msg = re.escape( + "init = '{}' can only be used when " + "n_components <= min(n_samples, n_features)".format(init) + ) + with pytest.raises(ValueError, match=msg): + NMF(3, init=init).fit(A) + with pytest.raises(ValueError, match=msg): + MiniBatchNMF(3, init=init).fit(A) + with pytest.raises(ValueError, match=msg): + nmf._initialize_nmf(A, 3, init) + + +def test_initialize_close(): + # Test NNDSVD error + # Test that _initialize_nmf error is less than the standard deviation of + # the entries in the matrix. + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + W, H = nmf._initialize_nmf(A, 10, init="nndsvd") + error = linalg.norm(np.dot(W, H) - A) + sdev = linalg.norm(A - A.mean()) + assert error <= sdev + + +def test_initialize_variants(): + # Test NNDSVD variants correctness + # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic + # 'nndsvd' only where the basic version has zeros. + rng = np.random.mtrand.RandomState(42) + data = np.abs(rng.randn(10, 10)) + W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd") + Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda") + War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0) + + for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)): + assert_almost_equal(evl[ref != 0], ref[ref != 0]) + + +# ignore UserWarning raised when both solver='mu' and init='nndsvd' +@ignore_warnings(category=UserWarning) +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random")) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H): + # Test that the decomposition does not contain negative values + A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)] + model = Estimator( + n_components=2, + init=init, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=0, + **solver, + ) + transf = model.fit_transform(A) + assert not ((model.components_ < 0).any() or (transf < 0).any()) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_fit_close(Estimator, solver): + rng = np.random.mtrand.RandomState(42) + # Test that the fit is not too far away + pnmf = Estimator( + 5, + init="nndsvdar", + random_state=0, + max_iter=600, + **solver, + ) + X = np.abs(rng.randn(6, 5)) + assert pnmf.fit(X).reconstruction_err_ < 0.1 + + +def test_nmf_true_reconstruction(): + # Test that the fit is not too far away from an exact solution + # (by construction) + n_samples = 15 + n_features = 10 + n_components = 5 + beta_loss = 1 + batch_size = 3 + max_iter = 1000 + + rng = np.random.mtrand.RandomState(42) + W_true = np.zeros([n_samples, n_components]) + W_array = np.abs(rng.randn(n_samples)) + for j in range(n_components): + W_true[j % n_samples, 
j] = W_array[j % n_samples] + H_true = np.zeros([n_components, n_features]) + H_array = np.abs(rng.randn(n_components)) + for j in range(n_features): + H_true[j % n_components, j] = H_array[j % n_components] + X = np.dot(W_true, H_true) + + model = NMF( + n_components=n_components, + solver="mu", + beta_loss=beta_loss, + max_iter=max_iter, + random_state=0, + ) + transf = model.fit_transform(X) + X_calc = np.dot(transf, model.components_) + + assert model.reconstruction_err_ < 0.1 + assert_allclose(X, X_calc) + + mbmodel = MiniBatchNMF( + n_components=n_components, + beta_loss=beta_loss, + batch_size=batch_size, + random_state=0, + max_iter=max_iter, + ) + transf = mbmodel.fit_transform(X) + X_calc = np.dot(transf, mbmodel.components_) + + assert mbmodel.reconstruction_err_ < 0.1 + assert_allclose(X, X_calc, atol=1) + + +@pytest.mark.parametrize("solver", ["cd", "mu"]) +def test_nmf_transform(solver): + # Test that fit_transform is equivalent to fit.transform for NMF + # Test that NMF.transform returns close values + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + m = NMF( + solver=solver, + n_components=3, + init="random", + random_state=0, + tol=1e-6, + ) + ft = m.fit_transform(A) + t = m.transform(A) + assert_allclose(ft, t, atol=1e-1) + + +def test_minibatch_nmf_transform(): + # Test that fit_transform is equivalent to fit.transform for MiniBatchNMF + # Only guaranteed with fresh restarts + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + m = MiniBatchNMF( + n_components=3, + random_state=0, + tol=1e-3, + fresh_restarts=True, + ) + ft = m.fit_transform(A) + t = m.transform(A) + assert_allclose(ft, t) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_transform_custom_init(Estimator, solver): + # Smoke test that checks if NMF.transform works with custom initialization + random_state = np.random.RandomState(0) + A = np.abs(random_state.randn(6, 5)) + n_components = 4 + avg = np.sqrt(A.mean() / n_components) + H_init = np.abs(avg * random_state.randn(n_components, 5)) + W_init = np.abs(avg * random_state.randn(6, n_components)) + + m = Estimator( + n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver + ) + m.fit_transform(A, W=W_init, H=H_init) + m.transform(A) + + +@pytest.mark.parametrize("solver", ("cd", "mu")) +def test_nmf_inverse_transform(solver): + # Test that NMF.inverse_transform returns close values + random_state = np.random.RandomState(0) + A = np.abs(random_state.randn(6, 4)) + m = NMF( + solver=solver, + n_components=4, + init="random", + random_state=0, + max_iter=1000, + ) + ft = m.fit_transform(A) + A_new = m.inverse_transform(ft) + assert_array_almost_equal(A, A_new, decimal=2) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +def test_mbnmf_inverse_transform(): + # Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform + # is close to the identity + rng = np.random.RandomState(0) + A = np.abs(rng.randn(6, 4)) + nmf = MiniBatchNMF( + random_state=rng, + max_iter=500, + init="nndsvdar", + fresh_restarts=True, + ) + ft = nmf.fit_transform(A) + A_new = nmf.inverse_transform(ft) + assert_allclose(A, A_new, rtol=1e-3, atol=1e-2) + + +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_n_components_greater_n_features(Estimator): + # Smoke test for the case of more components than features. 
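+ # A is (30, 10), so n_components=15 yields an over-complete factorization;
+ # we only check that fit() completes without raising.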
+ rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(30, 10)) + Estimator(n_components=15, random_state=0, tol=1e-2).fit(A) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_nmf_sparse_input(Estimator, solver, sparse_container, alpha_W, alpha_H): + # Test that sparse matrices are accepted as input + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + A[:, 2 * np.arange(5)] = 0 + A_sparse = sparse_container(A) + + est1 = Estimator( + n_components=5, + init="random", + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=0, + tol=0, + max_iter=100, + **solver, + ) + est2 = clone(est1) + + W1 = est1.fit_transform(A) + W2 = est2.fit_transform(A_sparse) + H1 = est1.components_ + H2 = est2.components_ + + assert_allclose(W1, W2) + assert_allclose(H1, H2) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_nmf_sparse_transform(Estimator, solver, csc_container): + # Test that transform works on sparse data. Issue #2124 + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(3, 2)) + A[1, 1] = 0 + A = csc_container(A) + + model = Estimator(random_state=0, n_components=2, max_iter=400, **solver) + A_fit_tr = model.fit_transform(A) + A_tr = model.transform(A) + assert_allclose(A_fit_tr, A_tr, atol=1e-1) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("init", ["random", "nndsvd"]) +@pytest.mark.parametrize("solver", ("cd", "mu")) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H): + # Test that the function is called in the same way, either directly + # or through the NMF class + max_iter = 500 + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + A[:, 2 * np.arange(5)] = 0 + + W_nmf, H, _ = non_negative_factorization( + A, + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + W_nmf_2, H, _ = non_negative_factorization( + A, + H=H, + update_H=False, + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + + model_class = NMF( + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + W_cls = model_class.fit_transform(A) + W_cls_2 = model_class.transform(A) + + assert_allclose(W_nmf, W_cls) + assert_allclose(W_nmf_2, W_cls_2) + + +def test_non_negative_factorization_checking(): + # Note that the validity of parameter types and range of possible values + # for scalar numerical or str parameters is already checked in the common + # tests. Here we only check for problems that cannot be captured by simple + # declarative constraints on the valid parameter values. 
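+ # For example: a custom W or H containing negative values, or a custom H
+ # that is entirely zero.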
+ + A = np.ones((2, 2)) + # Test parameters checking in public function + nnmf = non_negative_factorization + msg = re.escape("Negative values in data passed to NMF (input H)") + with pytest.raises(ValueError, match=msg): + nnmf(A, A, -A, 2, init="custom") + msg = re.escape("Negative values in data passed to NMF (input W)") + with pytest.raises(ValueError, match=msg): + nnmf(A, -A, A, 2, init="custom") + msg = re.escape("Array passed to NMF (input H) is full of zeros") + with pytest.raises(ValueError, match=msg): + nnmf(A, A, 0 * A, 2, init="custom") + + +def _beta_divergence_dense(X, W, H, beta): + """Compute the beta-divergence of X and W.H for dense array only. + + Used as a reference for testing nmf._beta_divergence. + """ + WH = np.dot(W, H) + + if beta == 2: + return squared_norm(X - WH) / 2 + + WH_Xnonzero = WH[X != 0] + X_nonzero = X[X != 0] + np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero) + + if beta == 1: + res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero)) + res += WH.sum() - X.sum() + + elif beta == 0: + div = X_nonzero / WH_Xnonzero + res = np.sum(div) - X.size - np.sum(np.log(div)) + else: + res = (X_nonzero**beta).sum() + res += (beta - 1) * (WH**beta).sum() + res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum() + res /= beta * (beta - 1) + + return res + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_beta_divergence(csr_container): + # Compare _beta_divergence with the reference _beta_divergence_dense + n_samples = 20 + n_features = 10 + n_components = 5 + beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0] + + # initialization + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta in beta_losses: + ref = _beta_divergence_dense(X, W, H, beta) + loss = nmf._beta_divergence(X, W, H, beta) + loss_csr = nmf._beta_divergence(X_csr, W, H, beta) + + assert_almost_equal(ref, loss, decimal=7) + assert_almost_equal(ref, loss_csr, decimal=7) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_special_sparse_dot(csr_container): + # Test the function that computes np.dot(W, H), only where X is non zero. 
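+ # The sparse result must share the sparsity structure of X_csr (same indices
+ # and indptr) and match the dense product W @ H on those non-zero positions.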
+ n_samples = 10 + n_features = 5 + n_components = 3 + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + + W = np.abs(rng.randn(n_samples, n_components)) + H = np.abs(rng.randn(n_components, n_features)) + + WH_safe = nmf._special_sparse_dot(W, H, X_csr) + WH = nmf._special_sparse_dot(W, H, X) + + # test that both results have same values, in X_csr nonzero elements + ii, jj = X_csr.nonzero() + WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel() + assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10) + + # test that WH_safe and X_csr have the same sparse structure + assert_array_equal(WH_safe.indices, X_csr.indices) + assert_array_equal(WH_safe.indptr, X_csr.indptr) + assert_array_equal(WH_safe.shape, X_csr.shape) + + +@ignore_warnings(category=ConvergenceWarning) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_nmf_multiplicative_update_sparse(csr_container): + # Compare sparse and dense input in multiplicative update NMF + # Also test continuity of the results with respect to beta_loss parameter + n_samples = 20 + n_features = 10 + n_components = 5 + alpha = 0.1 + l1_ratio = 0.5 + n_iter = 20 + + # initialization + rng = np.random.mtrand.RandomState(1337) + X = rng.randn(n_samples, n_features) + X = np.abs(X) + X_csr = csr_container(X) + W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5): + # Reference with dense array X + W, H = W0.copy(), H0.copy() + W1, H1, _ = non_negative_factorization( + X, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + # Compare with sparse X + W, H = W0.copy(), H0.copy() + W2, H2, _ = non_negative_factorization( + X_csr, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + assert_allclose(W1, W2, atol=1e-7) + assert_allclose(H1, H2, atol=1e-7) + + # Compare with almost same beta_loss, since some values have a specific + # behavior, but the results should be continuous w.r.t beta_loss + beta_loss -= 1.0e-5 + W, H = W0.copy(), H0.copy() + W3, H3, _ = non_negative_factorization( + X_csr, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + assert_allclose(W1, W3, atol=1e-4) + assert_allclose(H1, H3, atol=1e-4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_nmf_negative_beta_loss(csr_container): + # Test that an error is raised if beta_loss < 0 and X contains zeros. + # Test that the output has not NaN values when the input contains zeros. + n_samples = 6 + n_features = 5 + n_components = 3 + + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + + def _assert_nmf_no_nan(X, beta_loss): + W, H, _ = non_negative_factorization( + X, + init="random", + n_components=n_components, + solver="mu", + beta_loss=beta_loss, + random_state=0, + max_iter=1000, + ) + assert not np.any(np.isnan(W)) + assert not np.any(np.isnan(H)) + + msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge." 
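+ # With beta_loss <= 0, exact zeros in X can make the solver diverge, so
+ # fitting on X must raise, while the shifted X + 1e-9 must fit and produce
+ # NaN-free factors.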
+ for beta_loss in (-0.6, 0.0): + with pytest.raises(ValueError, match=msg): + _assert_nmf_no_nan(X, beta_loss) + _assert_nmf_no_nan(X + 1e-9, beta_loss) + + for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5): + _assert_nmf_no_nan(X, beta_loss) + _assert_nmf_no_nan(X_csr, beta_loss) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("beta_loss", [-0.5, 0.0]) +def test_minibatch_nmf_negative_beta_loss(beta_loss): + """Check that an error is raised if beta_loss < 0 and X contains zeros.""" + rng = np.random.RandomState(0) + X = rng.normal(size=(6, 5)) + X[X < 0] = 0 + + nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0) + + msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge." + with pytest.raises(ValueError, match=msg): + nmf.fit(X) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_regularization(Estimator, solver): + # Test the effect of L1 and L2 regularizations + n_samples = 6 + n_features = 5 + n_components = 3 + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(n_samples, n_features)) + + # L1 regularization should increase the number of zeros + l1_ratio = 1.0 + regul = Estimator( + n_components=n_components, + alpha_W=0.5, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + model = Estimator( + n_components=n_components, + alpha_W=0.0, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + + W_regul = regul.fit_transform(X) + W_model = model.fit_transform(X) + + H_regul = regul.components_ + H_model = model.components_ + + eps = np.finfo(np.float64).eps + W_regul_n_zeros = W_regul[W_regul <= eps].size + W_model_n_zeros = W_model[W_model <= eps].size + H_regul_n_zeros = H_regul[H_regul <= eps].size + H_model_n_zeros = H_model[H_model <= eps].size + + assert W_regul_n_zeros > W_model_n_zeros + assert H_regul_n_zeros > H_model_n_zeros + + # L2 regularization should decrease the sum of the squared norm + # of the matrices W and H + l1_ratio = 0.0 + regul = Estimator( + n_components=n_components, + alpha_W=0.5, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + model = Estimator( + n_components=n_components, + alpha_W=0.0, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + + W_regul = regul.fit_transform(X) + W_model = model.fit_transform(X) + + H_regul = regul.components_ + H_model = model.components_ + + assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > ( + linalg.norm(W_regul) + ) ** 2.0 + (linalg.norm(H_regul)) ** 2.0 + + +@ignore_warnings(category=ConvergenceWarning) +@pytest.mark.parametrize("solver", ("cd", "mu")) +def test_nmf_decreasing(solver): + # test that the objective function is decreasing at each iteration + n_samples = 20 + n_features = 15 + n_components = 10 + alpha = 0.1 + l1_ratio = 0.5 + tol = 0.0 + + # initialization + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.abs(X, X) + W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5): + if solver != "mu" and beta_loss != 2: + # not implemented + continue + W, H = W0.copy(), H0.copy() + previous_loss = None + for _ in range(30): + # one more iteration starting from the previous results + W, H, _ = non_negative_factorization( + X, + W, + H, + beta_loss=beta_loss, + init="custom", + n_components=n_components, + max_iter=1, + alpha_W=alpha, + 
solver=solver, + tol=tol, + l1_ratio=l1_ratio, + verbose=0, + random_state=0, + update_H=True, + ) + + loss = ( + nmf._beta_divergence(X, W, H, beta_loss) + + alpha * l1_ratio * n_features * W.sum() + + alpha * l1_ratio * n_samples * H.sum() + + alpha * (1 - l1_ratio) * n_features * (W**2).sum() + + alpha * (1 - l1_ratio) * n_samples * (H**2).sum() + ) + if previous_loss is not None: + assert previous_loss > loss + previous_loss = loss + + +def test_nmf_underflow(): + # Regression test for an underflow issue in _beta_divergence + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 10, 2, 2 + X = np.abs(rng.randn(n_samples, n_features)) * 10 + W = np.abs(rng.randn(n_samples, n_components)) * 10 + H = np.abs(rng.randn(n_components, n_features)) + + X[0, 0] = 0 + ref = nmf._beta_divergence(X, W, H, beta=1.0) + X[0, 0] = 1e-323 + res = nmf._beta_divergence(X, W, H, beta=1.0) + assert_almost_equal(res, ref) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize( + "dtype_in, dtype_out", + [ + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ], +) +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out): + # Check that NMF preserves dtype (float32 and float64) + X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False) + np.abs(X, out=X) + + nmf = Estimator( + alpha_W=1.0, + alpha_H=1.0, + tol=1e-2, + random_state=0, + **solver, + ) + + assert nmf.fit(X).transform(X).dtype == dtype_out + assert nmf.fit_transform(X).dtype == dtype_out + assert nmf.components_.dtype == dtype_out + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_float32_float64_consistency(Estimator, solver): + # Check that the result of NMF is the same between float32 and float64 + X = np.random.RandomState(0).randn(50, 7) + np.abs(X, out=X) + nmf32 = Estimator(random_state=0, tol=1e-3, **solver) + W32 = nmf32.fit_transform(X.astype(np.float32)) + nmf64 = Estimator(random_state=0, tol=1e-3, **solver) + W64 = nmf64.fit_transform(X) + + assert_allclose(W32, W64, atol=1e-5) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_nmf_custom_init_dtype_error(Estimator): + # Check that an error is raise if custom H and/or W don't have the same + # dtype as X. 
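+ # X is float64 while the custom H is float32, so both the estimator and
+ # non_negative_factorization should raise a TypeError.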
+ rng = np.random.RandomState(0) + X = rng.random_sample((20, 15)) + H = rng.random_sample((15, 15)).astype(np.float32) + W = rng.random_sample((20, 15)) + + with pytest.raises(TypeError, match="should have the same dtype as X"): + Estimator(init="custom").fit(X, H=H, W=W) + + with pytest.raises(TypeError, match="should have the same dtype as X"): + non_negative_factorization(X, H=H, update_H=False) + + +@pytest.mark.parametrize("beta_loss", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5]) +def test_nmf_minibatchnmf_equivalence(beta_loss): + # Test that MiniBatchNMF is equivalent to NMF when batch_size = n_samples and + # forget_factor 0.0 (stopping criterion put aside) + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(48, 5)) + + nmf = NMF( + n_components=5, + beta_loss=beta_loss, + solver="mu", + random_state=0, + tol=0, + ) + mbnmf = MiniBatchNMF( + n_components=5, + beta_loss=beta_loss, + random_state=0, + tol=0, + max_no_improvement=None, + batch_size=X.shape[0], + forget_factor=0.0, + ) + W = nmf.fit_transform(X) + mbW = mbnmf.fit_transform(X) + assert_allclose(W, mbW) + + +def test_minibatch_nmf_partial_fit(): + # Check fit / partial_fit equivalence. Applicable only with fresh restarts. + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(100, 5)) + + n_components = 5 + batch_size = 10 + max_iter = 2 + + mbnmf1 = MiniBatchNMF( + n_components=n_components, + init="custom", + random_state=0, + max_iter=max_iter, + batch_size=batch_size, + tol=0, + max_no_improvement=None, + fresh_restarts=False, + ) + mbnmf2 = MiniBatchNMF(n_components=n_components, init="custom", random_state=0) + + # Force the same init of H (W is recomputed anyway) to be able to compare results. + W, H = nmf._initialize_nmf( + X, n_components=n_components, init="random", random_state=0 + ) + + mbnmf1.fit(X, W=W, H=H) + for i in range(max_iter): + for j in range(batch_size): + mbnmf2.partial_fit(X[j : j + batch_size], W=W[:batch_size], H=H) + + assert mbnmf1.n_steps_ == mbnmf2.n_steps_ + assert_allclose(mbnmf1.components_, mbnmf2.components_) + + +def test_feature_names_out(): + """Check feature names out for NMF.""" + random_state = np.random.RandomState(0) + X = np.abs(random_state.randn(10, 4)) + nmf = NMF(n_components=3).fit(X) + + names = nmf.get_feature_names_out() + assert_array_equal([f"nmf{i}" for i in range(3)], names) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +def test_minibatch_nmf_verbose(): + # Check verbose mode of MiniBatchNMF for better coverage. 
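+ # stdout is redirected to a StringIO buffer so that the verbose progress
+ # messages do not clutter the test output.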
+ A = np.random.RandomState(0).random_sample((100, 10)) + nmf = MiniBatchNMF(tol=1e-2, random_state=0, verbose=1) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + nmf.fit(A) + finally: + sys.stdout = old_stdout + + +# TODO(1.5): remove this test +def test_NMF_inverse_transform_W_deprecation(): + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + est = NMF( + n_components=3, + init="random", + random_state=0, + tol=1e-6, + ) + Xt = est.fit_transform(A) + + with pytest.raises(TypeError, match="Missing required positional argument"): + est.inverse_transform() + + with pytest.raises(ValueError, match="Please provide only"): + est.inverse_transform(Xt=Xt, W=Xt) + + with warnings.catch_warnings(record=True): + warnings.simplefilter("error") + est.inverse_transform(Xt) + + with pytest.warns(FutureWarning, match="Input argument `W` was renamed to `Xt`"): + est.inverse_transform(W=Xt) + + +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_nmf_n_components_auto(Estimator): + # Check that n_components is correctly inferred + # from the provided custom initialization. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W = rng.random_sample((6, 2)) + H = rng.random_sample((2, 5)) + est = Estimator( + n_components="auto", + init="custom", + random_state=0, + tol=1e-6, + ) + est.fit_transform(X, W=W, H=H) + assert est._n_components == H.shape[0] + + +def test_nmf_non_negative_factorization_n_components_auto(): + # Check that n_components is correctly inferred from the provided + # custom initialization. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W_init = rng.random_sample((6, 2)) + H_init = rng.random_sample((2, 5)) + W, H, _ = non_negative_factorization( + X, W=W_init, H=H_init, init="custom", n_components="auto" + ) + assert H.shape == H_init.shape + assert W.shape == W_init.shape + + +# TODO(1.6): remove +def test_nmf_n_components_default_value_warning(): + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H = rng.random_sample((2, 5)) + with pytest.warns( + FutureWarning, match="The default value of `n_components` will change from" + ): + non_negative_factorization(X, H=H) + + +def test_nmf_n_components_auto_no_h_update(): + # Tests that non_negative_factorization does not fail when setting + # n_components="auto" also tests that the inferred n_component + # value is the right one. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H_true = rng.random_sample((2, 5)) + W, H, _ = non_negative_factorization( + X, H=H_true, n_components="auto", update_H=False + ) # should not fail + assert_allclose(H, H_true) + assert W.shape == (X.shape[0], H_true.shape[0]) + + +def test_nmf_w_h_not_used_warning(): + # Check that warnings are raised if user provided W and H are not used + # and initialization overrides value of W or H + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W_init = rng.random_sample((6, 2)) + H_init = rng.random_sample((2, 5)) + with pytest.warns( + RuntimeWarning, + match="When init!='custom', provided W or H are ignored", + ): + non_negative_factorization(X, H=H_init, update_H=True, n_components="auto") + + with pytest.warns( + RuntimeWarning, + match="When init!='custom', provided W or H are ignored", + ): + non_negative_factorization( + X, W=W_init, H=H_init, update_H=True, n_components="auto" + ) + + with pytest.warns( + RuntimeWarning, match="When update_H=False, the provided initial W is not used." 
+ ): + # When update_H is False, W is ignored regardless of init + # TODO: use the provided W when init="custom". + non_negative_factorization( + X, W=W_init, H=H_init, update_H=False, n_components="auto" + ) + + +def test_nmf_custom_init_shape_error(): + # Check that an informative error is raised when custom initialization does not + # have the right shape + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H = rng.random_sample((2, 5)) + nmf = NMF(n_components=2, init="custom", random_state=0) + + with pytest.raises(ValueError, match="Array with wrong first dimension passed"): + nmf.fit(X, H=H, W=rng.random_sample((5, 2))) + + with pytest.raises(ValueError, match="Array with wrong second dimension passed"): + nmf.fit(X, H=H, W=rng.random_sample((6, 3))) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py new file mode 100644 index 0000000000000000000000000000000000000000..d442d0beeb57394b276ae4de9f683886d982f29e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py @@ -0,0 +1,477 @@ +import sys +from io import StringIO + +import numpy as np +import pytest +from numpy.testing import assert_array_equal +from scipy.linalg import block_diag +from scipy.special import psi + +from sklearn.decomposition import LatentDirichletAllocation +from sklearn.decomposition._online_lda_fast import ( + _dirichlet_expectation_1d, + _dirichlet_expectation_2d, +) +from sklearn.exceptions import NotFittedError +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + if_safe_multiprocessing_with_blas, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def _build_sparse_array(csr_container): + # Create 3 topics and each topic has 3 distinct words. + # (Each word only belongs to a single topic.) 
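+ # The resulting document-word matrix is block diagonal: documents 0-2 only
+ # use words 0-2, documents 3-5 only use words 3-5, and so on.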
+ n_components = 3 + block = np.full((3, 3), n_components, dtype=int) + blocks = [block] * n_components + X = block_diag(*blocks) + X = csr_container(X) + return (n_components, X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_default_prior_params(csr_container): + # default prior parameter should be `1 / topics` + # and verbose params should not affect result + n_components, X = _build_sparse_array(csr_container) + prior = 1.0 / n_components + lda_1 = LatentDirichletAllocation( + n_components=n_components, + doc_topic_prior=prior, + topic_word_prior=prior, + random_state=0, + ) + lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0) + topic_distr_1 = lda_1.fit_transform(X) + topic_distr_2 = lda_2.fit_transform(X) + assert_almost_equal(topic_distr_1, topic_distr_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_batch(csr_container): + # Test LDA batch learning_offset (`fit` method with 'batch' learning) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + evaluate_every=1, + learning_method="batch", + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_online(csr_container): + # Test LDA online learning (`fit` method with 'online' learning) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=10.0, + evaluate_every=1, + learning_method="online", + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_partial_fit(csr_container): + # Test LDA online learning (`partial_fit` method) + # (same as test_lda_batch) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=10.0, + total_samples=100, + random_state=rng, + ) + for i in range(3): + lda.partial_fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_dense_input(csr_container): + # Test LDA with dense input. + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, learning_method="batch", random_state=rng + ) + lda.fit(X.toarray()) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +def test_lda_transform(): + # Test LDA transform. 
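+ # (transform returns the per-document topic distribution)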
+ # Transform result cannot be negative and should be normalized + rng = np.random.RandomState(0) + X = rng.randint(5, size=(20, 10)) + n_components = 3 + lda = LatentDirichletAllocation(n_components=n_components, random_state=rng) + X_trans = lda.fit_transform(X) + assert (X_trans > 0.0).any() + assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0])) + + +@pytest.mark.parametrize("method", ("online", "batch")) +def test_lda_fit_transform(method): + # Test LDA fit_transform & transform + # fit_transform and transform result should be the same + rng = np.random.RandomState(0) + X = rng.randint(10, size=(50, 20)) + lda = LatentDirichletAllocation( + n_components=5, learning_method=method, random_state=rng + ) + X_fit = lda.fit_transform(X) + X_trans = lda.transform(X) + assert_array_almost_equal(X_fit, X_trans, 4) + + +def test_lda_negative_input(): + # test pass dense matrix with sparse negative input. + X = np.full((5, 10), -1.0) + lda = LatentDirichletAllocation() + regex = r"^Negative values in data passed" + with pytest.raises(ValueError, match=regex): + lda.fit(X) + + +def test_lda_no_component_error(): + # test `perplexity` before `fit` + rng = np.random.RandomState(0) + X = rng.randint(4, size=(20, 10)) + lda = LatentDirichletAllocation() + regex = ( + "This LatentDirichletAllocation instance is not fitted yet. " + "Call 'fit' with appropriate arguments before using this " + "estimator." + ) + with pytest.raises(NotFittedError, match=regex): + lda.perplexity(X) + + +@if_safe_multiprocessing_with_blas +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("method", ("online", "batch")) +def test_lda_multi_jobs(method, csr_container): + n_components, X = _build_sparse_array(csr_container) + # Test LDA batch training with multi CPU + rng = np.random.RandomState(0) + lda = LatentDirichletAllocation( + n_components=n_components, + n_jobs=2, + learning_method=method, + evaluate_every=1, + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@if_safe_multiprocessing_with_blas +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_partial_fit_multi_jobs(csr_container): + # Test LDA online training with multi CPU + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + n_jobs=2, + learning_offset=5.0, + total_samples=30, + random_state=rng, + ) + for i in range(2): + lda.partial_fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +def test_lda_preplexity_mismatch(): + # test dimension mismatch in `perplexity` method + rng = np.random.RandomState(0) + n_components = rng.randint(3, 6) + n_samples = rng.randint(6, 10) + X = np.random.randint(4, size=(n_samples, 10)) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=5.0, + total_samples=20, + random_state=rng, + ) + lda.fit(X) + # invalid samples + invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components)) + with pytest.raises(ValueError, match=r"Number of samples"): + lda._perplexity_precomp_distr(X, invalid_n_samples) + # invalid topic number + invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1)) + with 
pytest.raises(ValueError, match=r"Number of topics"): + lda._perplexity_precomp_distr(X, invalid_n_components) + + +@pytest.mark.parametrize("method", ("online", "batch")) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_perplexity(method, csr_container): + # Test LDA perplexity for batch training + # perplexity should be lower after each iteration + n_components, X = _build_sparse_array(csr_container) + lda_1 = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_2 = LatentDirichletAllocation( + n_components=n_components, + max_iter=10, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_1.fit(X) + perp_1 = lda_1.perplexity(X, sub_sampling=False) + + lda_2.fit(X) + perp_2 = lda_2.perplexity(X, sub_sampling=False) + assert perp_1 >= perp_2 + + perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True) + perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True) + assert perp_1_subsampling >= perp_2_subsampling + + +@pytest.mark.parametrize("method", ("online", "batch")) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_score(method, csr_container): + # Test LDA score for batch training + # score should be higher after each iteration + n_components, X = _build_sparse_array(csr_container) + lda_1 = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_2 = LatentDirichletAllocation( + n_components=n_components, + max_iter=10, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_1.fit_transform(X) + score_1 = lda_1.score(X) + + lda_2.fit_transform(X) + score_2 = lda_2.score(X) + assert score_2 >= score_1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_perplexity_input_format(csr_container): + # Test LDA perplexity for sparse and dense input + # score should be the same for both dense and sparse input + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method="batch", + total_samples=100, + random_state=0, + ) + lda.fit(X) + perp_1 = lda.perplexity(X) + perp_2 = lda.perplexity(X.toarray()) + assert_almost_equal(perp_1, perp_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_score_perplexity(csr_container): + # Test the relationship between LDA score and perplexity + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, max_iter=10, random_state=0 + ) + lda.fit(X) + perplexity_1 = lda.perplexity(X, sub_sampling=False) + + score = lda.score(X) + perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data))) + assert_almost_equal(perplexity_1, perplexity_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_perplexity(csr_container): + # Test that the perplexity computed during fit is consistent with what is + # returned by the perplexity method + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method="batch", + random_state=0, + evaluate_every=1, + ) + lda.fit(X) + + # Perplexity computed at end of fit method + perplexity1 = lda.bound_ + + # Result of perplexity method on the train set + perplexity2 = lda.perplexity(X) + + assert_almost_equal(perplexity1, perplexity2) + + 
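+# The score/perplexity relationship checked in test_lda_score_perplexity above
+# follows the definition perplexity = exp(-score / total_word_count). An
+# illustrative sketch, assuming a fitted model `lda` and a count matrix `X`:
+#
+#     total = X.sum()  # equals np.sum(X.data) for sparse input
+#     assert np.isclose(lda.perplexity(X), np.exp(-lda.score(X) / total))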
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_empty_docs(csr_container): + """Test LDA on empty document (all-zero rows).""" + Z = np.zeros((5, 4)) + for X in [Z, csr_container(Z)]: + lda = LatentDirichletAllocation(max_iter=750).fit(X) + assert_almost_equal( + lda.components_.sum(axis=0), np.ones(lda.components_.shape[1]) + ) + + +def test_dirichlet_expectation(): + """Test Cython version of Dirichlet expectation calculation.""" + x = np.logspace(-100, 10, 10000) + expectation = np.empty_like(x) + _dirichlet_expectation_1d(x, 0, expectation) + assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19) + + x = x.reshape(100, 100) + assert_allclose( + _dirichlet_expectation_2d(x), + psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]), + rtol=1e-11, + atol=3e-9, + ) + + +def check_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container +): + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=3, + learning_method="batch", + verbose=verbose, + evaluate_every=evaluate_every, + random_state=0, + ) + out = StringIO() + old_out, sys.stdout = sys.stdout, out + try: + lda.fit(X) + finally: + sys.stdout = old_out + + n_lines = out.getvalue().count("\n") + n_perplexity = out.getvalue().count("perplexity") + assert expected_lines == n_lines + assert expected_perplexities == n_perplexity + + +@pytest.mark.parametrize( + "verbose,evaluate_every,expected_lines,expected_perplexities", + [ + (False, 1, 0, 0), + (False, 0, 0, 0), + (True, 0, 3, 0), + (True, 1, 3, 3), + (True, 2, 3, 1), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container +): + check_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_feature_names_out(csr_container): + """Check feature names out for LatentDirichletAllocation.""" + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation(n_components=n_components).fit(X) + + names = lda.get_feature_names_out() + assert_array_equal( + [f"latentdirichletallocation{i}" for i in range(n_components)], names + ) + + +@pytest.mark.parametrize("learning_method", ("batch", "online")) +def test_lda_dtype_match(learning_method, global_dtype): + """Check data type preservation of fitted attributes.""" + rng = np.random.RandomState(0) + X = rng.uniform(size=(20, 10)).astype(global_dtype, copy=False) + + lda = LatentDirichletAllocation( + n_components=5, random_state=0, learning_method=learning_method + ) + lda.fit(X) + assert lda.components_.dtype == global_dtype + assert lda.exp_dirichlet_component_.dtype == global_dtype + + +@pytest.mark.parametrize("learning_method", ("batch", "online")) +def test_lda_numerical_consistency(learning_method, global_random_seed): + """Check numerical consistency between np.float32 and np.float64.""" + rng = np.random.RandomState(global_random_seed) + X64 = rng.uniform(size=(20, 10)) + X32 = X64.astype(np.float32) + + lda_64 = LatentDirichletAllocation( + n_components=5, random_state=global_random_seed, learning_method=learning_method + ).fit(X64) + lda_32 = LatentDirichletAllocation( + n_components=5, random_state=global_random_seed, learning_method=learning_method + ).fit(X32) + + assert_allclose(lda_32.components_, lda_64.components_) + 
assert_allclose(lda_32.transform(X32), lda_64.transform(X64)) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..44281b9038697e56228a8e0584f7c8ba81d8b969 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py @@ -0,0 +1,987 @@ +import re +import warnings + +import numpy as np +import pytest +import scipy as sp +from numpy.testing import assert_array_equal + +from sklearn import config_context, datasets +from sklearn.base import clone +from sklearn.datasets import load_iris, make_classification +from sklearn.decomposition import PCA +from sklearn.decomposition._pca import _assess_dimension, _infer_dimension +from sklearn.utils._array_api import ( + _atol_for_type, + _convert_to_numpy, + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._array_api import device as array_device +from sklearn.utils._testing import _array_api_for_tests, assert_allclose +from sklearn.utils.estimator_checks import ( + _get_check_estimator_ids, + check_array_api_input_and_values, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +iris = datasets.load_iris() +PCA_SOLVERS = ["full", "arpack", "randomized", "auto"] + +# `SPARSE_M` and `SPARSE_N` could be larger, but be aware: +# * SciPy's generation of random sparse matrix can be costly +# * A (SPARSE_M, SPARSE_N) dense array is allocated to compare against +SPARSE_M, SPARSE_N = 1000, 300 # arbitrary +SPARSE_MAX_COMPONENTS = min(SPARSE_M, SPARSE_N) + + +def _check_fitted_pca_close(pca1, pca2, rtol): + assert_allclose(pca1.components_, pca2.components_, rtol=rtol) + assert_allclose(pca1.explained_variance_, pca2.explained_variance_, rtol=rtol) + assert_allclose(pca1.singular_values_, pca2.singular_values_, rtol=rtol) + assert_allclose(pca1.mean_, pca2.mean_, rtol=rtol) + assert_allclose(pca1.n_components_, pca2.n_components_, rtol=rtol) + assert_allclose(pca1.n_samples_, pca2.n_samples_, rtol=rtol) + assert_allclose(pca1.noise_variance_, pca2.noise_variance_, rtol=rtol) + assert_allclose(pca1.n_features_in_, pca2.n_features_in_, rtol=rtol) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +@pytest.mark.parametrize("n_components", range(1, iris.data.shape[1])) +def test_pca(svd_solver, n_components): + X = iris.data + pca = PCA(n_components=n_components, svd_solver=svd_solver) + + # check the shape of fit.transform + X_r = pca.fit(X).transform(X) + assert X_r.shape[1] == n_components + + # check the equivalence of fit.transform and fit_transform + X_r2 = pca.fit_transform(X) + assert_allclose(X_r, X_r2) + X_r = pca.transform(X) + assert_allclose(X_r, X_r2) + + # Test get_covariance and get_precision + cov = pca.get_covariance() + precision = pca.get_precision() + assert_allclose(np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-12) + + +@pytest.mark.parametrize("density", [0.01, 0.1, 0.30]) +@pytest.mark.parametrize("n_components", [1, 2, 10]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +@pytest.mark.parametrize("svd_solver", ["arpack"]) +@pytest.mark.parametrize("scale", [1, 10, 100]) +def test_pca_sparse( + global_random_seed, svd_solver, sparse_container, n_components, density, scale +): + # Make sure any tolerance changes pass with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all" + rtol = 5e-07 + transform_rtol = 3e-05 + + random_state = np.random.default_rng(global_random_seed) + 
X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=density, + ) + ) + # Scale the data + vary the column means + scale_vector = random_state.random(X.shape[1]) * scale + X = X.multiply(scale_vector) + + pca = PCA( + n_components=n_components, + svd_solver=svd_solver, + random_state=global_random_seed, + ) + pca.fit(X) + + Xd = X.toarray() + pcad = PCA( + n_components=n_components, + svd_solver=svd_solver, + random_state=global_random_seed, + ) + pcad.fit(Xd) + + # Fitted attributes equality + _check_fitted_pca_close(pca, pcad, rtol=rtol) + + # Test transform + X2 = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=density, + ) + ) + X2d = X2.toarray() + + assert_allclose(pca.transform(X2), pca.transform(X2d), rtol=transform_rtol) + assert_allclose(pca.transform(X2), pcad.transform(X2d), rtol=transform_rtol) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_pca_sparse_fit_transform(global_random_seed, sparse_container): + random_state = np.random.default_rng(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=0.01, + ) + ) + X2 = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=0.01, + ) + ) + + pca_fit = PCA(n_components=10, svd_solver="arpack", random_state=global_random_seed) + pca_fit_transform = PCA( + n_components=10, svd_solver="arpack", random_state=global_random_seed + ) + + pca_fit.fit(X) + transformed_X = pca_fit_transform.fit_transform(X) + + _check_fitted_pca_close(pca_fit, pca_fit_transform, rtol=1e-10) + assert_allclose(transformed_X, pca_fit_transform.transform(X), rtol=2e-9) + assert_allclose(transformed_X, pca_fit.transform(X), rtol=2e-9) + assert_allclose(pca_fit.transform(X2), pca_fit_transform.transform(X2), rtol=2e-9) + + +@pytest.mark.parametrize("svd_solver", ["randomized", "full", "auto"]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_sparse_pca_solver_error(global_random_seed, svd_solver, sparse_container): + random_state = np.random.RandomState(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + ) + ) + pca = PCA(n_components=30, svd_solver=svd_solver) + error_msg_pattern = ( + f'PCA only support sparse inputs with the "arpack" solver, while "{svd_solver}"' + " was passed" + ) + with pytest.raises(TypeError, match=error_msg_pattern): + pca.fit(X) + + +def test_no_empty_slice_warning(): + # test if we avoid numpy warnings for computing over empty arrays + n_components = 10 + n_features = n_components + 2 # anything > n_comps triggered it in 0.16 + X = np.random.uniform(-1, 1, size=(n_components, n_features)) + pca = PCA(n_components=n_components) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + pca.fit(X) + + +@pytest.mark.parametrize("copy", [True, False]) +@pytest.mark.parametrize("solver", PCA_SOLVERS) +def test_whitening(solver, copy): + # Check that PCA output has unit-variance + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 80 + n_components = 30 + rank = 50 + + # some low rank data with correlated features + X = np.dot( + rng.randn(n_samples, rank), + np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features)), + ) + # the component-wise variance of the first 50 features is 3 times the + # mean component-wise 
variance of the remaining 30 features + X[:, :50] *= 3 + + assert X.shape == (n_samples, n_features) + + # the component-wise variance is thus highly varying: + assert X.std(axis=0).std() > 43.8 + + # whiten the data while projecting to the lower dim subspace + X_ = X.copy() # make sure we keep an original across iterations. + pca = PCA( + n_components=n_components, + whiten=True, + copy=copy, + svd_solver=solver, + random_state=0, + iterated_power=7, + ) + # test fit_transform + X_whitened = pca.fit_transform(X_.copy()) + assert X_whitened.shape == (n_samples, n_components) + X_whitened2 = pca.transform(X_) + assert_allclose(X_whitened, X_whitened2, rtol=5e-4) + + assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(n_components)) + assert_allclose(X_whitened.mean(axis=0), np.zeros(n_components), atol=1e-12) + + X_ = X.copy() + pca = PCA( + n_components=n_components, whiten=False, copy=copy, svd_solver=solver + ).fit(X_.copy()) + X_unwhitened = pca.transform(X_) + assert X_unwhitened.shape == (n_samples, n_components) + + # in that case the output components still have varying variances + assert X_unwhitened.std(axis=0).std() == pytest.approx(74.1, rel=1e-1) + # we always center, so no test for non-centering. + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_explained_variance_equivalence_solver(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca_full = PCA(n_components=2, svd_solver="full") + pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=0) + + pca_full.fit(X) + pca_other.fit(X) + + assert_allclose( + pca_full.explained_variance_, pca_other.explained_variance_, rtol=5e-2 + ) + assert_allclose( + pca_full.explained_variance_ratio_, + pca_other.explained_variance_ratio_, + rtol=5e-2, + ) + + +@pytest.mark.parametrize( + "X", + [ + np.random.RandomState(0).randn(100, 80), + datasets.make_classification(100, 80, n_informative=78, random_state=0)[0], + ], + ids=["random-data", "correlated-data"], +) +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_explained_variance_empirical(X, svd_solver): + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=0) + X_pca = pca.fit_transform(X) + assert_allclose(pca.explained_variance_, np.var(X_pca, ddof=1, axis=0)) + + expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0] + expected_result = sorted(expected_result, reverse=True)[:2] + assert_allclose(pca.explained_variance_, expected_result, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_singular_values_consistency(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca_full = PCA(n_components=2, svd_solver="full", random_state=rng) + pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + + pca_full.fit(X) + pca_other.fit(X) + + assert_allclose(pca_full.singular_values_, pca_other.singular_values_, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_singular_values(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + X_trans = pca.fit_transform(X) + + # compare to the Frobenius norm + assert_allclose( + np.sum(pca.singular_values_**2), np.linalg.norm(X_trans, "fro") ** 2 + ) + # Compare to the 2-norms of the score vectors + 
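Both comparisons here follow from the SVD of the centered data: if X_c = U S V^T, the PCA scores are U S, so each singular value is the Euclidean norm of one score column and the squared singular values sum to the squared Frobenius norm of the score matrix. A NumPy-only sketch of that identity on made-up data:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(60, 8)
A = A - A.mean(axis=0)                      # PCA centers the data first
U, S, Vt = np.linalg.svd(A, full_matrices=False)
scores = U * S                              # the PCA-transformed data
assert np.allclose(S, np.sqrt(np.sum(scores**2, axis=0)))
assert np.allclose(np.sum(S**2), np.linalg.norm(scores, "fro") ** 2)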
assert_allclose(pca.singular_values_, np.sqrt(np.sum(X_trans**2, axis=0))) + + # set the singular values and see what er get back + n_samples, n_features = 100, 110 + X = rng.randn(n_samples, n_features) + + pca = PCA(n_components=3, svd_solver=svd_solver, random_state=rng) + X_trans = pca.fit_transform(X) + X_trans /= np.sqrt(np.sum(X_trans**2, axis=0)) + X_trans[:, 0] *= 3.142 + X_trans[:, 1] *= 2.718 + X_hat = np.dot(X_trans, pca.components_) + pca.fit(X_hat) + assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0]) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_check_projection(svd_solver): + # Test that the projection of data is correct + rng = np.random.RandomState(0) + n, p = 100, 3 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5]) + Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) + + Yt = PCA(n_components=2, svd_solver=svd_solver).fit(X).transform(Xt) + Yt /= np.sqrt((Yt**2).sum()) + + assert_allclose(np.abs(Yt[0][0]), 1.0, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_check_projection_list(svd_solver): + # Test that the projection of data is correct + X = [[1.0, 0.0], [0.0, 1.0]] + pca = PCA(n_components=1, svd_solver=svd_solver, random_state=0) + X_trans = pca.fit_transform(X) + assert X_trans.shape, (2, 1) + assert_allclose(X_trans.mean(), 0.00, atol=1e-12) + assert_allclose(X_trans.std(), 0.71, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", ["full", "arpack", "randomized"]) +@pytest.mark.parametrize("whiten", [False, True]) +def test_pca_inverse(svd_solver, whiten): + # Test that the projection of data can be inverted + rng = np.random.RandomState(0) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + pca = PCA(n_components=2, svd_solver=svd_solver, whiten=whiten).fit(X) + Y = pca.transform(X) + Y_inverse = pca.inverse_transform(Y) + assert_allclose(X, Y_inverse, rtol=5e-6) + + +@pytest.mark.parametrize( + "data", [np.array([[0, 1, 0], [1, 0, 0]]), np.array([[0, 1, 0], [1, 0, 0]]).T] +) +@pytest.mark.parametrize( + "svd_solver, n_components, err_msg", + [ + ("arpack", 0, r"must be between 1 and min\(n_samples, n_features\)"), + ("randomized", 0, r"must be between 1 and min\(n_samples, n_features\)"), + ("arpack", 2, r"must be strictly less than min"), + ( + "auto", + 3, + ( + r"n_components=3 must be between 0 and min\(n_samples, " + r"n_features\)=2 with svd_solver='full'" + ), + ), + ], +) +def test_pca_validation(svd_solver, data, n_components, err_msg): + # Ensures that solver-specific extreme inputs for the n_components + # parameter raise errors + smallest_d = 2 # The smallest dimension + pca_fitted = PCA(n_components, svd_solver=svd_solver) + + with pytest.raises(ValueError, match=err_msg): + pca_fitted.fit(data) + + # Additional case for arpack + if svd_solver == "arpack": + n_components = smallest_d + + err_msg = ( + "n_components={}L? must be strictly less than " + r"min\(n_samples, n_features\)={}L? 
with " + "svd_solver='arpack'".format(n_components, smallest_d) + ) + with pytest.raises(ValueError, match=err_msg): + PCA(n_components, svd_solver=svd_solver).fit(data) + + +@pytest.mark.parametrize( + "solver, n_components_", + [ + ("full", min(iris.data.shape)), + ("arpack", min(iris.data.shape) - 1), + ("randomized", min(iris.data.shape)), + ], +) +@pytest.mark.parametrize("data", [iris.data, iris.data.T]) +def test_n_components_none(data, solver, n_components_): + pca = PCA(svd_solver=solver) + pca.fit(data) + assert pca.n_components_ == n_components_ + + +@pytest.mark.parametrize("svd_solver", ["auto", "full"]) +def test_n_components_mle(svd_solver): + # Ensure that n_components == 'mle' doesn't raise error for auto/full + rng = np.random.RandomState(0) + n_samples, n_features = 600, 10 + X = rng.randn(n_samples, n_features) + pca = PCA(n_components="mle", svd_solver=svd_solver) + pca.fit(X) + assert pca.n_components_ == 1 + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_n_components_mle_error(svd_solver): + # Ensure that n_components == 'mle' will raise an error for unsupported + # solvers + rng = np.random.RandomState(0) + n_samples, n_features = 600, 10 + X = rng.randn(n_samples, n_features) + pca = PCA(n_components="mle", svd_solver=svd_solver) + err_msg = "n_components='mle' cannot be a string with svd_solver='{}'".format( + svd_solver + ) + with pytest.raises(ValueError, match=err_msg): + pca.fit(X) + + +def test_pca_dim(): + # Check automated dimensionality setting + rng = np.random.RandomState(0) + n, p = 100, 5 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + pca = PCA(n_components="mle", svd_solver="full").fit(X) + assert pca.n_components == "mle" + assert pca.n_components_ == 1 + + +def test_infer_dim_1(): + # TODO: explain what this is testing + # Or at least use explicit variable names... + n, p = 1000, 5 + rng = np.random.RandomState(0) + X = ( + rng.randn(n, p) * 0.1 + + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) + + np.array([1, 0, 7, 4, 6]) + ) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + ll = np.array([_assess_dimension(spect, k, n) for k in range(1, p)]) + assert ll[1] > ll.max() - 0.01 * n + + +def test_infer_dim_2(): + # TODO: explain what this is testing + # Or at least use explicit variable names... 
+ n, p = 1000, 5 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + X[10:20] += np.array([6, 0, 7, 2, -1]) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + assert _infer_dimension(spect, n) > 1 + + +def test_infer_dim_3(): + n, p = 100, 5 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + X[10:20] += np.array([6, 0, 7, 2, -1]) + X[30:40] += 2 * np.array([-1, 1, -1, 1, -1]) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + assert _infer_dimension(spect, n) > 2 + + +@pytest.mark.parametrize( + "X, n_components, n_components_validated", + [ + (iris.data, 0.95, 2), # row > col + (iris.data, 0.01, 1), # row > col + (np.random.RandomState(0).rand(5, 20), 0.5, 2), + ], # row < col +) +def test_infer_dim_by_explained_variance(X, n_components, n_components_validated): + pca = PCA(n_components=n_components, svd_solver="full") + pca.fit(X) + assert pca.n_components == pytest.approx(n_components) + assert pca.n_components_ == n_components_validated + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_score(svd_solver): + # Test that probabilistic PCA scoring yields a reasonable score + n, p = 1000, 3 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5]) + pca = PCA(n_components=2, svd_solver=svd_solver) + pca.fit(X) + + ll1 = pca.score(X) + h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1**2) * p + assert_allclose(ll1 / h, 1, rtol=5e-2) + + ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5])) + assert ll1 > ll2 + + pca = PCA(n_components=2, whiten=True, svd_solver=svd_solver) + pca.fit(X) + ll2 = pca.score(X) + assert ll1 > ll2 + + +def test_pca_score3(): + # Check that probabilistic PCA selects the right model + n, p = 200, 3 + rng = np.random.RandomState(0) + Xl = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7]) + Xt = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7]) + ll = np.zeros(p) + for k in range(p): + pca = PCA(n_components=k, svd_solver="full") + pca.fit(Xl) + ll[k] = pca.score(Xt) + + assert ll.argmax() == 1 + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_sanity_noise_variance(svd_solver): + # Sanity check for the noise_variance_. 
For more details see + # https://github.com/scikit-learn/scikit-learn/issues/7568 + # https://github.com/scikit-learn/scikit-learn/issues/8541 + # https://github.com/scikit-learn/scikit-learn/issues/8544 + X, _ = datasets.load_digits(return_X_y=True) + pca = PCA(n_components=30, svd_solver=svd_solver, random_state=0) + pca.fit(X) + assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0) + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_score_consistency_solvers(svd_solver): + # Check the consistency of score between solvers + X, _ = datasets.load_digits(return_X_y=True) + pca_full = PCA(n_components=30, svd_solver="full", random_state=0) + pca_other = PCA(n_components=30, svd_solver=svd_solver, random_state=0) + pca_full.fit(X) + pca_other.fit(X) + assert_allclose(pca_full.score(X), pca_other.score(X), rtol=5e-6) + + +# arpack raises ValueError for n_components == min(n_samples, n_features) +@pytest.mark.parametrize("svd_solver", ["full", "randomized"]) +def test_pca_zero_noise_variance_edge_cases(svd_solver): + # ensure that noise_variance_ is 0 in edge cases + # when n_components == min(n_samples, n_features) + n, p = 100, 3 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5]) + + pca = PCA(n_components=p, svd_solver=svd_solver) + pca.fit(X) + assert pca.noise_variance_ == 0 + # Non-regression test for gh-12489 + # ensure no divide-by-zero error for n_components == n_features < n_samples + pca.score(X) + + pca.fit(X.T) + assert pca.noise_variance_ == 0 + # Non-regression test for gh-12489 + # ensure no divide-by-zero error for n_components == n_samples < n_features + pca.score(X.T) + + +@pytest.mark.parametrize( + "data, n_components, expected_solver", + [ # case: n_components in (0,1) => 'full' + (np.random.RandomState(0).uniform(size=(1000, 50)), 0.5, "full"), + # case: max(X.shape) <= 500 => 'full' + (np.random.RandomState(0).uniform(size=(10, 50)), 5, "full"), + # case: n_components >= .8 * min(X.shape) => 'full' + (np.random.RandomState(0).uniform(size=(1000, 50)), 50, "full"), + # n_components >= 1 and n_components < .8*min(X.shape) => 'randomized' + (np.random.RandomState(0).uniform(size=(1000, 50)), 10, "randomized"), + ], +) +def test_pca_svd_solver_auto(data, n_components, expected_solver): + pca_auto = PCA(n_components=n_components, random_state=0) + pca_test = PCA( + n_components=n_components, svd_solver=expected_solver, random_state=0 + ) + pca_auto.fit(data) + pca_test.fit(data) + assert_allclose(pca_auto.components_, pca_test.components_) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_deterministic_output(svd_solver): + rng = np.random.RandomState(0) + X = rng.rand(10, 10) + + transformed_X = np.zeros((20, 2)) + for i in range(20): + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + transformed_X[i, :] = pca.fit_transform(X)[0] + assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2)) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_dtype_preservation(svd_solver): + check_pca_float_dtype_preservation(svd_solver) + check_pca_int_dtype_upcast_to_double(svd_solver) + + +def check_pca_float_dtype_preservation(svd_solver): + # Ensure that PCA does not upscale the dtype when input is float32 + X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False) + X_32 = X_64.astype(np.float32) + + pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64) + pca_32 = PCA(n_components=3, 
svd_solver=svd_solver, random_state=0).fit(X_32) + + assert pca_64.components_.dtype == np.float64 + assert pca_32.components_.dtype == np.float32 + assert pca_64.transform(X_64).dtype == np.float64 + assert pca_32.transform(X_32).dtype == np.float32 + + # the rtol is set such that the test passes on all platforms tested on + # conda-forge: PR#15775 + # see: https://github.com/conda-forge/scikit-learn-feedstock/pull/113 + assert_allclose(pca_64.components_, pca_32.components_, rtol=2e-4) + + +def check_pca_int_dtype_upcast_to_double(svd_solver): + # Ensure that all int types will be upcast to float64 + X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4)) + X_i64 = X_i64.astype(np.int64, copy=False) + X_i32 = X_i64.astype(np.int32, copy=False) + + pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64) + pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32) + + assert pca_64.components_.dtype == np.float64 + assert pca_32.components_.dtype == np.float64 + assert pca_64.transform(X_i64).dtype == np.float64 + assert pca_32.transform(X_i32).dtype == np.float64 + + assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-4) + + +def test_pca_n_components_mostly_explained_variance_ratio(): + # when n_components is the second highest cumulative sum of the + # explained_variance_ratio_, then n_components_ should equal the + # number of features in the dataset #15669 + X, y = load_iris(return_X_y=True) + pca1 = PCA().fit(X, y) + + n_components = pca1.explained_variance_ratio_.cumsum()[-2] + pca2 = PCA(n_components=n_components).fit(X, y) + assert pca2.n_components_ == X.shape[1] + + +def test_assess_dimension_bad_rank(): + # Test error when tested rank not in [1, n_features - 1] + spectrum = np.array([1, 1e-30, 1e-30, 1e-30]) + n_samples = 10 + for rank in (0, 5): + with pytest.raises(ValueError, match=r"should be in \[1, n_features - 1\]"): + _assess_dimension(spectrum, rank, n_samples) + + +def test_small_eigenvalues_mle(): + # Test rank associated with tiny eigenvalues are given a log-likelihood of + # -inf. 
The inferred rank will be 1 + spectrum = np.array([1, 1e-30, 1e-30, 1e-30]) + + assert _assess_dimension(spectrum, rank=1, n_samples=10) > -np.inf + + for rank in (2, 3): + assert _assess_dimension(spectrum, rank, 10) == -np.inf + + assert _infer_dimension(spectrum, 10) == 1 + + +def test_mle_redundant_data(): + # Test 'mle' with pathological X: only one relevant feature should give a + # rank of 1 + X, _ = datasets.make_classification( + n_features=20, + n_informative=1, + n_repeated=18, + n_redundant=1, + n_clusters_per_class=1, + random_state=42, + ) + pca = PCA(n_components="mle").fit(X) + assert pca.n_components_ == 1 + + +def test_fit_mle_too_few_samples(): + # Tests that an error is raised when the number of samples is smaller + # than the number of features during an mle fit + X, _ = datasets.make_classification(n_samples=20, n_features=21, random_state=42) + + pca = PCA(n_components="mle", svd_solver="full") + with pytest.raises( + ValueError, + match="n_components='mle' is only supported if n_samples >= n_features", + ): + pca.fit(X) + + +def test_mle_simple_case(): + # non-regression test for issue + # https://github.com/scikit-learn/scikit-learn/issues/16730 + n_samples, n_dim = 1000, 10 + X = np.random.RandomState(0).randn(n_samples, n_dim) + X[:, -1] = np.mean(X[:, :-1], axis=-1) # true X dim is ndim - 1 + pca_skl = PCA("mle", svd_solver="full") + pca_skl.fit(X) + assert pca_skl.n_components_ == n_dim - 1 + + +def test_assess_dimesion_rank_one(): + # Make sure assess_dimension works properly on a matrix of rank 1 + n_samples, n_features = 9, 6 + X = np.ones((n_samples, n_features)) # rank 1 matrix + _, s, _ = np.linalg.svd(X, full_matrices=True) + # except for rank 1, all eigenvalues are 0 resp. close to 0 (FP) + assert_allclose(s[1:], np.zeros(n_features - 1), atol=1e-12) + + assert np.isfinite(_assess_dimension(s, rank=1, n_samples=n_samples)) + for rank in range(2, n_features): + assert _assess_dimension(s, rank, n_samples) == -np.inf + + +def test_pca_randomized_svd_n_oversamples(): + """Check that exposing and setting `n_oversamples` will provide accurate results + even when `X` as a large number of features. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20589 + """ + rng = np.random.RandomState(0) + n_features = 100 + X = rng.randn(1_000, n_features) + + # The default value of `n_oversamples` will lead to inaccurate results + # We force it to the number of features. 
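The randomized solver is built on `sklearn.utils.extmath.randomized_svd`, where `n_components + n_oversamples` sets the size of the random sketch that the data is projected onto. A standalone illustration on made-up data: once the sketch covers all columns, the leading singular triplet agrees with the exact SVD up to sign.

import numpy as np
from sklearn.utils.extmath import randomized_svd

rng = np.random.RandomState(0)
X = rng.randn(1_000, 100)
_, S_exact, Vt_exact = np.linalg.svd(X, full_matrices=False)
# Sketch size is n_components + n_oversamples = 101 >= 100 columns, so the
# randomized range finder spans the full column space here.
_, S_rand, Vt_rand = randomized_svd(
    X, n_components=1, n_oversamples=100, random_state=0
)
assert np.allclose(S_rand[0], S_exact[0])
assert np.allclose(np.abs(Vt_rand[0]), np.abs(Vt_exact[0]))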
+ pca_randomized = PCA( + n_components=1, + svd_solver="randomized", + n_oversamples=n_features, + random_state=0, + ).fit(X) + pca_full = PCA(n_components=1, svd_solver="full").fit(X) + pca_arpack = PCA(n_components=1, svd_solver="arpack", random_state=0).fit(X) + + assert_allclose(np.abs(pca_full.components_), np.abs(pca_arpack.components_)) + assert_allclose(np.abs(pca_randomized.components_), np.abs(pca_arpack.components_)) + + +def test_feature_names_out(): + """Check feature names out for PCA.""" + pca = PCA(n_components=2).fit(iris.data) + + names = pca.get_feature_names_out() + assert_array_equal([f"pca{i}" for i in range(2)], names) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_variance_correctness(copy): + """Check the accuracy of PCA's internal variance calculation""" + rng = np.random.RandomState(0) + X = rng.randn(1000, 200) + pca = PCA().fit(X) + pca_var = pca.explained_variance_ / pca.explained_variance_ratio_ + true_var = np.var(X, ddof=1, axis=0).sum() + np.testing.assert_allclose(pca_var, true_var) + + +def check_array_api_get_precision(name, estimator, array_namespace, device, dtype_name): + xp = _array_api_for_tests(array_namespace, device) + iris_np = iris.data.astype(dtype_name) + iris_xp = xp.asarray(iris_np, device=device) + + estimator.fit(iris_np) + precision_np = estimator.get_precision() + covariance_np = estimator.get_covariance() + + with config_context(array_api_dispatch=True): + estimator_xp = clone(estimator).fit(iris_xp) + precision_xp = estimator_xp.get_precision() + assert precision_xp.shape == (4, 4) + assert precision_xp.dtype == iris_xp.dtype + + assert_allclose( + _convert_to_numpy(precision_xp, xp=xp), + precision_np, + atol=_atol_for_type(dtype_name), + ) + covariance_xp = estimator_xp.get_covariance() + assert covariance_xp.shape == (4, 4) + assert covariance_xp.dtype == iris_xp.dtype + + assert_allclose( + _convert_to_numpy(covariance_xp, xp=xp), + covariance_np, + atol=_atol_for_type(dtype_name), + ) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_input_and_values, check_array_api_get_precision], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + PCA(n_components=2, svd_solver="full"), + PCA(n_components=0.1, svd_solver="full", whiten=True), + PCA( + n_components=2, + svd_solver="randomized", + power_iteration_normalizer="QR", + random_state=0, # how to use global_random_seed here? + ), + ], + ids=_get_check_estimator_ids, +) +def test_pca_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_get_precision], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + # PCA with mle cannot use check_array_api_input_and_values because of + # rounding errors in the noisy (low variance) components. Even checking + # the shape of the `components_` is problematic because the number of + # components depends on trimming threshold of the mle algorithm which + # can depend on device-specific rounding errors. 
+ PCA(n_components="mle", svd_solver="full"), + ], + ids=_get_check_estimator_ids, +) +def test_pca_mle_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + # Simpler variant of the generic check_array_api_input checker tailored for + # the specific case of PCA with mle-trimmed components. + xp = _array_api_for_tests(array_namespace, device) + + X, y = make_classification(random_state=42) + X = X.astype(dtype_name, copy=False) + atol = _atol_for_type(X.dtype) + + est = clone(estimator) + + X_xp = xp.asarray(X, device=device) + y_xp = xp.asarray(y, device=device) + + est.fit(X, y) + + components_np = est.components_ + explained_variance_np = est.explained_variance_ + + est_xp = clone(est) + with config_context(array_api_dispatch=True): + est_xp.fit(X_xp, y_xp) + components_xp = est_xp.components_ + assert array_device(components_xp) == array_device(X_xp) + components_xp_np = _convert_to_numpy(components_xp, xp=xp) + + explained_variance_xp = est_xp.explained_variance_ + assert array_device(explained_variance_xp) == array_device(X_xp) + explained_variance_xp_np = _convert_to_numpy(explained_variance_xp, xp=xp) + + assert components_xp_np.dtype == components_np.dtype + assert components_xp_np.shape[1] == components_np.shape[1] + assert explained_variance_xp_np.dtype == explained_variance_np.dtype + + # Check that the explained variance values match for the + # common components: + min_components = min(components_xp_np.shape[0], components_np.shape[0]) + assert_allclose( + explained_variance_xp_np[:min_components], + explained_variance_np[:min_components], + atol=atol, + ) + + # If the number of components differ, check that the explained variance of + # the trimmed components is very small. + if components_xp_np.shape[0] != components_np.shape[0]: + reference_variance = explained_variance_np[-1] + extra_variance_np = explained_variance_np[min_components:] + extra_variance_xp_np = explained_variance_xp_np[min_components:] + assert all(np.abs(extra_variance_np - reference_variance) < atol) + assert all(np.abs(extra_variance_xp_np - reference_variance) < atol) + + +def test_array_api_error_and_warnings_on_unsupported_params(): + pytest.importorskip("array_api_compat") + xp = pytest.importorskip("numpy.array_api") + iris_xp = xp.asarray(iris.data) + + pca = PCA(n_components=2, svd_solver="arpack", random_state=0) + expected_msg = re.escape( + "PCA with svd_solver='arpack' is not supported for Array API inputs." + ) + with pytest.raises(ValueError, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) + + pca.set_params(svd_solver="randomized", power_iteration_normalizer="LU") + expected_msg = re.escape( + "Array API does not support LU factorization. Set" + " `power_iteration_normalizer='QR'` instead." + ) + with pytest.raises(ValueError, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) + + pca.set_params(svd_solver="randomized", power_iteration_normalizer="auto") + expected_msg = re.escape( + "Array API does not support LU factorization, falling back to QR instead. Set" + " `power_iteration_normalizer='QR'` explicitly to silence this warning." 
+ ) + with pytest.warns(UserWarning, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py new file mode 100644 index 0000000000000000000000000000000000000000..4edb7d4a111094ce7c3ddafb8ef7a3024d76a964 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py @@ -0,0 +1,212 @@ +"""Test truncated SVD transformer.""" + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn.decomposition import PCA, TruncatedSVD +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose, assert_array_less + +SVD_SOLVERS = ["arpack", "randomized"] + + +@pytest.fixture(scope="module") +def X_sparse(): + # Make an X that looks somewhat like a small tf-idf matrix. + rng = check_random_state(42) + X = sp.random(60, 55, density=0.2, format="csr", random_state=rng) + X.data[:] = 1 + np.log(X.data) + return X + + +@pytest.mark.parametrize("solver", ["randomized"]) +@pytest.mark.parametrize("kind", ("dense", "sparse")) +def test_solvers(X_sparse, solver, kind): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd_a = TruncatedSVD(30, algorithm="arpack") + svd = TruncatedSVD(30, algorithm=solver, random_state=42, n_oversamples=100) + + Xa = svd_a.fit_transform(X)[:, :6] + Xr = svd.fit_transform(X)[:, :6] + assert_allclose(Xa, Xr, rtol=2e-3) + + comp_a = np.abs(svd_a.components_) + comp = np.abs(svd.components_) + # All elements are equal, but some elements are more equal than others. + assert_allclose(comp_a[:9], comp[:9], rtol=1e-3) + assert_allclose(comp_a[9:], comp[9:], atol=1e-2) + + +@pytest.mark.parametrize("n_components", (10, 25, 41, 55)) +def test_attributes(n_components, X_sparse): + n_features = X_sparse.shape[1] + tsvd = TruncatedSVD(n_components).fit(X_sparse) + assert tsvd.n_components == n_components + assert tsvd.components_.shape == (n_components, n_features) + + +@pytest.mark.parametrize( + "algorithm, n_components", + [ + ("arpack", 55), + ("arpack", 56), + ("randomized", 56), + ], +) +def test_too_many_components(X_sparse, algorithm, n_components): + tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm) + with pytest.raises(ValueError): + tsvd.fit(X_sparse) + + +@pytest.mark.parametrize("fmt", ("array", "csr", "csc", "coo", "lil")) +def test_sparse_formats(fmt, X_sparse): + n_samples = X_sparse.shape[0] + Xfmt = X_sparse.toarray() if fmt == "dense" else getattr(X_sparse, "to" + fmt)() + tsvd = TruncatedSVD(n_components=11) + Xtrans = tsvd.fit_transform(Xfmt) + assert Xtrans.shape == (n_samples, 11) + Xtrans = tsvd.transform(Xfmt) + assert Xtrans.shape == (n_samples, 11) + + +@pytest.mark.parametrize("algo", SVD_SOLVERS) +def test_inverse_transform(algo, X_sparse): + # We need a lot of components for the reconstruction to be "almost + # equal" in all positions. XXX Test means or sums instead? 
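For orientation, a minimal usage sketch of the estimator these tests exercise (made-up sparse data, not one of the tests): `TruncatedSVD` consumes a sparse matrix directly, without densifying or centering it, and `inverse_transform` maps the reduced scores back to the original feature space.

import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD

rng = np.random.RandomState(0)
X = sp.random(60, 55, density=0.2, format="csr", random_state=rng)
svd = TruncatedSVD(n_components=5, random_state=0)
Xt = svd.fit_transform(X)            # dense (60, 5) array of scores
X_back = svd.inverse_transform(Xt)   # dense (60, 55) low-rank reconstruction
assert Xt.shape == (60, 5) and X_back.shape == (60, 55)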
+ tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo) + Xt = tsvd.fit_transform(X_sparse) + Xinv = tsvd.inverse_transform(Xt) + assert_allclose(Xinv, X_sparse.toarray(), rtol=1e-1, atol=2e-1) + + +def test_integers(X_sparse): + n_samples = X_sparse.shape[0] + Xint = X_sparse.astype(np.int64) + tsvd = TruncatedSVD(n_components=6) + Xtrans = tsvd.fit_transform(Xint) + assert Xtrans.shape == (n_samples, tsvd.n_components) + + +@pytest.mark.parametrize("kind", ("dense", "sparse")) +@pytest.mark.parametrize("n_components", [10, 20]) +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_explained_variance(X_sparse, kind, n_components, solver): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd = TruncatedSVD(n_components, algorithm=solver) + X_tr = svd.fit_transform(X) + # Assert that all the values are greater than 0 + assert_array_less(0.0, svd.explained_variance_ratio_) + + # Assert that total explained variance is less than 1 + assert_array_less(svd.explained_variance_ratio_.sum(), 1.0) + + # Test that explained_variance is correct + total_variance = np.var(X_sparse.toarray(), axis=0).sum() + variances = np.var(X_tr, axis=0) + true_explained_variance_ratio = variances / total_variance + + assert_allclose( + svd.explained_variance_ratio_, + true_explained_variance_ratio, + ) + + +@pytest.mark.parametrize("kind", ("dense", "sparse")) +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_explained_variance_components_10_20(X_sparse, kind, solver): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd_10 = TruncatedSVD(10, algorithm=solver, n_iter=10).fit(X) + svd_20 = TruncatedSVD(20, algorithm=solver, n_iter=10).fit(X) + + # Assert the 1st component is equal + assert_allclose( + svd_10.explained_variance_ratio_, + svd_20.explained_variance_ratio_[:10], + rtol=5e-3, + ) + + # Assert that 20 components has higher explained variance than 10 + assert ( + svd_20.explained_variance_ratio_.sum() > svd_10.explained_variance_ratio_.sum() + ) + + +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_singular_values_consistency(solver): + # Check that the TruncatedSVD output has the correct singular values + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca = TruncatedSVD(n_components=2, algorithm=solver, random_state=rng).fit(X) + + # Compare to the Frobenius norm + X_pca = pca.transform(X) + assert_allclose( + np.sum(pca.singular_values_**2.0), + np.linalg.norm(X_pca, "fro") ** 2.0, + rtol=1e-2, + ) + + # Compare to the 2-norms of the score vectors + assert_allclose( + pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), rtol=1e-2 + ) + + +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_singular_values_expected(solver): + # Set the singular values and see what we get back + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 110 + + X = rng.randn(n_samples, n_features) + + pca = TruncatedSVD(n_components=3, algorithm=solver, random_state=rng) + X_pca = pca.fit_transform(X) + + X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) + X_pca[:, 0] *= 3.142 + X_pca[:, 1] *= 2.718 + + X_hat_pca = np.dot(X_pca, pca.components_) + pca.fit(X_hat_pca) + assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0], rtol=1e-14) + + +def test_truncated_svd_eq_pca(X_sparse): + # TruncatedSVD should be equal to PCA on centered data + + X_dense = X_sparse.toarray() + + X_c = X_dense - X_dense.mean(axis=0) + + params = dict(n_components=10, random_state=42) + + svd = 
TruncatedSVD(algorithm="arpack", **params) + pca = PCA(svd_solver="arpack", **params) + + Xt_svd = svd.fit_transform(X_c) + Xt_pca = pca.fit_transform(X_c) + + assert_allclose(Xt_svd, Xt_pca, rtol=1e-9) + assert_allclose(pca.mean_, 0, atol=1e-9) + assert_allclose(svd.components_, pca.components_) + + +@pytest.mark.parametrize( + "algorithm, tol", [("randomized", 0.0), ("arpack", 1e-6), ("arpack", 0.0)] +) +@pytest.mark.parametrize("kind", ("dense", "sparse")) +def test_fit_transform(X_sparse, algorithm, tol, kind): + # fit_transform(X) should equal fit(X).transform(X) + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd = TruncatedSVD( + n_components=5, n_iter=7, random_state=42, algorithm=algorithm, tol=tol + ) + X_transformed_1 = svd.fit_transform(X) + X_transformed_2 = svd.fit(X).transform(X) + assert_allclose(X_transformed_1, X_transformed_2) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__init__.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f4a3756bdaf1d3c7f7a5145b7287ae4e56809b2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/__init__.py @@ -0,0 +1,44 @@ +""" +The :mod:`sklearn.ensemble` module includes ensemble-based methods for +classification, regression and anomaly detection. +""" +from ._bagging import BaggingClassifier, BaggingRegressor +from ._base import BaseEnsemble +from ._forest import ( + ExtraTreesClassifier, + ExtraTreesRegressor, + RandomForestClassifier, + RandomForestRegressor, + RandomTreesEmbedding, +) +from ._gb import GradientBoostingClassifier, GradientBoostingRegressor +from ._hist_gradient_boosting.gradient_boosting import ( + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, +) +from ._iforest import IsolationForest +from ._stacking import StackingClassifier, StackingRegressor +from ._voting import VotingClassifier, VotingRegressor +from ._weight_boosting import AdaBoostClassifier, AdaBoostRegressor + +__all__ = [ + "BaseEnsemble", + "RandomForestClassifier", + "RandomForestRegressor", + "RandomTreesEmbedding", + "ExtraTreesClassifier", + "ExtraTreesRegressor", + "BaggingClassifier", + "BaggingRegressor", + "IsolationForest", + "GradientBoostingClassifier", + "GradientBoostingRegressor", + "AdaBoostClassifier", + "AdaBoostRegressor", + "VotingClassifier", + "VotingRegressor", + "StackingClassifier", + "StackingRegressor", + "HistGradientBoostingClassifier", + "HistGradientBoostingRegressor", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_bagging.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_bagging.py new file mode 100644 index 0000000000000000000000000000000000000000..da340ceec6fe47dd0abaf945291de3afdc000cb0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_bagging.py @@ -0,0 +1,1242 @@ +"""Bagging meta-estimator.""" + +# Author: Gilles Louppe +# License: BSD 3 clause + + +import itertools +import numbers +from abc import ABCMeta, abstractmethod +from functools import partial +from numbers import Integral +from warnings import warn + +import numpy as np + +from ..base import ClassifierMixin, RegressorMixin, _fit_context +from ..metrics import accuracy_score, r2_score +from ..tree import DecisionTreeClassifier, DecisionTreeRegressor +from ..utils import check_random_state, column_or_1d, indices_to_mask +from ..utils._param_validation import HasMethods, Interval, RealNotInt +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( 
+ _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.metaestimators import available_if +from ..utils.multiclass import check_classification_targets +from ..utils.parallel import Parallel, delayed +from ..utils.random import sample_without_replacement +from ..utils.validation import _check_sample_weight, check_is_fitted, has_fit_parameter +from ._base import BaseEnsemble, _partition_estimators + +__all__ = ["BaggingClassifier", "BaggingRegressor"] + +MAX_INT = np.iinfo(np.int32).max + + +def _generate_indices(random_state, bootstrap, n_population, n_samples): + """Draw randomly sampled indices.""" + # Draw sample indices + if bootstrap: + indices = random_state.randint(0, n_population, n_samples) + else: + indices = sample_without_replacement( + n_population, n_samples, random_state=random_state + ) + + return indices + + +def _generate_bagging_indices( + random_state, + bootstrap_features, + bootstrap_samples, + n_features, + n_samples, + max_features, + max_samples, +): + """Randomly draw feature and sample indices.""" + # Get valid random state + random_state = check_random_state(random_state) + + # Draw indices + feature_indices = _generate_indices( + random_state, bootstrap_features, n_features, max_features + ) + sample_indices = _generate_indices( + random_state, bootstrap_samples, n_samples, max_samples + ) + + return feature_indices, sample_indices + + +def _parallel_build_estimators( + n_estimators, + ensemble, + X, + y, + sample_weight, + seeds, + total_n_estimators, + verbose, + check_input, +): + """Private function used to build a batch of estimators within a job.""" + # Retrieve settings + n_samples, n_features = X.shape + max_features = ensemble._max_features + max_samples = ensemble._max_samples + bootstrap = ensemble.bootstrap + bootstrap_features = ensemble.bootstrap_features + support_sample_weight = has_fit_parameter(ensemble.estimator_, "sample_weight") + has_check_input = has_fit_parameter(ensemble.estimator_, "check_input") + requires_feature_indexing = bootstrap_features or max_features != n_features + + if not support_sample_weight and sample_weight is not None: + raise ValueError("The base estimator doesn't support sample weight") + + # Build estimators + estimators = [] + estimators_features = [] + + for i in range(n_estimators): + if verbose > 1: + print( + "Building estimator %d of %d for this parallel run (total %d)..." 
+ % (i + 1, n_estimators, total_n_estimators) + ) + + random_state = seeds[i] + estimator = ensemble._make_estimator(append=False, random_state=random_state) + + if has_check_input: + estimator_fit = partial(estimator.fit, check_input=check_input) + else: + estimator_fit = estimator.fit + + # Draw random feature, sample indices + features, indices = _generate_bagging_indices( + random_state, + bootstrap_features, + bootstrap, + n_features, + n_samples, + max_features, + max_samples, + ) + + # Draw samples, using sample weights, and then fit + if support_sample_weight: + if sample_weight is None: + curr_sample_weight = np.ones((n_samples,)) + else: + curr_sample_weight = sample_weight.copy() + + if bootstrap: + sample_counts = np.bincount(indices, minlength=n_samples) + curr_sample_weight *= sample_counts + else: + not_indices_mask = ~indices_to_mask(indices, n_samples) + curr_sample_weight[not_indices_mask] = 0 + + X_ = X[:, features] if requires_feature_indexing else X + estimator_fit(X_, y, sample_weight=curr_sample_weight) + else: + X_ = X[indices][:, features] if requires_feature_indexing else X[indices] + estimator_fit(X_, y[indices]) + + estimators.append(estimator) + estimators_features.append(features) + + return estimators, estimators_features + + +def _parallel_predict_proba(estimators, estimators_features, X, n_classes): + """Private function used to compute (proba-)predictions within a job.""" + n_samples = X.shape[0] + proba = np.zeros((n_samples, n_classes)) + + for estimator, features in zip(estimators, estimators_features): + if hasattr(estimator, "predict_proba"): + proba_estimator = estimator.predict_proba(X[:, features]) + + if n_classes == len(estimator.classes_): + proba += proba_estimator + + else: + proba[:, estimator.classes_] += proba_estimator[ + :, range(len(estimator.classes_)) + ] + + else: + # Resort to voting + predictions = estimator.predict(X[:, features]) + + for i in range(n_samples): + proba[i, predictions[i]] += 1 + + return proba + + +def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes): + """Private function used to compute log probabilities within a job.""" + n_samples = X.shape[0] + log_proba = np.empty((n_samples, n_classes)) + log_proba.fill(-np.inf) + all_classes = np.arange(n_classes, dtype=int) + + for estimator, features in zip(estimators, estimators_features): + log_proba_estimator = estimator.predict_log_proba(X[:, features]) + + if n_classes == len(estimator.classes_): + log_proba = np.logaddexp(log_proba, log_proba_estimator) + + else: + log_proba[:, estimator.classes_] = np.logaddexp( + log_proba[:, estimator.classes_], + log_proba_estimator[:, range(len(estimator.classes_))], + ) + + missing = np.setdiff1d(all_classes, estimator.classes_) + log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf) + + return log_proba + + +def _parallel_decision_function(estimators, estimators_features, X): + """Private function used to compute decisions within a job.""" + return sum( + estimator.decision_function(X[:, features]) + for estimator, features in zip(estimators, estimators_features) + ) + + +def _parallel_predict_regression(estimators, estimators_features, X): + """Private function used to compute predictions within a job.""" + return sum( + estimator.predict(X[:, features]) + for estimator, features in zip(estimators, estimators_features) + ) + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. 
+ + First, we check the first fitted estimator if available, otherwise we + check the estimator attribute. + """ + + def check(self): + if hasattr(self, "estimators_"): + return hasattr(self.estimators_[0], attr) + else: # self.estimator is not None + return hasattr(self.estimator, attr) + + return check + + +class BaseBagging(BaseEnsemble, metaclass=ABCMeta): + """Base class for Bagging meta-estimator. + + Warning: This class should not be used directly. Use derived classes + instead. + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "predict"]), None], + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "max_samples": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + ], + "max_features": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + ], + "bootstrap": ["boolean"], + "bootstrap_features": ["boolean"], + "oob_score": ["boolean"], + "warm_start": ["boolean"], + "n_jobs": [None, Integral], + "random_state": ["random_state"], + "verbose": ["verbose"], + } + + @abstractmethod + def __init__( + self, + estimator=None, + n_estimators=10, + *, + max_samples=1.0, + max_features=1.0, + bootstrap=True, + bootstrap_features=False, + oob_score=False, + warm_start=False, + n_jobs=None, + random_state=None, + verbose=0, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + ) + self.max_samples = max_samples + self.max_features = max_features + self.bootstrap = bootstrap + self.bootstrap_features = bootstrap_features + self.oob_score = oob_score + self.warm_start = warm_start + self.n_jobs = n_jobs + self.random_state = random_state + self.verbose = verbose + + @_fit_context( + # BaseBagging.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Build a Bagging ensemble of estimators from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrices are accepted only if + they are supported by the base estimator. + + y : array-like of shape (n_samples,) + The target values (class labels in classification, real numbers in + regression). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if the base estimator supports + sample weighting. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + # Convert data (X is required to be 2d and indexable) + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc"], + dtype=None, + force_all_finite=False, + multi_output=True, + ) + return self._fit(X, y, self.max_samples, sample_weight=sample_weight) + + def _parallel_args(self): + return {} + + def _fit( + self, + X, + y, + max_samples=None, + max_depth=None, + sample_weight=None, + check_input=True, + ): + """Build a Bagging ensemble of estimators from the training + set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrices are accepted only if + they are supported by the base estimator. + + y : array-like of shape (n_samples,) + The target values (class labels in classification, real numbers in + regression). 
+ + max_samples : int or float, default=None + Argument to use instead of self.max_samples. + + max_depth : int, default=None + Override value used when constructing base estimator. Only + supported if the base estimator has a max_depth parameter. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if the base estimator supports + sample weighting. + + check_input : bool, default=True + Override value used when fitting base estimator. Only supported + if the base estimator has a check_input parameter for fit function. + + Returns + ------- + self : object + Fitted estimator. + """ + random_state = check_random_state(self.random_state) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=None) + + # Remap output + n_samples = X.shape[0] + self._n_samples = n_samples + y = self._validate_y(y) + + # Check parameters + self._validate_estimator() + + if max_depth is not None: + self.estimator_.max_depth = max_depth + + # Validate max_samples + if max_samples is None: + max_samples = self.max_samples + elif not isinstance(max_samples, numbers.Integral): + max_samples = int(max_samples * X.shape[0]) + + if max_samples > X.shape[0]: + raise ValueError("max_samples must be <= n_samples") + + # Store validated integer row sampling value + self._max_samples = max_samples + + # Validate max_features + if isinstance(self.max_features, numbers.Integral): + max_features = self.max_features + elif isinstance(self.max_features, float): + max_features = int(self.max_features * self.n_features_in_) + + if max_features > self.n_features_in_: + raise ValueError("max_features must be <= n_features") + + max_features = max(1, int(max_features)) + + # Store validated integer feature sampling value + self._max_features = max_features + + # Other checks + if not self.bootstrap and self.oob_score: + raise ValueError("Out of bag estimation only available if bootstrap=True") + + if self.warm_start and self.oob_score: + raise ValueError("Out of bag estimate only available if warm_start=False") + + if hasattr(self, "oob_score_") and self.warm_start: + del self.oob_score_ + + if not self.warm_start or not hasattr(self, "estimators_"): + # Free allocated memory, if any + self.estimators_ = [] + self.estimators_features_ = [] + + n_more_estimators = self.n_estimators - len(self.estimators_) + + if n_more_estimators < 0: + raise ValueError( + "n_estimators=%d must be larger or equal to " + "len(estimators_)=%d when warm_start==True" + % (self.n_estimators, len(self.estimators_)) + ) + + elif n_more_estimators == 0: + warn( + "Warm-start fitting without increasing n_estimators does not " + "fit new trees." 
+ ) + return self + + # Parallel loop + n_jobs, n_estimators, starts = _partition_estimators( + n_more_estimators, self.n_jobs + ) + total_n_estimators = sum(n_estimators) + + # Advance random state to state after training + # the first n_estimators + if self.warm_start and len(self.estimators_) > 0: + random_state.randint(MAX_INT, size=len(self.estimators_)) + + seeds = random_state.randint(MAX_INT, size=n_more_estimators) + self._seeds = seeds + + all_results = Parallel( + n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args() + )( + delayed(_parallel_build_estimators)( + n_estimators[i], + self, + X, + y, + sample_weight, + seeds[starts[i] : starts[i + 1]], + total_n_estimators, + verbose=self.verbose, + check_input=check_input, + ) + for i in range(n_jobs) + ) + + # Reduce + self.estimators_ += list( + itertools.chain.from_iterable(t[0] for t in all_results) + ) + self.estimators_features_ += list( + itertools.chain.from_iterable(t[1] for t in all_results) + ) + + if self.oob_score: + self._set_oob_score(X, y) + + return self + + @abstractmethod + def _set_oob_score(self, X, y): + """Calculate out of bag predictions and score.""" + + def _validate_y(self, y): + if len(y.shape) == 1 or y.shape[1] == 1: + return column_or_1d(y, warn=True) + return y + + def _get_estimators_indices(self): + # Get drawn indices along both sample and feature axes + for seed in self._seeds: + # Operations accessing random_state must be performed identically + # to those in `_parallel_build_estimators()` + feature_indices, sample_indices = _generate_bagging_indices( + seed, + self.bootstrap_features, + self.bootstrap, + self.n_features_in_, + self._n_samples, + self._max_features, + self._max_samples, + ) + + yield feature_indices, sample_indices + + @property + def estimators_samples_(self): + """ + The subset of drawn samples for each base estimator. + + Returns a dynamically generated list of indices identifying + the samples used for fitting each member of the ensemble, i.e., + the in-bag samples. + + Note: the list is re-created at each call to the property in order + to reduce the object memory footprint by not storing the sampling + data. Thus fetching the property may be slower than expected. + """ + return [sample_indices for _, sample_indices in self._get_estimators_indices()] + + +class BaggingClassifier(_RoutingNotSupportedMixin, ClassifierMixin, BaseBagging): + """A Bagging classifier. + + A Bagging classifier is an ensemble meta-estimator that fits base + classifiers each on random subsets of the original dataset and then + aggregate their individual predictions (either by voting or by averaging) + to form a final prediction. Such a meta-estimator can typically be used as + a way to reduce the variance of a black-box estimator (e.g., a decision + tree), by introducing randomization into its construction procedure and + then making an ensemble out of it. + + This algorithm encompasses several works from the literature. When random + subsets of the dataset are drawn as random subsets of the samples, then + this algorithm is known as Pasting [1]_. If samples are drawn with + replacement, then the method is known as Bagging [2]_. When random subsets + of the dataset are drawn as random subsets of the features, then the method + is known as Random Subspaces [3]_. Finally, when base estimators are built + on subsets of both samples and features, then the method is known as + Random Patches [4]_. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.15 + + Parameters + ---------- + estimator : object, default=None + The base estimator to fit on random subsets of the dataset. + If None, then the base estimator is a + :class:`~sklearn.tree.DecisionTreeClassifier`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=10 + The number of base estimators in the ensemble. + + max_samples : int or float, default=1.0 + The number of samples to draw from X to train each base estimator (with + replacement by default, see `bootstrap` for more details). + + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. + + max_features : int or float, default=1.0 + The number of features to draw from X to train each base estimator ( + without replacement by default, see `bootstrap_features` for more + details). + + - If int, then draw `max_features` features. + - If float, then draw `max(1, int(max_features * n_features_in_))` features. + + bootstrap : bool, default=True + Whether samples are drawn with replacement. If False, sampling + without replacement is performed. + + bootstrap_features : bool, default=False + Whether features are drawn with replacement. + + oob_score : bool, default=False + Whether to use out-of-bag samples to estimate + the generalization error. Only available if bootstrap=True. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit + a whole new ensemble. See :term:`the Glossary `. + + .. versionadded:: 0.17 + *warm_start* constructor parameter. + + n_jobs : int, default=None + The number of jobs to run in parallel for both :meth:`fit` and + :meth:`predict`. ``None`` means 1 unless in a + :obj:`joblib.parallel_backend` context. ``-1`` means using all + processors. See :term:`Glossary ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls the random resampling of the original dataset + (sample wise and feature wise). + If the base estimator accepts a `random_state` attribute, a different + seed is generated for each instance in the ensemble. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + verbose : int, default=0 + Controls the verbosity when fitting and predicting. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + estimators_ : list of estimators + The collection of fitted base estimators. + + estimators_samples_ : list of arrays + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. Each subset is defined by an array of the indices selected. + + estimators_features_ : list of arrays + The subset of drawn features for each base estimator. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_classes_ : int or list + The number of classes. + + oob_score_ : float + Score of the training dataset obtained using an out-of-bag estimate. + This attribute exists only when ``oob_score`` is True. 
+ + oob_decision_function_ : ndarray of shape (n_samples, n_classes) + Decision function computed with out-of-bag estimate on the training + set. If n_estimators is small it might be possible that a data point + was never left out during the bootstrap. In this case, + `oob_decision_function_` might contain NaN. This attribute exists + only when ``oob_score`` is True. + + See Also + -------- + BaggingRegressor : A Bagging regressor. + + References + ---------- + + .. [1] L. Breiman, "Pasting small votes for classification in large + databases and on-line", Machine Learning, 36(1), 85-103, 1999. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140, + 1996. + + .. [3] T. Ho, "The random subspace method for constructing decision + forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844, + 1998. + + .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine + Learning and Knowledge Discovery in Databases, 346-361, 2012. + + Examples + -------- + >>> from sklearn.svm import SVC + >>> from sklearn.ensemble import BaggingClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=100, n_features=4, + ... n_informative=2, n_redundant=0, + ... random_state=0, shuffle=False) + >>> clf = BaggingClassifier(estimator=SVC(), + ... n_estimators=10, random_state=0).fit(X, y) + >>> clf.predict([[0, 0, 0, 0]]) + array([1]) + """ + + def __init__( + self, + estimator=None, + n_estimators=10, + *, + max_samples=1.0, + max_features=1.0, + bootstrap=True, + bootstrap_features=False, + oob_score=False, + warm_start=False, + n_jobs=None, + random_state=None, + verbose=0, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + max_samples=max_samples, + max_features=max_features, + bootstrap=bootstrap, + bootstrap_features=bootstrap_features, + oob_score=oob_score, + warm_start=warm_start, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + ) + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeClassifier()) + + def _set_oob_score(self, X, y): + n_samples = y.shape[0] + n_classes_ = self.n_classes_ + + predictions = np.zeros((n_samples, n_classes_)) + + for estimator, samples, features in zip( + self.estimators_, self.estimators_samples_, self.estimators_features_ + ): + # Create mask for OOB samples + mask = ~indices_to_mask(samples, n_samples) + + if hasattr(estimator, "predict_proba"): + predictions[mask, :] += estimator.predict_proba( + (X[mask, :])[:, features] + ) + + else: + p = estimator.predict((X[mask, :])[:, features]) + j = 0 + + for i in range(n_samples): + if mask[i]: + predictions[i, p[j]] += 1 + j += 1 + + if (predictions.sum(axis=1) == 0).any(): + warn( + "Some inputs do not have OOB scores. " + "This probably means too few estimators were used " + "to compute any reliable oob estimates." + ) + + oob_decision_function = predictions / predictions.sum(axis=1)[:, np.newaxis] + oob_score = accuracy_score(y, np.argmax(predictions, axis=1)) + + self.oob_decision_function_ = oob_decision_function + self.oob_score_ = oob_score + + def _validate_y(self, y): + y = column_or_1d(y, warn=True) + check_classification_targets(y) + self.classes_, y = np.unique(y, return_inverse=True) + self.n_classes_ = len(self.classes_) + + return y + + def predict(self, X): + """Predict class for X. 
+ + The predicted class of an input sample is computed as the class with + the highest mean predicted probability. If base estimators do not + implement a ``predict_proba`` method, then it resorts to voting. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrices are accepted only if + they are supported by the base estimator. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted classes. + """ + predicted_probabilitiy = self.predict_proba(X) + return self.classes_.take((np.argmax(predicted_probabilitiy, axis=1)), axis=0) + + def predict_proba(self, X): + """Predict class probabilities for X. + + The predicted class probabilities of an input sample is computed as + the mean predicted class probabilities of the base estimators in the + ensemble. If base estimators do not implement a ``predict_proba`` + method, then it resorts to voting and the predicted class probabilities + of an input sample represents the proportion of estimators predicting + each class. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrices are accepted only if + they are supported by the base estimator. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + # Check data + X = self._validate_data( + X, + accept_sparse=["csr", "csc"], + dtype=None, + force_all_finite=False, + reset=False, + ) + + # Parallel loop + n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs) + + all_proba = Parallel( + n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args() + )( + delayed(_parallel_predict_proba)( + self.estimators_[starts[i] : starts[i + 1]], + self.estimators_features_[starts[i] : starts[i + 1]], + X, + self.n_classes_, + ) + for i in range(n_jobs) + ) + + # Reduce + proba = sum(all_proba) / self.n_estimators + + return proba + + def predict_log_proba(self, X): + """Predict class log-probabilities for X. + + The predicted class log-probabilities of an input sample is computed as + the log of the mean predicted class probabilities of the base + estimators in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrices are accepted only if + they are supported by the base estimator. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class log-probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. 
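+
+        Notes
+        -----
+        A minimal sketch (illustrative only) of the pooling identity that the
+        reduction below relies on when the base estimator exposes
+        ``predict_log_proba``: the log of the mean probability equals a
+        ``logaddexp`` reduction of the per-estimator log-probabilities minus
+        ``log(n_estimators)``.
+
+        >>> import numpy as np
+        >>> rng = np.random.RandomState(0)
+        >>> log_p = np.log(rng.dirichlet(np.ones(3), size=4))  # 4 estimators, 3 classes
+        >>> pooled = np.logaddexp.reduce(log_p, axis=0) - np.log(4)
+        >>> bool(np.allclose(pooled, np.log(np.exp(log_p).mean(axis=0))))
+        True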
+ """ + check_is_fitted(self) + if hasattr(self.estimator_, "predict_log_proba"): + # Check data + X = self._validate_data( + X, + accept_sparse=["csr", "csc"], + dtype=None, + force_all_finite=False, + reset=False, + ) + + # Parallel loop + n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs) + + all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)( + delayed(_parallel_predict_log_proba)( + self.estimators_[starts[i] : starts[i + 1]], + self.estimators_features_[starts[i] : starts[i + 1]], + X, + self.n_classes_, + ) + for i in range(n_jobs) + ) + + # Reduce + log_proba = all_log_proba[0] + + for j in range(1, len(all_log_proba)): + log_proba = np.logaddexp(log_proba, all_log_proba[j]) + + log_proba -= np.log(self.n_estimators) + + else: + log_proba = np.log(self.predict_proba(X)) + + return log_proba + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Average of the decision functions of the base classifiers. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrices are accepted only if + they are supported by the base estimator. + + Returns + ------- + score : ndarray of shape (n_samples, k) + The decision function of the input samples. The columns correspond + to the classes in sorted order, as they appear in the attribute + ``classes_``. Regression and binary classification are special + cases with ``k == 1``, otherwise ``k==n_classes``. + """ + check_is_fitted(self) + + # Check data + X = self._validate_data( + X, + accept_sparse=["csr", "csc"], + dtype=None, + force_all_finite=False, + reset=False, + ) + + # Parallel loop + n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs) + + all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)( + delayed(_parallel_decision_function)( + self.estimators_[starts[i] : starts[i + 1]], + self.estimators_features_[starts[i] : starts[i + 1]], + X, + ) + for i in range(n_jobs) + ) + + # Reduce + decisions = sum(all_decisions) / self.n_estimators + + return decisions + + def _more_tags(self): + if self.estimator is None: + estimator = DecisionTreeClassifier() + else: + estimator = self.estimator + + return {"allow_nan": _safe_tags(estimator, "allow_nan")} + + +class BaggingRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseBagging): + """A Bagging regressor. + + A Bagging regressor is an ensemble meta-estimator that fits base + regressors each on random subsets of the original dataset and then + aggregate their individual predictions (either by voting or by averaging) + to form a final prediction. Such a meta-estimator can typically be used as + a way to reduce the variance of a black-box estimator (e.g., a decision + tree), by introducing randomization into its construction procedure and + then making an ensemble out of it. + + This algorithm encompasses several works from the literature. When random + subsets of the dataset are drawn as random subsets of the samples, then + this algorithm is known as Pasting [1]_. If samples are drawn with + replacement, then the method is known as Bagging [2]_. When random subsets + of the dataset are drawn as random subsets of the features, then the method + is known as Random Subspaces [3]_. Finally, when base estimators are built + on subsets of both samples and features, then the method is known as + Random Patches [4]_. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.15 + + Parameters + ---------- + estimator : object, default=None + The base estimator to fit on random subsets of the dataset. + If None, then the base estimator is a + :class:`~sklearn.tree.DecisionTreeRegressor`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=10 + The number of base estimators in the ensemble. + + max_samples : int or float, default=1.0 + The number of samples to draw from X to train each base estimator (with + replacement by default, see `bootstrap` for more details). + + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. + + max_features : int or float, default=1.0 + The number of features to draw from X to train each base estimator ( + without replacement by default, see `bootstrap_features` for more + details). + + - If int, then draw `max_features` features. + - If float, then draw `max(1, int(max_features * n_features_in_))` features. + + bootstrap : bool, default=True + Whether samples are drawn with replacement. If False, sampling + without replacement is performed. + + bootstrap_features : bool, default=False + Whether features are drawn with replacement. + + oob_score : bool, default=False + Whether to use out-of-bag samples to estimate + the generalization error. Only available if bootstrap=True. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit + a whole new ensemble. See :term:`the Glossary `. + + n_jobs : int, default=None + The number of jobs to run in parallel for both :meth:`fit` and + :meth:`predict`. ``None`` means 1 unless in a + :obj:`joblib.parallel_backend` context. ``-1`` means using all + processors. See :term:`Glossary ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls the random resampling of the original dataset + (sample wise and feature wise). + If the base estimator accepts a `random_state` attribute, a different + seed is generated for each instance in the ensemble. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + verbose : int, default=0 + Controls the verbosity when fitting and predicting. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + estimators_ : list of estimators + The collection of fitted sub-estimators. + + estimators_samples_ : list of arrays + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. Each subset is defined by an array of the indices selected. + + estimators_features_ : list of arrays + The subset of drawn features for each base estimator. + + oob_score_ : float + Score of the training dataset obtained using an out-of-bag estimate. + This attribute exists only when ``oob_score`` is True. + + oob_prediction_ : ndarray of shape (n_samples,) + Prediction computed with out-of-bag estimate on the training + set. If n_estimators is small it might be possible that a data point + was never left out during the bootstrap. 
In this case, + `oob_prediction_` might contain NaN. This attribute exists only + when ``oob_score`` is True. + + See Also + -------- + BaggingClassifier : A Bagging classifier. + + References + ---------- + + .. [1] L. Breiman, "Pasting small votes for classification in large + databases and on-line", Machine Learning, 36(1), 85-103, 1999. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140, + 1996. + + .. [3] T. Ho, "The random subspace method for constructing decision + forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844, + 1998. + + .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine + Learning and Knowledge Discovery in Databases, 346-361, 2012. + + Examples + -------- + >>> from sklearn.svm import SVR + >>> from sklearn.ensemble import BaggingRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_samples=100, n_features=4, + ... n_informative=2, n_targets=1, + ... random_state=0, shuffle=False) + >>> regr = BaggingRegressor(estimator=SVR(), + ... n_estimators=10, random_state=0).fit(X, y) + >>> regr.predict([[0, 0, 0, 0]]) + array([-2.8720...]) + """ + + def __init__( + self, + estimator=None, + n_estimators=10, + *, + max_samples=1.0, + max_features=1.0, + bootstrap=True, + bootstrap_features=False, + oob_score=False, + warm_start=False, + n_jobs=None, + random_state=None, + verbose=0, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + max_samples=max_samples, + max_features=max_features, + bootstrap=bootstrap, + bootstrap_features=bootstrap_features, + oob_score=oob_score, + warm_start=warm_start, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + ) + + def predict(self, X): + """Predict regression target for X. + + The predicted regression target of an input sample is computed as the + mean predicted regression targets of the estimators in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrices are accepted only if + they are supported by the base estimator. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted values. + """ + check_is_fitted(self) + # Check data + X = self._validate_data( + X, + accept_sparse=["csr", "csc"], + dtype=None, + force_all_finite=False, + reset=False, + ) + + # Parallel loop + n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs) + + all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)( + delayed(_parallel_predict_regression)( + self.estimators_[starts[i] : starts[i + 1]], + self.estimators_features_[starts[i] : starts[i + 1]], + X, + ) + for i in range(n_jobs) + ) + + # Reduce + y_hat = sum(all_y_hat) / self.n_estimators + + return y_hat + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeRegressor()) + + def _set_oob_score(self, X, y): + n_samples = y.shape[0] + + predictions = np.zeros((n_samples,)) + n_predictions = np.zeros((n_samples,)) + + for estimator, samples, features in zip( + self.estimators_, self.estimators_samples_, self.estimators_features_ + ): + # Create mask for OOB samples + mask = ~indices_to_mask(samples, n_samples) + + predictions[mask] += estimator.predict((X[mask, :])[:, features]) + n_predictions[mask] += 1 + + if (n_predictions == 0).any(): + warn( + "Some inputs do not have OOB scores. 
" + "This probably means too few estimators were used " + "to compute any reliable oob estimates." + ) + n_predictions[n_predictions == 0] = 1 + + predictions /= n_predictions + + self.oob_prediction_ = predictions + self.oob_score_ = r2_score(y, predictions) + + def _more_tags(self): + if self.estimator is None: + estimator = DecisionTreeRegressor() + else: + estimator = self.estimator + return {"allow_nan": _safe_tags(estimator, "allow_nan")} diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_base.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa05d90975cd1693a879f570acaf2d4d8dbde7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_base.py @@ -0,0 +1,301 @@ +"""Base class for ensemble-based estimators.""" + +# Authors: Gilles Louppe +# License: BSD 3 clause + +from abc import ABCMeta, abstractmethod +from typing import List + +import numpy as np +from joblib import effective_n_jobs + +from ..base import BaseEstimator, MetaEstimatorMixin, clone, is_classifier, is_regressor +from ..utils import Bunch, _print_elapsed_time, check_random_state +from ..utils._tags import _safe_tags +from ..utils.metaestimators import _BaseComposition + + +def _fit_single_estimator( + estimator, X, y, sample_weight=None, message_clsname=None, message=None +): + """Private function used to fit an estimator within a job.""" + if sample_weight is not None: + try: + with _print_elapsed_time(message_clsname, message): + estimator.fit(X, y, sample_weight=sample_weight) + except TypeError as exc: + if "unexpected keyword argument 'sample_weight'" in str(exc): + raise TypeError( + "Underlying estimator {} does not support sample weights.".format( + estimator.__class__.__name__ + ) + ) from exc + raise + else: + with _print_elapsed_time(message_clsname, message): + estimator.fit(X, y) + return estimator + + +def _set_random_states(estimator, random_state=None): + """Set fixed random_state parameters for an estimator. + + Finds all parameters ending ``random_state`` and sets them to integers + derived from ``random_state``. + + Parameters + ---------- + estimator : estimator supporting get/set_params + Estimator with potential randomness managed by random_state + parameters. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the generation of the random + integers. Pass an int for reproducible output across multiple function + calls. + See :term:`Glossary `. + + Notes + ----- + This does not necessarily set *all* ``random_state`` attributes that + control an estimator's randomness, only those accessible through + ``estimator.get_params()``. ``random_state``s not controlled include + those belonging to: + + * cross-validation splitters + * ``scipy.stats`` rvs + """ + random_state = check_random_state(random_state) + to_set = {} + for key in sorted(estimator.get_params(deep=True)): + if key == "random_state" or key.endswith("__random_state"): + to_set[key] = random_state.randint(np.iinfo(np.int32).max) + + if to_set: + estimator.set_params(**to_set) + + +class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for all ensemble classes. + + Warning: This class should not be used directly. Use derived classes + instead. + + Parameters + ---------- + estimator : object + The base estimator from which the ensemble is built. + + n_estimators : int, default=10 + The number of estimators in the ensemble. 
+ + estimator_params : list of str, default=tuple() + The list of attributes to use as parameters when instantiating a + new base estimator. If none are given, default parameters are used. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + estimators_ : list of estimators + The collection of fitted base estimators. + """ + + # overwrite _required_parameters from MetaEstimatorMixin + _required_parameters: List[str] = [] + + @abstractmethod + def __init__( + self, + estimator=None, + *, + n_estimators=10, + estimator_params=tuple(), + ): + # Set parameters + self.estimator = estimator + self.n_estimators = n_estimators + self.estimator_params = estimator_params + + # Don't instantiate estimators now! Parameters of estimator might + # still change. Eg., when grid-searching with the nested object syntax. + # self.estimators_ needs to be filled by the derived classes in fit. + + def _validate_estimator(self, default=None): + """Check the base estimator. + + Sets the `estimator_` attributes. + """ + if self.estimator is not None: + self.estimator_ = self.estimator + else: + self.estimator_ = default + + def _make_estimator(self, append=True, random_state=None): + """Make and configure a copy of the `estimator_` attribute. + + Warning: This method should be used to properly instantiate new + sub-estimators. + """ + estimator = clone(self.estimator_) + estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params}) + + if random_state is not None: + _set_random_states(estimator, random_state) + + if append: + self.estimators_.append(estimator) + + return estimator + + def __len__(self): + """Return the number of estimators in the ensemble.""" + return len(self.estimators_) + + def __getitem__(self, index): + """Return the index'th estimator in the ensemble.""" + return self.estimators_[index] + + def __iter__(self): + """Return iterator over estimators in the ensemble.""" + return iter(self.estimators_) + + +def _partition_estimators(n_estimators, n_jobs): + """Private function used to partition estimators between jobs.""" + # Compute the number of jobs + n_jobs = min(effective_n_jobs(n_jobs), n_estimators) + + # Partition estimators between jobs + n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int) + n_estimators_per_job[: n_estimators % n_jobs] += 1 + starts = np.cumsum(n_estimators_per_job) + + return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist() + + +class _BaseHeterogeneousEnsemble( + MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta +): + """Base class for heterogeneous ensemble of learners. + + Parameters + ---------- + estimators : list of (str, estimator) tuples + The ensemble of estimators to use in the ensemble. Each element of the + list is defined as a tuple of string (i.e. name of the estimator) and + an estimator instance. An estimator can be set to `'drop'` using + `set_params`. + + Attributes + ---------- + estimators_ : list of estimators + The elements of the estimators parameter, having been fitted on the + training data. If an estimator has been set to `'drop'`, it will not + appear in `estimators_`. + """ + + _required_parameters = ["estimators"] + + @property + def named_estimators(self): + """Dictionary to access any fitted sub-estimators by name. 
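+        For instance, with a concrete subclass such as
+        :class:`~sklearn.ensemble.VotingClassifier`, shown here on an
+        unfitted ensemble purely as an illustrative sketch of the
+        name-to-estimator mapping:
+
+        >>> from sklearn.ensemble import VotingClassifier
+        >>> from sklearn.linear_model import LogisticRegression
+        >>> from sklearn.tree import DecisionTreeClassifier
+        >>> eclf = VotingClassifier(estimators=[
+        ...     ("lr", LogisticRegression()), ("dt", DecisionTreeClassifier())])
+        >>> sorted(eclf.named_estimators)
+        ['dt', 'lr']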
+ + Returns + ------- + :class:`~sklearn.utils.Bunch` + """ + return Bunch(**dict(self.estimators)) + + @abstractmethod + def __init__(self, estimators): + self.estimators = estimators + + def _validate_estimators(self): + if len(self.estimators) == 0: + raise ValueError( + "Invalid 'estimators' attribute, 'estimators' should be a " + "non-empty list of (string, estimator) tuples." + ) + names, estimators = zip(*self.estimators) + # defined by MetaEstimatorMixin + self._validate_names(names) + + has_estimator = any(est != "drop" for est in estimators) + if not has_estimator: + raise ValueError( + "All estimators are dropped. At least one is required " + "to be an estimator." + ) + + is_estimator_type = is_classifier if is_classifier(self) else is_regressor + + for est in estimators: + if est != "drop" and not is_estimator_type(est): + raise ValueError( + "The estimator {} should be a {}.".format( + est.__class__.__name__, is_estimator_type.__name__[3:] + ) + ) + + return names, estimators + + def set_params(self, **params): + """ + Set the parameters of an estimator from the ensemble. + + Valid parameter keys can be listed with `get_params()`. Note that you + can directly set the parameters of the estimators contained in + `estimators`. + + Parameters + ---------- + **params : keyword arguments + Specific parameters using e.g. + `set_params(parameter_name=new_value)`. In addition, to setting the + parameters of the estimator, the individual estimator of the + estimators can also be set, or can be removed by setting them to + 'drop'. + + Returns + ------- + self : object + Estimator instance. + """ + super()._set_params("estimators", **params) + return self + + def get_params(self, deep=True): + """ + Get the parameters of an estimator from the ensemble. + + Returns the parameters given in the constructor as well as the + estimators contained within the `estimators` parameter. + + Parameters + ---------- + deep : bool, default=True + Setting it to True gets the various estimators and the parameters + of the estimators as well. + + Returns + ------- + params : dict + Parameter and estimator names mapped to their values or parameter + names mapped to their values. + """ + return super()._get_params("estimators", deep=deep) + + def _more_tags(self): + try: + allow_nan = all( + _safe_tags(est[1])["allow_nan"] if est[1] != "drop" else True + for est in self.estimators + ) + except Exception: + # If `estimators` does not comply with our API (list of tuples) then it will + # fail. In this case, we assume that `allow_nan` is False but the parameter + # validation will raise an error during `fit`. + allow_nan = False + return {"preserves_dtype": [], "allow_nan": allow_nan} diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_forest.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_forest.py new file mode 100644 index 0000000000000000000000000000000000000000..ba23e53e16a63a8681cf0fac26940dbd608dced6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_forest.py @@ -0,0 +1,2982 @@ +""" +Forest of trees-based ensemble methods. + +Those methods include random forests and extremely randomized trees. + +The module structure is the following: + +- The ``BaseForest`` base class implements a common ``fit`` method for all + the estimators in the module. The ``fit`` method of the base ``Forest`` + class calls the ``fit`` method of each sub-estimator on random samples + (with replacement, a.k.a. bootstrap) of the training set. 
+ + The init of the sub-estimator is further delegated to the + ``BaseEnsemble`` constructor. + +- The ``ForestClassifier`` and ``ForestRegressor`` base classes further + implement the prediction logic by computing an average of the predicted + outcomes of the sub-estimators. + +- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived + classes provide the user with concrete implementations of + the forest ensemble method using classical, deterministic + ``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as + sub-estimator implementations. + +- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived + classes provide the user with concrete implementations of the + forest ensemble method using the extremely randomized trees + ``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as + sub-estimator implementations. + +Single and multi-output problems are both handled. +""" + +# Authors: Gilles Louppe +# Brian Holt +# Joly Arnaud +# Fares Hedayati +# +# License: BSD 3 clause + + +import threading +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real +from warnings import catch_warnings, simplefilter, warn + +import numpy as np +from scipy.sparse import hstack as sparse_hstack +from scipy.sparse import issparse + +from ..base import ( + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + TransformerMixin, + _fit_context, + is_classifier, +) +from ..exceptions import DataConversionWarning +from ..metrics import accuracy_score, r2_score +from ..preprocessing import OneHotEncoder +from ..tree import ( + BaseDecisionTree, + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) +from ..tree._tree import DOUBLE, DTYPE +from ..utils import check_random_state, compute_sample_weight +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils._tags import _safe_tags +from ..utils.multiclass import check_classification_targets, type_of_target +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_feature_names_in, + _check_sample_weight, + _num_samples, + check_is_fitted, +) +from ._base import BaseEnsemble, _partition_estimators + +__all__ = [ + "RandomForestClassifier", + "RandomForestRegressor", + "ExtraTreesClassifier", + "ExtraTreesRegressor", + "RandomTreesEmbedding", +] + +MAX_INT = np.iinfo(np.int32).max + + +def _get_n_samples_bootstrap(n_samples, max_samples): + """ + Get the number of samples in a bootstrap sample. + + Parameters + ---------- + n_samples : int + Number of samples in the dataset. + max_samples : int or float + The maximum number of samples to draw from the total available: + - if float, this indicates a fraction of the total and should be + the interval `(0.0, 1.0]`; + - if int, this indicates the exact number of samples; + - if None, this indicates the total number of samples. + + Returns + ------- + n_samples_bootstrap : int + The total number of samples to draw for the bootstrap sample. 
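+
+    Examples
+    --------
+    A small sketch of the contract described above; the values follow
+    directly from the int/float/None handling in the body of this helper:
+
+    >>> _get_n_samples_bootstrap(n_samples=100, max_samples=None)
+    100
+    >>> _get_n_samples_bootstrap(n_samples=100, max_samples=30)
+    30
+    >>> _get_n_samples_bootstrap(n_samples=100, max_samples=0.25)
+    25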
+ """ + if max_samples is None: + return n_samples + + if isinstance(max_samples, Integral): + if max_samples > n_samples: + msg = "`max_samples` must be <= n_samples={} but got value {}" + raise ValueError(msg.format(n_samples, max_samples)) + return max_samples + + if isinstance(max_samples, Real): + return max(round(n_samples * max_samples), 1) + + +def _generate_sample_indices(random_state, n_samples, n_samples_bootstrap): + """ + Private function used to _parallel_build_trees function.""" + + random_instance = check_random_state(random_state) + sample_indices = random_instance.randint( + 0, n_samples, n_samples_bootstrap, dtype=np.int32 + ) + + return sample_indices + + +def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap): + """ + Private function used to forest._set_oob_score function.""" + sample_indices = _generate_sample_indices( + random_state, n_samples, n_samples_bootstrap + ) + sample_counts = np.bincount(sample_indices, minlength=n_samples) + unsampled_mask = sample_counts == 0 + indices_range = np.arange(n_samples) + unsampled_indices = indices_range[unsampled_mask] + + return unsampled_indices + + +def _parallel_build_trees( + tree, + bootstrap, + X, + y, + sample_weight, + tree_idx, + n_trees, + verbose=0, + class_weight=None, + n_samples_bootstrap=None, + missing_values_in_feature_mask=None, +): + """ + Private function used to fit a single tree in parallel.""" + if verbose > 1: + print("building tree %d of %d" % (tree_idx + 1, n_trees)) + + if bootstrap: + n_samples = X.shape[0] + if sample_weight is None: + curr_sample_weight = np.ones((n_samples,), dtype=np.float64) + else: + curr_sample_weight = sample_weight.copy() + + indices = _generate_sample_indices( + tree.random_state, n_samples, n_samples_bootstrap + ) + sample_counts = np.bincount(indices, minlength=n_samples) + curr_sample_weight *= sample_counts + + if class_weight == "subsample": + with catch_warnings(): + simplefilter("ignore", DeprecationWarning) + curr_sample_weight *= compute_sample_weight("auto", y, indices=indices) + elif class_weight == "balanced_subsample": + curr_sample_weight *= compute_sample_weight("balanced", y, indices=indices) + + tree._fit( + X, + y, + sample_weight=curr_sample_weight, + check_input=False, + missing_values_in_feature_mask=missing_values_in_feature_mask, + ) + else: + tree._fit( + X, + y, + sample_weight=sample_weight, + check_input=False, + missing_values_in_feature_mask=missing_values_in_feature_mask, + ) + + return tree + + +class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta): + """ + Base class for forests of trees. + + Warning: This class should not be used directly. Use derived classes + instead. 
+ """ + + _parameter_constraints: dict = { + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "bootstrap": ["boolean"], + "oob_score": ["boolean", callable], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "verbose": ["verbose"], + "warm_start": ["boolean"], + "max_samples": [ + None, + Interval(RealNotInt, 0.0, 1.0, closed="right"), + Interval(Integral, 1, None, closed="left"), + ], + } + + @abstractmethod + def __init__( + self, + estimator, + n_estimators=100, + *, + estimator_params=tuple(), + bootstrap=False, + oob_score=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + class_weight=None, + max_samples=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + estimator_params=estimator_params, + ) + + self.bootstrap = bootstrap + self.oob_score = oob_score + self.n_jobs = n_jobs + self.random_state = random_state + self.verbose = verbose + self.warm_start = warm_start + self.class_weight = class_weight + self.max_samples = max_samples + + def apply(self, X): + """ + Apply trees in the forest to X, return leaf indices. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + X_leaves : ndarray of shape (n_samples, n_estimators) + For each datapoint x in X and for each tree in the forest, + return the index of the leaf x ends up in. + """ + X = self._validate_X_predict(X) + results = Parallel( + n_jobs=self.n_jobs, + verbose=self.verbose, + prefer="threads", + )(delayed(tree.apply)(X, check_input=False) for tree in self.estimators_) + + return np.array(results).T + + def decision_path(self, X): + """ + Return the decision path in the forest. + + .. versionadded:: 0.18 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + indicator : sparse matrix of shape (n_samples, n_nodes) + Return a node indicator matrix where non zero elements indicates + that the samples goes through the nodes. The matrix is of CSR + format. + + n_nodes_ptr : ndarray of shape (n_estimators + 1,) + The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]] + gives the indicator value for the i-th estimator. + """ + X = self._validate_X_predict(X) + indicators = Parallel( + n_jobs=self.n_jobs, + verbose=self.verbose, + prefer="threads", + )( + delayed(tree.decision_path)(X, check_input=False) + for tree in self.estimators_ + ) + + n_nodes = [0] + n_nodes.extend([i.shape[1] for i in indicators]) + n_nodes_ptr = np.array(n_nodes).cumsum() + + return sparse_hstack(indicators).tocsr(), n_nodes_ptr + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """ + Build a forest of trees from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Internally, its dtype will be converted + to ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csc_matrix``. 
+ + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The target values (class labels in classification, real numbers in + regression). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. In the case of + classification, splits are also ignored if they would result in any + single class carrying a negative weight in either child node. + + Returns + ------- + self : object + Fitted estimator. + """ + # Validate or convert input data + if issparse(y): + raise ValueError("sparse multilabel-indicator for y is not supported.") + + X, y = self._validate_data( + X, + y, + multi_output=True, + accept_sparse="csc", + dtype=DTYPE, + force_all_finite=False, + ) + # _compute_missing_values_in_feature_mask checks if X has missing values and + # will raise an error if the underlying tree base estimator can't handle missing + # values. Only the criterion is required to determine if the tree supports + # missing values. + estimator = type(self.estimator)(criterion=self.criterion) + missing_values_in_feature_mask = ( + estimator._compute_missing_values_in_feature_mask( + X, estimator_name=self.__class__.__name__ + ) + ) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if issparse(X): + # Pre-sort indices to avoid that each individual tree of the + # ensemble sorts the indices. + X.sort_indices() + + y = np.atleast_1d(y) + if y.ndim == 2 and y.shape[1] == 1: + warn( + ( + "A column-vector y was passed when a 1d array was" + " expected. Please change the shape of y to " + "(n_samples,), for example using ravel()." + ), + DataConversionWarning, + stacklevel=2, + ) + + if y.ndim == 1: + # reshape is necessary to preserve the data contiguity against vs + # [:, np.newaxis] that does not. + y = np.reshape(y, (-1, 1)) + + if self.criterion == "poisson": + if np.any(y < 0): + raise ValueError( + "Some value(s) of y are negative which is " + "not allowed for Poisson regression." + ) + if np.sum(y) <= 0: + raise ValueError( + "Sum of y is not strictly positive which " + "is necessary for Poisson regression." + ) + + self._n_samples, self.n_outputs_ = y.shape + + y, expanded_class_weight = self._validate_y_class_weight(y) + + if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: + y = np.ascontiguousarray(y, dtype=DOUBLE) + + if expanded_class_weight is not None: + if sample_weight is not None: + sample_weight = sample_weight * expanded_class_weight + else: + sample_weight = expanded_class_weight + + if not self.bootstrap and self.max_samples is not None: + raise ValueError( + "`max_sample` cannot be set if `bootstrap=False`. " + "Either switch to `bootstrap=True` or set " + "`max_sample=None`." 
+ ) + elif self.bootstrap: + n_samples_bootstrap = _get_n_samples_bootstrap( + n_samples=X.shape[0], max_samples=self.max_samples + ) + else: + n_samples_bootstrap = None + + self._n_samples_bootstrap = n_samples_bootstrap + + self._validate_estimator() + + if not self.bootstrap and self.oob_score: + raise ValueError("Out of bag estimation only available if bootstrap=True") + + random_state = check_random_state(self.random_state) + + if not self.warm_start or not hasattr(self, "estimators_"): + # Free allocated memory, if any + self.estimators_ = [] + + n_more_estimators = self.n_estimators - len(self.estimators_) + + if n_more_estimators < 0: + raise ValueError( + "n_estimators=%d must be larger or equal to " + "len(estimators_)=%d when warm_start==True" + % (self.n_estimators, len(self.estimators_)) + ) + + elif n_more_estimators == 0: + warn( + "Warm-start fitting without increasing n_estimators does not " + "fit new trees." + ) + else: + if self.warm_start and len(self.estimators_) > 0: + # We draw from the random state to get the random state we + # would have got if we hadn't used a warm_start. + random_state.randint(MAX_INT, size=len(self.estimators_)) + + trees = [ + self._make_estimator(append=False, random_state=random_state) + for i in range(n_more_estimators) + ] + + # Parallel loop: we prefer the threading backend as the Cython code + # for fitting the trees is internally releasing the Python GIL + # making threading more efficient than multiprocessing in + # that case. However, for joblib 0.12+ we respect any + # parallel_backend contexts set at a higher level, + # since correctness does not rely on using threads. + trees = Parallel( + n_jobs=self.n_jobs, + verbose=self.verbose, + prefer="threads", + )( + delayed(_parallel_build_trees)( + t, + self.bootstrap, + X, + y, + sample_weight, + i, + len(trees), + verbose=self.verbose, + class_weight=self.class_weight, + n_samples_bootstrap=n_samples_bootstrap, + missing_values_in_feature_mask=missing_values_in_feature_mask, + ) + for i, t in enumerate(trees) + ) + + # Collect newly grown trees + self.estimators_.extend(trees) + + if self.oob_score and ( + n_more_estimators > 0 or not hasattr(self, "oob_score_") + ): + y_type = type_of_target(y) + if y_type == "unknown" or ( + self._estimator_type == "classifier" + and y_type == "multiclass-multioutput" + ): + # FIXME: we could consider to support multiclass-multioutput if + # we introduce or reuse a constructor parameter (e.g. + # oob_score) allowing our user to pass a callable defining the + # scoring strategy on OOB sample. + raise ValueError( + "The type of target cannot be used to compute OOB " + f"estimates. Got {y_type} while only the following are " + "supported: continuous, continuous-multioutput, binary, " + "multiclass, multilabel-indicator." + ) + + if callable(self.oob_score): + self._set_oob_score_and_attributes( + X, y, scoring_function=self.oob_score + ) + else: + self._set_oob_score_and_attributes(X, y) + + # Decapsulate classes_ attributes + if hasattr(self, "classes_") and self.n_outputs_ == 1: + self.n_classes_ = self.n_classes_[0] + self.classes_ = self.classes_[0] + + return self + + @abstractmethod + def _set_oob_score_and_attributes(self, X, y, scoring_function=None): + """Compute and set the OOB score and attributes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + y : ndarray of shape (n_samples, n_outputs) + The target matrix. + scoring_function : callable, default=None + Scoring function for OOB score. 
Default depends on whether + this is a regression (R2 score) or classification problem + (accuracy score). + """ + + def _compute_oob_predictions(self, X, y): + """Compute and set the OOB score. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + y : ndarray of shape (n_samples, n_outputs) + The target matrix. + + Returns + ------- + oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or \ + (n_samples, 1, n_outputs) + The OOB predictions. + """ + # Prediction requires X to be in CSR format + if issparse(X): + X = X.tocsr() + + n_samples = y.shape[0] + n_outputs = self.n_outputs_ + if is_classifier(self) and hasattr(self, "n_classes_"): + # n_classes_ is a ndarray at this stage + # all the supported type of target will have the same number of + # classes in all outputs + oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs) + else: + # for regression, n_classes_ does not exist and we create an empty + # axis to be consistent with the classification case and make + # the array operations compatible with the 2 settings + oob_pred_shape = (n_samples, 1, n_outputs) + + oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64) + n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64) + + n_samples_bootstrap = _get_n_samples_bootstrap( + n_samples, + self.max_samples, + ) + for estimator in self.estimators_: + unsampled_indices = _generate_unsampled_indices( + estimator.random_state, + n_samples, + n_samples_bootstrap, + ) + + y_pred = self._get_oob_predictions(estimator, X[unsampled_indices, :]) + oob_pred[unsampled_indices, ...] += y_pred + n_oob_pred[unsampled_indices, :] += 1 + + for k in range(n_outputs): + if (n_oob_pred == 0).any(): + warn( + ( + "Some inputs do not have OOB scores. This probably means " + "too few trees were used to compute any reliable OOB " + "estimates." + ), + UserWarning, + ) + n_oob_pred[n_oob_pred == 0] = 1 + oob_pred[..., k] /= n_oob_pred[..., [k]] + + return oob_pred + + def _validate_y_class_weight(self, y): + # Default implementation + return y, None + + def _validate_X_predict(self, X): + """ + Validate X whenever one tries to predict, apply, predict_proba.""" + check_is_fitted(self) + if self.estimators_[0]._support_missing_values(X): + force_all_finite = "allow-nan" + else: + force_all_finite = True + + X = self._validate_data( + X, + dtype=DTYPE, + accept_sparse="csr", + reset=False, + force_all_finite=force_all_finite, + ) + if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc): + raise ValueError("No support for np.int64 index based sparse matrices") + return X + + @property + def feature_importances_(self): + """ + The impurity-based feature importances. + + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + Returns + ------- + feature_importances_ : ndarray of shape (n_features,) + The values of this array sum to 1, unless all trees are single node + trees consisting of only the root node, in which case it will be an + array of zeros. 
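+
+        A minimal illustrative sketch of the normalization described above,
+        using :class:`~sklearn.ensemble.RandomForestClassifier` on a toy
+        dataset (the exact importance values are not meaningful here):
+
+        >>> import numpy as np
+        >>> from sklearn.ensemble import RandomForestClassifier
+        >>> from sklearn.datasets import make_classification
+        >>> X, y = make_classification(n_samples=100, n_features=4,
+        ...                            random_state=0)
+        >>> forest = RandomForestClassifier(n_estimators=5,
+        ...                                 random_state=0).fit(X, y)
+        >>> forest.feature_importances_.shape
+        (4,)
+        >>> bool(np.isclose(forest.feature_importances_.sum(), 1.0))
+        True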
+ """ + check_is_fitted(self) + + all_importances = Parallel(n_jobs=self.n_jobs, prefer="threads")( + delayed(getattr)(tree, "feature_importances_") + for tree in self.estimators_ + if tree.tree_.node_count > 1 + ) + + if not all_importances: + return np.zeros(self.n_features_in_, dtype=np.float64) + + all_importances = np.mean(all_importances, axis=0, dtype=np.float64) + return all_importances / np.sum(all_importances) + + def _get_estimators_indices(self): + # Get drawn indices along both sample and feature axes + for tree in self.estimators_: + if not self.bootstrap: + yield np.arange(self._n_samples, dtype=np.int32) + else: + # tree.random_state is actually an immutable integer seed rather + # than a mutable RandomState instance, so it's safe to use it + # repeatedly when calling this property. + seed = tree.random_state + # Operations accessing random_state must be performed identically + # to those in `_parallel_build_trees()` + yield _generate_sample_indices( + seed, self._n_samples, self._n_samples_bootstrap + ) + + @property + def estimators_samples_(self): + """The subset of drawn samples for each base estimator. + + Returns a dynamically generated list of indices identifying + the samples used for fitting each member of the ensemble, i.e., + the in-bag samples. + + Note: the list is re-created at each call to the property in order + to reduce the object memory footprint by not storing the sampling + data. Thus fetching the property may be slower than expected. + """ + return [sample_indices for sample_indices in self._get_estimators_indices()] + + def _more_tags(self): + # Only the criterion is required to determine if the tree supports + # missing values + estimator = type(self.estimator)(criterion=self.criterion) + return {"allow_nan": _safe_tags(estimator, key="allow_nan")} + + +def _accumulate_prediction(predict, X, out, lock): + """ + This is a utility function for joblib's Parallel. + + It can't go locally in ForestClassifier or ForestRegressor, because joblib + complains that it cannot pickle it when placed there. + """ + prediction = predict(X, check_input=False) + with lock: + if len(out) == 1: + out[0] += prediction + else: + for i in range(len(out)): + out[i] += prediction[i] + + +class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta): + """ + Base class for forest of trees-based classifiers. + + Warning: This class should not be used directly. Use derived classes + instead. + """ + + @abstractmethod + def __init__( + self, + estimator, + n_estimators=100, + *, + estimator_params=tuple(), + bootstrap=False, + oob_score=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + class_weight=None, + max_samples=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + estimator_params=estimator_params, + bootstrap=bootstrap, + oob_score=oob_score, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + warm_start=warm_start, + class_weight=class_weight, + max_samples=max_samples, + ) + + @staticmethod + def _get_oob_predictions(tree, X): + """Compute the OOB predictions for an individual tree. + + Parameters + ---------- + tree : DecisionTreeClassifier object + A single decision tree classifier. + X : ndarray of shape (n_samples, n_features) + The OOB samples. + + Returns + ------- + y_pred : ndarray of shape (n_samples, n_classes, n_outputs) + The OOB associated predictions. 
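+
+        A small sketch (pure NumPy, illustrative only) of the axis
+        reordering performed below for the multi-output case, going from
+        ``(n_outputs, n_samples, n_classes)`` to
+        ``(n_samples, n_classes, n_outputs)``:
+
+        >>> import numpy as np
+        >>> stacked = np.zeros((3, 10, 2))  # (n_outputs, n_samples, n_classes)
+        >>> np.rollaxis(stacked, axis=0, start=3).shape
+        (10, 2, 3)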
+ """ + y_pred = tree.predict_proba(X, check_input=False) + y_pred = np.asarray(y_pred) + if y_pred.ndim == 2: + # binary and multiclass + y_pred = y_pred[..., np.newaxis] + else: + # Roll the first `n_outputs` axis to the last axis. We will reshape + # from a shape of (n_outputs, n_samples, n_classes) to a shape of + # (n_samples, n_classes, n_outputs). + y_pred = np.rollaxis(y_pred, axis=0, start=3) + return y_pred + + def _set_oob_score_and_attributes(self, X, y, scoring_function=None): + """Compute and set the OOB score and attributes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + y : ndarray of shape (n_samples, n_outputs) + The target matrix. + scoring_function : callable, default=None + Scoring function for OOB score. Defaults to `accuracy_score`. + """ + self.oob_decision_function_ = super()._compute_oob_predictions(X, y) + if self.oob_decision_function_.shape[-1] == 1: + # drop the n_outputs axis if there is a single output + self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1) + + if scoring_function is None: + scoring_function = accuracy_score + + self.oob_score_ = scoring_function( + y, np.argmax(self.oob_decision_function_, axis=1) + ) + + def _validate_y_class_weight(self, y): + check_classification_targets(y) + + y = np.copy(y) + expanded_class_weight = None + + if self.class_weight is not None: + y_original = np.copy(y) + + self.classes_ = [] + self.n_classes_ = [] + + y_store_unique_indices = np.zeros(y.shape, dtype=int) + for k in range(self.n_outputs_): + classes_k, y_store_unique_indices[:, k] = np.unique( + y[:, k], return_inverse=True + ) + self.classes_.append(classes_k) + self.n_classes_.append(classes_k.shape[0]) + y = y_store_unique_indices + + if self.class_weight is not None: + valid_presets = ("balanced", "balanced_subsample") + if isinstance(self.class_weight, str): + if self.class_weight not in valid_presets: + raise ValueError( + "Valid presets for class_weight include " + '"balanced" and "balanced_subsample".' + 'Given "%s".' + % self.class_weight + ) + if self.warm_start: + warn( + 'class_weight presets "balanced" or ' + '"balanced_subsample" are ' + "not recommended for warm_start if the fitted data " + "differs from the full dataset. In order to use " + '"balanced" weights, use compute_class_weight ' + '("balanced", classes, y). In place of y you can use ' + "a large enough sample of the full training set " + "target to properly estimate the class frequency " + "distributions. Pass the resulting weights as the " + "class_weight parameter." + ) + + if self.class_weight != "balanced_subsample" or not self.bootstrap: + if self.class_weight == "balanced_subsample": + class_weight = "balanced" + else: + class_weight = self.class_weight + expanded_class_weight = compute_sample_weight(class_weight, y_original) + + return y, expanded_class_weight + + def predict(self, X): + """ + Predict class for X. + + The predicted class of an input sample is a vote by the trees in + the forest, weighted by their probability estimates. That is, + the predicted class is the one with highest mean probability + estimate across the trees. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. 
+ + Returns + ------- + y : ndarray of shape (n_samples,) or (n_samples, n_outputs) + The predicted classes. + """ + proba = self.predict_proba(X) + + if self.n_outputs_ == 1: + return self.classes_.take(np.argmax(proba, axis=1), axis=0) + + else: + n_samples = proba[0].shape[0] + # all dtypes should be the same, so just take the first + class_type = self.classes_[0].dtype + predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type) + + for k in range(self.n_outputs_): + predictions[:, k] = self.classes_[k].take( + np.argmax(proba[k], axis=1), axis=0 + ) + + return predictions + + def predict_proba(self, X): + """ + Predict class probabilities for X. + + The predicted class probabilities of an input sample are computed as + the mean predicted class probabilities of the trees in the forest. + The class probability of a single tree is the fraction of samples of + the same class in a leaf. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes), or a list of such arrays + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + # Check data + X = self._validate_X_predict(X) + + # Assign chunk of trees to jobs + n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) + + # avoid storing the output of every estimator by summing them here + all_proba = [ + np.zeros((X.shape[0], j), dtype=np.float64) + for j in np.atleast_1d(self.n_classes_) + ] + lock = threading.Lock() + Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")( + delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock) + for e in self.estimators_ + ) + + for proba in all_proba: + proba /= len(self.estimators_) + + if len(all_proba) == 1: + return all_proba[0] + else: + return all_proba + + def predict_log_proba(self, X): + """ + Predict class log-probabilities for X. + + The predicted class log-probabilities of an input sample is computed as + the log of the mean predicted class probabilities of the trees in the + forest. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes), or a list of such arrays + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + proba = self.predict_proba(X) + + if self.n_outputs_ == 1: + return np.log(proba) + + else: + for k in range(self.n_outputs_): + proba[k] = np.log(proba[k]) + + return proba + + def _more_tags(self): + return {"multilabel": True} + + +class ForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta): + """ + Base class for forest of trees-based regressors. + + Warning: This class should not be used directly. Use derived classes + instead. 
+ """ + + @abstractmethod + def __init__( + self, + estimator, + n_estimators=100, + *, + estimator_params=tuple(), + bootstrap=False, + oob_score=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + max_samples=None, + ): + super().__init__( + estimator, + n_estimators=n_estimators, + estimator_params=estimator_params, + bootstrap=bootstrap, + oob_score=oob_score, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + warm_start=warm_start, + max_samples=max_samples, + ) + + def predict(self, X): + """ + Predict regression target for X. + + The predicted regression target of an input sample is computed as the + mean predicted regression targets of the trees in the forest. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + y : ndarray of shape (n_samples,) or (n_samples, n_outputs) + The predicted values. + """ + check_is_fitted(self) + # Check data + X = self._validate_X_predict(X) + + # Assign chunk of trees to jobs + n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) + + # avoid storing the output of every estimator by summing them here + if self.n_outputs_ > 1: + y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64) + else: + y_hat = np.zeros((X.shape[0]), dtype=np.float64) + + # Parallel loop + lock = threading.Lock() + Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")( + delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock) + for e in self.estimators_ + ) + + y_hat /= len(self.estimators_) + + return y_hat + + @staticmethod + def _get_oob_predictions(tree, X): + """Compute the OOB predictions for an individual tree. + + Parameters + ---------- + tree : DecisionTreeRegressor object + A single decision tree regressor. + X : ndarray of shape (n_samples, n_features) + The OOB samples. + + Returns + ------- + y_pred : ndarray of shape (n_samples, 1, n_outputs) + The OOB associated predictions. + """ + y_pred = tree.predict(X, check_input=False) + if y_pred.ndim == 1: + # single output regression + y_pred = y_pred[:, np.newaxis, np.newaxis] + else: + # multioutput regression + y_pred = y_pred[:, np.newaxis, :] + return y_pred + + def _set_oob_score_and_attributes(self, X, y, scoring_function=None): + """Compute and set the OOB score and attributes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + y : ndarray of shape (n_samples, n_outputs) + The target matrix. + scoring_function : callable, default=None + Scoring function for OOB score. Defaults to `r2_score`. + """ + self.oob_prediction_ = super()._compute_oob_predictions(X, y).squeeze(axis=1) + if self.oob_prediction_.shape[-1] == 1: + # drop the n_outputs axis if there is a single output + self.oob_prediction_ = self.oob_prediction_.squeeze(axis=-1) + + if scoring_function is None: + scoring_function = r2_score + + self.oob_score_ = scoring_function(y, self.oob_prediction_) + + def _compute_partial_dependence_recursion(self, grid, target_features): + """Fast partial dependence computation. + + Parameters + ---------- + grid : ndarray of shape (n_samples, n_target_features) + The grid points on which the partial dependence should be + evaluated. 
+ target_features : ndarray of shape (n_target_features) + The set of target features for which the partial dependence + should be evaluated. + + Returns + ------- + averaged_predictions : ndarray of shape (n_samples,) + The value of the partial dependence function on each grid point. + """ + grid = np.asarray(grid, dtype=DTYPE, order="C") + averaged_predictions = np.zeros( + shape=grid.shape[0], dtype=np.float64, order="C" + ) + + for tree in self.estimators_: + # Note: we don't sum in parallel because the GIL isn't released in + # the fast method. + tree.tree_.compute_partial_dependence( + grid, target_features, averaged_predictions + ) + # Average over the forest + averaged_predictions /= len(self.estimators_) + + return averaged_predictions + + def _more_tags(self): + return {"multilabel": True} + + +class RandomForestClassifier(ForestClassifier): + """ + A random forest classifier. + + A random forest is a meta estimator that fits a number of decision tree + classifiers on various sub-samples of the dataset and uses averaging to + improve the predictive accuracy and control over-fitting. + Trees in the forest use the best split strategy, i.e. equivalent to passing + `splitter="best"` to the underlying :class:`~sklearn.tree.DecisionTreeRegressor`. + The sub-sample size is controlled with the `max_samples` parameter if + `bootstrap=True` (default), otherwise the whole dataset is used to build + each tree. + + For a comparison between tree-based ensemble models see the example + :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_estimators : int, default=100 + The number of trees in the forest. + + .. versionchanged:: 0.22 + The default value of ``n_estimators`` changed from 10 to 100 + in 0.22. + + criterion : {"gini", "entropy", "log_loss"}, default="gini" + The function to measure the quality of a split. Supported criteria are + "gini" for the Gini impurity and "log_loss" and "entropy" both for the + Shannon information gain, see :ref:`tree_mathematical_formulation`. + Note: This parameter is tree-specific. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. 
Samples have + equal weight when sample_weight is not provided. + + max_features : {"sqrt", "log2", None}, int or float, default="sqrt" + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to `"sqrt"`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + max_leaf_nodes : int, default=None + Grow trees with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + bootstrap : bool, default=True + Whether bootstrap samples are used when building trees. If False, the + whole dataset is used to build each tree. + + oob_score : bool or callable, default=False + Whether to use out-of-bag samples to estimate the generalization score. + By default, :func:`~sklearn.metrics.accuracy_score` is used. + Provide a callable with signature `metric(y_true, y_pred)` to use a + custom metric. Only available if `bootstrap=True`. + + n_jobs : int, default=None + The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`, + :meth:`decision_path` and :meth:`apply` are all parallelized over the + trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` + context. ``-1`` means using all processors. See :term:`Glossary + ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls both the randomness of the bootstrapping of the samples used + when building trees (if ``bootstrap=True``) and the sampling of the + features to consider when looking for the best split at each node + (if ``max_features < n_features``). + See :term:`Glossary ` for details. + + verbose : int, default=0 + Controls the verbosity when fitting and predicting. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit a whole + new forest. See :term:`Glossary ` and + :ref:`gradient_boosting_warm_start` for details. + + class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \ + default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. 
+ + Note that for multioutput (including multilabel) weights should be + defined for each class of every column in its own dict. For example, + for four-class multilabel classification weights should be + [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of + [{1:1}, {2:5}, {3:1}, {4:1}]. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + The "balanced_subsample" mode is the same as "balanced" except that + weights are computed based on the bootstrap sample for every tree + grown. + + For multi-output, the weights of each column of y will be multiplied. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + max_samples : int or float, default=None + If bootstrap is True, the number of samples to draw from X + to train each base estimator. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max(round(n_samples * max_samples), 1)` samples. Thus, + `max_samples` should be in the interval `(0.0, 1.0]`. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multiclass classifications (i.e. when `n_classes > 2`), + - multioutput classifications (i.e. when `n_outputs_ > 1`), + - classifications trained on data with missing values. + + The constraints hold over the probability of the positive class. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + estimator_ : :class:`~sklearn.tree.DecisionTreeClassifier` + The child estimator template used to create the collection of fitted + sub-estimators. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of DecisionTreeClassifier + The collection of fitted sub-estimators. + + classes_ : ndarray of shape (n_classes,) or a list of such arrays + The classes labels (single output problem), or a list of arrays of + class labels (multi-output problem). + + n_classes_ : int or list + The number of classes (single output problem), or a list containing the + number of classes for each output (multi-output problem). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. 
It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + oob_score_ : float + Score of the training dataset obtained using an out-of-bag estimate. + This attribute exists only when ``oob_score`` is True. + + oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \ + (n_samples, n_classes, n_outputs) + Decision function computed with out-of-bag estimate on the training + set. If n_estimators is small it might be possible that a data point + was never left out during the bootstrap. In this case, + `oob_decision_function_` might contain NaN. This attribute exists + only when ``oob_score`` is True. + + estimators_samples_ : list of arrays + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. Each subset is defined by an array of the indices selected. + + .. versionadded:: 1.4 + + See Also + -------- + sklearn.tree.DecisionTreeClassifier : A decision tree classifier. + sklearn.ensemble.ExtraTreesClassifier : Ensemble of extremely randomized + tree classifiers. + sklearn.ensemble.HistGradientBoostingClassifier : A Histogram-based Gradient + Boosting Classification Tree, very fast for big datasets (n_samples >= + 10_000). + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + The features are always randomly permuted at each split. Therefore, + the best found split may vary, even with the same training data, + ``max_features=n_features`` and ``bootstrap=False``, if the improvement + of the criterion is identical for several splits enumerated during the + search of the best split. To obtain a deterministic behaviour during + fitting, ``random_state`` has to be fixed. + + References + ---------- + .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001. + + Examples + -------- + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=1000, n_features=4, + ... n_informative=2, n_redundant=0, + ... random_state=0, shuffle=False) + >>> clf = RandomForestClassifier(max_depth=2, random_state=0) + >>> clf.fit(X, y) + RandomForestClassifier(...) 
+ >>> print(clf.predict([[0, 0, 0, 0]])) + [1] + """ + + _parameter_constraints: dict = { + **ForestClassifier._parameter_constraints, + **DecisionTreeClassifier._parameter_constraints, + "class_weight": [ + StrOptions({"balanced_subsample", "balanced"}), + dict, + list, + None, + ], + } + _parameter_constraints.pop("splitter") + + def __init__( + self, + n_estimators=100, + *, + criterion="gini", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="sqrt", + max_leaf_nodes=None, + min_impurity_decrease=0.0, + bootstrap=True, + oob_score=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + class_weight=None, + ccp_alpha=0.0, + max_samples=None, + monotonic_cst=None, + ): + super().__init__( + estimator=DecisionTreeClassifier(), + n_estimators=n_estimators, + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "max_leaf_nodes", + "min_impurity_decrease", + "random_state", + "ccp_alpha", + "monotonic_cst", + ), + bootstrap=bootstrap, + oob_score=oob_score, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + warm_start=warm_start, + class_weight=class_weight, + max_samples=max_samples, + ) + + self.criterion = criterion + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.max_leaf_nodes = max_leaf_nodes + self.min_impurity_decrease = min_impurity_decrease + self.monotonic_cst = monotonic_cst + self.ccp_alpha = ccp_alpha + + +class RandomForestRegressor(ForestRegressor): + """ + A random forest regressor. + + A random forest is a meta estimator that fits a number of decision tree + regressors on various sub-samples of the dataset and uses averaging to + improve the predictive accuracy and control over-fitting. + Trees in the forest use the best split strategy, i.e. equivalent to passing + `splitter="best"` to the underlying :class:`~sklearn.tree.DecisionTreeRegressor`. + The sub-sample size is controlled with the `max_samples` parameter if + `bootstrap=True` (default), otherwise the whole dataset is used to build + each tree. + + For a comparison between tree-based ensemble models see the example + :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_estimators : int, default=100 + The number of trees in the forest. + + .. versionchanged:: 0.22 + The default value of ``n_estimators`` changed from 10 to 100 + in 0.22. + + criterion : {"squared_error", "absolute_error", "friedman_mse", "poisson"}, \ + default="squared_error" + The function to measure the quality of a split. Supported criteria + are "squared_error" for the mean squared error, which is equal to + variance reduction as feature selection criterion and minimizes the L2 + loss using the mean of each terminal node, "friedman_mse", which uses + mean squared error with Friedman's improvement score for potential + splits, "absolute_error" for the mean absolute error, which minimizes + the L1 loss using the median of each terminal node, and "poisson" which + uses reduction in Poisson deviance to find splits. + Training using "absolute_error" is significantly slower + than when using "squared_error". + + .. versionadded:: 0.18 + Mean Absolute Error (MAE) criterion. + + .. 
versionadded:: 1.0 + Poisson criterion. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : {"sqrt", "log2", None}, int or float, default=1.0 + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None or 1.0, then `max_features=n_features`. + + .. note:: + The default of 1.0 is equivalent to bagged trees and more + randomness can be achieved by setting smaller values, e.g. 0.3. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to 1.0. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + max_leaf_nodes : int, default=None + Grow trees with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + bootstrap : bool, default=True + Whether bootstrap samples are used when building trees. If False, the + whole dataset is used to build each tree. 
+ + oob_score : bool or callable, default=False + Whether to use out-of-bag samples to estimate the generalization score. + By default, :func:`~sklearn.metrics.r2_score` is used. + Provide a callable with signature `metric(y_true, y_pred)` to use a + custom metric. Only available if `bootstrap=True`. + + n_jobs : int, default=None + The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`, + :meth:`decision_path` and :meth:`apply` are all parallelized over the + trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` + context. ``-1`` means using all processors. See :term:`Glossary + ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls both the randomness of the bootstrapping of the samples used + when building trees (if ``bootstrap=True``) and the sampling of the + features to consider when looking for the best split at each node + (if ``max_features < n_features``). + See :term:`Glossary ` for details. + + verbose : int, default=0 + Controls the verbosity when fitting and predicting. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit a whole + new forest. See :term:`Glossary ` and + :ref:`gradient_boosting_warm_start` for details. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + max_samples : int or float, default=None + If bootstrap is True, the number of samples to draw from X + to train each base estimator. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max(round(n_samples * max_samples), 1)` samples. Thus, + `max_samples` should be in the interval `(0.0, 1.0]`. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonically increasing + - 0: no constraint + - -1: monotonically decreasing + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multioutput regressions (i.e. when `n_outputs_ > 1`), + - regressions trained on data with missing values. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + estimator_ : :class:`~sklearn.tree.DecisionTreeRegressor` + The child estimator template used to create the collection of fitted + sub-estimators. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of DecisionTreeRegressor + The collection of fitted sub-estimators. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + oob_score_ : float + Score of the training dataset obtained using an out-of-bag estimate. + This attribute exists only when ``oob_score`` is True. + + oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs) + Prediction computed with out-of-bag estimate on the training set. + This attribute exists only when ``oob_score`` is True. + + estimators_samples_ : list of arrays + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. Each subset is defined by an array of the indices selected. + + .. versionadded:: 1.4 + + See Also + -------- + sklearn.tree.DecisionTreeRegressor : A decision tree regressor. + sklearn.ensemble.ExtraTreesRegressor : Ensemble of extremely randomized + tree regressors. + sklearn.ensemble.HistGradientBoostingRegressor : A Histogram-based Gradient + Boosting Regression Tree, very fast for big datasets (n_samples >= + 10_000). + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + The features are always randomly permuted at each split. Therefore, + the best found split may vary, even with the same training data, + ``max_features=n_features`` and ``bootstrap=False``, if the improvement + of the criterion is identical for several splits enumerated during the + search of the best split. To obtain a deterministic behaviour during + fitting, ``random_state`` has to be fixed. + + The default value ``max_features=1.0`` uses ``n_features`` + rather than ``n_features / 3``. The latter was originally suggested in + [1], whereas the former was more recently justified empirically in [2]. + + References + ---------- + .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001. + + .. [2] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized + trees", Machine Learning, 63(1), 3-42, 2006. + + Examples + -------- + >>> from sklearn.ensemble import RandomForestRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_features=4, n_informative=2, + ... random_state=0, shuffle=False) + >>> regr = RandomForestRegressor(max_depth=2, random_state=0) + >>> regr.fit(X, y) + RandomForestRegressor(...) 
+ >>> print(regr.predict([[0, 0, 0, 0]])) + [-8.32987858] + """ + + _parameter_constraints: dict = { + **ForestRegressor._parameter_constraints, + **DecisionTreeRegressor._parameter_constraints, + } + _parameter_constraints.pop("splitter") + + def __init__( + self, + n_estimators=100, + *, + criterion="squared_error", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=1.0, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + bootstrap=True, + oob_score=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + ccp_alpha=0.0, + max_samples=None, + monotonic_cst=None, + ): + super().__init__( + estimator=DecisionTreeRegressor(), + n_estimators=n_estimators, + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "max_leaf_nodes", + "min_impurity_decrease", + "random_state", + "ccp_alpha", + "monotonic_cst", + ), + bootstrap=bootstrap, + oob_score=oob_score, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + warm_start=warm_start, + max_samples=max_samples, + ) + + self.criterion = criterion + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.max_leaf_nodes = max_leaf_nodes + self.min_impurity_decrease = min_impurity_decrease + self.ccp_alpha = ccp_alpha + self.monotonic_cst = monotonic_cst + + +class ExtraTreesClassifier(ForestClassifier): + """ + An extra-trees classifier. + + This class implements a meta estimator that fits a number of + randomized decision trees (a.k.a. extra-trees) on various sub-samples + of the dataset and uses averaging to improve the predictive accuracy + and control over-fitting. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_estimators : int, default=100 + The number of trees in the forest. + + .. versionchanged:: 0.22 + The default value of ``n_estimators`` changed from 10 to 100 + in 0.22. + + criterion : {"gini", "entropy", "log_loss"}, default="gini" + The function to measure the quality of a split. Supported criteria are + "gini" for the Gini impurity and "log_loss" and "entropy" both for the + Shannon information gain, see :ref:`tree_mathematical_formulation`. + Note: This parameter is tree-specific. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. 
+ - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : {"sqrt", "log2", None}, int or float, default="sqrt" + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to `"sqrt"`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + max_leaf_nodes : int, default=None + Grow trees with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + bootstrap : bool, default=False + Whether bootstrap samples are used when building trees. If False, the + whole dataset is used to build each tree. + + oob_score : bool or callable, default=False + Whether to use out-of-bag samples to estimate the generalization score. + By default, :func:`~sklearn.metrics.accuracy_score` is used. + Provide a callable with signature `metric(y_true, y_pred)` to use a + custom metric. Only available if `bootstrap=True`. + + n_jobs : int, default=None + The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`, + :meth:`decision_path` and :meth:`apply` are all parallelized over the + trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` + context. ``-1`` means using all processors. See :term:`Glossary + ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls 3 sources of randomness: + + - the bootstrapping of the samples used when building trees + (if ``bootstrap=True``) + - the sampling of the features to consider when looking for the best + split at each node (if ``max_features < n_features``) + - the draw of the splits for each of the `max_features` + + See :term:`Glossary ` for details. + + verbose : int, default=0 + Controls the verbosity when fitting and predicting. 
+ + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit a whole + new forest. See :term:`Glossary ` and + :ref:`gradient_boosting_warm_start` for details. + + class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \ + default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. + + Note that for multioutput (including multilabel) weights should be + defined for each class of every column in its own dict. For example, + for four-class multilabel classification weights should be + [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of + [{1:1}, {2:5}, {3:1}, {4:1}]. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + The "balanced_subsample" mode is the same as "balanced" except that + weights are computed based on the bootstrap sample for every tree + grown. + + For multi-output, the weights of each column of y will be multiplied. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + max_samples : int or float, default=None + If bootstrap is True, the number of samples to draw from X + to train each base estimator. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. Thus, + `max_samples` should be in the interval `(0.0, 1.0]`. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonically increasing + - 0: no constraint + - -1: monotonically decreasing + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multiclass classifications (i.e. when `n_classes > 2`), + - multioutput classifications (i.e. when `n_outputs_ > 1`), + - classifications trained on data with missing values. + + The constraints hold over the probability of the positive class. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + estimator_ : :class:`~sklearn.tree.ExtraTreeClassifier` + The child estimator template used to create the collection of fitted + sub-estimators. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of DecisionTreeClassifier + The collection of fitted sub-estimators. + + classes_ : ndarray of shape (n_classes,) or a list of such arrays + The classes labels (single output problem), or a list of arrays of + class labels (multi-output problem). + + n_classes_ : int or list + The number of classes (single output problem), or a list containing the + number of classes for each output (multi-output problem). 
+ + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + oob_score_ : float + Score of the training dataset obtained using an out-of-bag estimate. + This attribute exists only when ``oob_score`` is True. + + oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \ + (n_samples, n_classes, n_outputs) + Decision function computed with out-of-bag estimate on the training + set. If n_estimators is small it might be possible that a data point + was never left out during the bootstrap. In this case, + `oob_decision_function_` might contain NaN. This attribute exists + only when ``oob_score`` is True. + + estimators_samples_ : list of arrays + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. Each subset is defined by an array of the indices selected. + + .. versionadded:: 1.4 + + See Also + -------- + ExtraTreesRegressor : An extra-trees regressor with random splits. + RandomForestClassifier : A random forest classifier with optimal splits. + RandomForestRegressor : Ensemble regressor using trees with optimal splits. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized + trees", Machine Learning, 63(1), 3-42, 2006. 
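Editor's note: the `feature_importances_` docstrings above warn that impurity-based importances can be misleading for high-cardinality features and point to `sklearn.inspection.permutation_importance` as an alternative. A minimal sketch of that comparison, using real scikit-learn APIs but with an illustrative synthetic dataset and split that are not part of this file:

from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=8, n_informative=3, random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=0)

clf = ExtraTreesClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)

# Impurity-based importances are derived from the training data and can
# favour high-cardinality features, as the docstring warns.
print(clf.feature_importances_)

# Permutation importances evaluated on held-out data, the alternative the
# docstring recommends.
result = permutation_importance(clf, X_val, y_val, n_repeats=10, random_state=0)
print(result.importances_mean)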
+ + Examples + -------- + >>> from sklearn.ensemble import ExtraTreesClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_features=4, random_state=0) + >>> clf = ExtraTreesClassifier(n_estimators=100, random_state=0) + >>> clf.fit(X, y) + ExtraTreesClassifier(random_state=0) + >>> clf.predict([[0, 0, 0, 0]]) + array([1]) + """ + + _parameter_constraints: dict = { + **ForestClassifier._parameter_constraints, + **DecisionTreeClassifier._parameter_constraints, + "class_weight": [ + StrOptions({"balanced_subsample", "balanced"}), + dict, + list, + None, + ], + } + _parameter_constraints.pop("splitter") + + def __init__( + self, + n_estimators=100, + *, + criterion="gini", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="sqrt", + max_leaf_nodes=None, + min_impurity_decrease=0.0, + bootstrap=False, + oob_score=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + class_weight=None, + ccp_alpha=0.0, + max_samples=None, + monotonic_cst=None, + ): + super().__init__( + estimator=ExtraTreeClassifier(), + n_estimators=n_estimators, + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "max_leaf_nodes", + "min_impurity_decrease", + "random_state", + "ccp_alpha", + "monotonic_cst", + ), + bootstrap=bootstrap, + oob_score=oob_score, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + warm_start=warm_start, + class_weight=class_weight, + max_samples=max_samples, + ) + + self.criterion = criterion + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.max_leaf_nodes = max_leaf_nodes + self.min_impurity_decrease = min_impurity_decrease + self.ccp_alpha = ccp_alpha + self.monotonic_cst = monotonic_cst + + +class ExtraTreesRegressor(ForestRegressor): + """ + An extra-trees regressor. + + This class implements a meta estimator that fits a number of + randomized decision trees (a.k.a. extra-trees) on various sub-samples + of the dataset and uses averaging to improve the predictive accuracy + and control over-fitting. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_estimators : int, default=100 + The number of trees in the forest. + + .. versionchanged:: 0.22 + The default value of ``n_estimators`` changed from 10 to 100 + in 0.22. + + criterion : {"squared_error", "absolute_error", "friedman_mse", "poisson"}, \ + default="squared_error" + The function to measure the quality of a split. Supported criteria + are "squared_error" for the mean squared error, which is equal to + variance reduction as feature selection criterion and minimizes the L2 + loss using the mean of each terminal node, "friedman_mse", which uses + mean squared error with Friedman's improvement score for potential + splits, "absolute_error" for the mean absolute error, which minimizes + the L1 loss using the median of each terminal node, and "poisson" which + uses reduction in Poisson deviance to find splits. + Training using "absolute_error" is significantly slower + than when using "squared_error". + + .. versionadded:: 0.18 + Mean Absolute Error (MAE) criterion. + + max_depth : int, default=None + The maximum depth of the tree. 
If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : {"sqrt", "log2", None}, int or float, default=1.0 + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None or 1.0, then `max_features=n_features`. + + .. note:: + The default of 1.0 is equivalent to bagged trees and more + randomness can be achieved by setting smaller values, e.g. 0.3. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to 1.0. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + max_leaf_nodes : int, default=None + Grow trees with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + bootstrap : bool, default=False + Whether bootstrap samples are used when building trees. If False, the + whole dataset is used to build each tree. + + oob_score : bool or callable, default=False + Whether to use out-of-bag samples to estimate the generalization score. + By default, :func:`~sklearn.metrics.r2_score` is used. 
+ Provide a callable with signature `metric(y_true, y_pred)` to use a + custom metric. Only available if `bootstrap=True`. + + n_jobs : int, default=None + The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`, + :meth:`decision_path` and :meth:`apply` are all parallelized over the + trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` + context. ``-1`` means using all processors. See :term:`Glossary + ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls 3 sources of randomness: + + - the bootstrapping of the samples used when building trees + (if ``bootstrap=True``) + - the sampling of the features to consider when looking for the best + split at each node (if ``max_features < n_features``) + - the draw of the splits for each of the `max_features` + + See :term:`Glossary ` for details. + + verbose : int, default=0 + Controls the verbosity when fitting and predicting. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit a whole + new forest. See :term:`Glossary ` and + :ref:`gradient_boosting_warm_start` for details. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + max_samples : int or float, default=None + If bootstrap is True, the number of samples to draw from X + to train each base estimator. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. Thus, + `max_samples` should be in the interval `(0.0, 1.0]`. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonically increasing + - 0: no constraint + - -1: monotonically decreasing + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multioutput regressions (i.e. when `n_outputs_ > 1`), + - regressions trained on data with missing values. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` + The child estimator template used to create the collection of fitted + sub-estimators. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of DecisionTreeRegressor + The collection of fitted sub-estimators. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs. + + oob_score_ : float + Score of the training dataset obtained using an out-of-bag estimate. + This attribute exists only when ``oob_score`` is True. + + oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs) + Prediction computed with out-of-bag estimate on the training set. + This attribute exists only when ``oob_score`` is True. + + estimators_samples_ : list of arrays + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. Each subset is defined by an array of the indices selected. + + .. versionadded:: 1.4 + + See Also + -------- + ExtraTreesClassifier : An extra-trees classifier with random splits. + RandomForestClassifier : A random forest classifier with optimal splits. + RandomForestRegressor : Ensemble regressor using trees with optimal splits. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", + Machine Learning, 63(1), 3-42, 2006. + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.ensemble import ExtraTreesRegressor + >>> X, y = load_diabetes(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> reg = ExtraTreesRegressor(n_estimators=100, random_state=0).fit( + ... X_train, y_train) + >>> reg.score(X_test, y_test) + 0.2727... + """ + + _parameter_constraints: dict = { + **ForestRegressor._parameter_constraints, + **DecisionTreeRegressor._parameter_constraints, + } + _parameter_constraints.pop("splitter") + + def __init__( + self, + n_estimators=100, + *, + criterion="squared_error", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=1.0, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + bootstrap=False, + oob_score=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + ccp_alpha=0.0, + max_samples=None, + monotonic_cst=None, + ): + super().__init__( + estimator=ExtraTreeRegressor(), + n_estimators=n_estimators, + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "max_leaf_nodes", + "min_impurity_decrease", + "random_state", + "ccp_alpha", + "monotonic_cst", + ), + bootstrap=bootstrap, + oob_score=oob_score, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + warm_start=warm_start, + max_samples=max_samples, + ) + + self.criterion = criterion + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.max_leaf_nodes = max_leaf_nodes + self.min_impurity_decrease = min_impurity_decrease + self.ccp_alpha = ccp_alpha + self.monotonic_cst = monotonic_cst + + +class RandomTreesEmbedding(TransformerMixin, BaseForest): + """ + An ensemble of totally random trees. 
+ + An unsupervised transformation of a dataset to a high-dimensional + sparse representation. A datapoint is coded according to which leaf of + each tree it is sorted into. Using a one-hot encoding of the leaves, + this leads to a binary coding with as many ones as there are trees in + the forest. + + The dimensionality of the resulting representation is + ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``, + the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_estimators : int, default=100 + Number of trees in the forest. + + .. versionchanged:: 0.22 + The default value of ``n_estimators`` changed from 10 to 100 + in 0.22. + + max_depth : int, default=5 + The maximum depth of each tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` is the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` is the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_leaf_nodes : int, default=None + Grow trees with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + sparse_output : bool, default=True + Whether or not to return a sparse CSR matrix, as default behavior, + or to return a dense array compatible with dense pipeline operators. + + n_jobs : int, default=None + The number of jobs to run in parallel. :meth:`fit`, :meth:`transform`, + :meth:`decision_path` and :meth:`apply` are all parallelized over the + trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` + context. ``-1`` means using all processors. 
See :term:`Glossary + ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls the generation of the random `y` used to fit the trees + and the draw of the splits for each feature at the trees' nodes. + See :term:`Glossary ` for details. + + verbose : int, default=0 + Controls the verbosity when fitting and predicting. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit a whole + new forest. See :term:`Glossary ` and + :ref:`gradient_boosting_warm_start` for details. + + Attributes + ---------- + estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance + The child estimator template used to create the collection of fitted + sub-estimators. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of :class:`~sklearn.tree.ExtraTreeRegressor` instances + The collection of fitted sub-estimators. + + feature_importances_ : ndarray of shape (n_features,) + The feature importances (the higher, the more important the feature). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + one_hot_encoder_ : OneHotEncoder instance + One-hot encoder used to create the sparse embedding. + + estimators_samples_ : list of arrays + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. Each subset is defined by an array of the indices selected. + + .. versionadded:: 1.4 + + See Also + -------- + ExtraTreesClassifier : An extra-trees classifier. + ExtraTreesRegressor : An extra-trees regressor. + RandomForestClassifier : A random forest classifier. + RandomForestRegressor : A random forest regressor. + sklearn.tree.ExtraTreeClassifier: An extremely randomized + tree classifier. + sklearn.tree.ExtraTreeRegressor : An extremely randomized + tree regressor. + + References + ---------- + .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", + Machine Learning, 63(1), 3-42, 2006. + .. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative + visual codebooks using randomized clustering forests" + NIPS 2007 + + Examples + -------- + >>> from sklearn.ensemble import RandomTreesEmbedding + >>> X = [[0,0], [1,0], [0,1], [-1,0], [0,-1]] + >>> random_trees = RandomTreesEmbedding( + ... 
n_estimators=5, random_state=0, max_depth=1).fit(X) + >>> X_sparse_embedding = random_trees.transform(X) + >>> X_sparse_embedding.toarray() + array([[0., 1., 1., 0., 1., 0., 0., 1., 1., 0.], + [0., 1., 1., 0., 1., 0., 0., 1., 1., 0.], + [0., 1., 0., 1., 0., 1., 0., 1., 0., 1.], + [1., 0., 1., 0., 1., 0., 1., 0., 1., 0.], + [0., 1., 1., 0., 1., 0., 0., 1., 1., 0.]]) + """ + + _parameter_constraints: dict = { + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "warm_start": ["boolean"], + **BaseDecisionTree._parameter_constraints, + "sparse_output": ["boolean"], + } + for param in ("max_features", "ccp_alpha", "splitter", "monotonic_cst"): + _parameter_constraints.pop(param) + + criterion = "squared_error" + max_features = 1 + + def __init__( + self, + n_estimators=100, + *, + max_depth=5, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + sparse_output=True, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + ): + super().__init__( + estimator=ExtraTreeRegressor(), + n_estimators=n_estimators, + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "max_leaf_nodes", + "min_impurity_decrease", + "random_state", + ), + bootstrap=False, + oob_score=False, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + warm_start=warm_start, + max_samples=None, + ) + + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_leaf_nodes = max_leaf_nodes + self.min_impurity_decrease = min_impurity_decrease + self.sparse_output = sparse_output + + def _set_oob_score_and_attributes(self, X, y, scoring_function=None): + raise NotImplementedError("OOB score not supported by tree embedding") + + def fit(self, X, y=None, sample_weight=None): + """ + Fit estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Use ``dtype=np.float32`` for maximum + efficiency. Sparse matrices are also supported, use sparse + ``csc_matrix`` for maximum efficiency. + + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. In the case of + classification, splits are also ignored if they would result in any + single class carrying a negative weight in either child node. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Parameters are validated in fit_transform + self.fit_transform(X, y, sample_weight=sample_weight) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, sample_weight=None): + """ + Fit estimator and transform dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data used to build forests. Use ``dtype=np.float32`` for + maximum efficiency. + + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. 
Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. In the case of + classification, splits are also ignored if they would result in any + single class carrying a negative weight in either child node. + + Returns + ------- + X_transformed : sparse matrix of shape (n_samples, n_out) + Transformed dataset. + """ + rnd = check_random_state(self.random_state) + y = rnd.uniform(size=_num_samples(X)) + super().fit(X, y, sample_weight=sample_weight) + + self.one_hot_encoder_ = OneHotEncoder(sparse_output=self.sparse_output) + output = self.one_hot_encoder_.fit_transform(self.apply(X)) + self._n_features_out = output.shape[1] + return output + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Only used to validate feature names with the names seen in :meth:`fit`. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names, in the format of + `randomtreesembedding_{tree}_{leaf}`, where `tree` is the tree used + to generate the leaf and `leaf` is the index of a leaf node + in that tree. Note that the node indexing scheme is used to + index both nodes with children (split nodes) and leaf nodes. + Only the latter can be present as output features. + As a consequence, there are missing indices in the output + feature names. + """ + check_is_fitted(self, "_n_features_out") + _check_feature_names_in( + self, input_features=input_features, generate_names=False + ) + + feature_names = [ + f"randomtreesembedding_{tree}_{leaf}" + for tree in range(self.n_estimators) + for leaf in self.one_hot_encoder_.categories_[tree] + ] + return np.asarray(feature_names, dtype=object) + + def transform(self, X): + """ + Transform dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data to be transformed. Use ``dtype=np.float32`` for maximum + efficiency. Sparse matrices are also supported, use sparse + ``csr_matrix`` for maximum efficiency. + + Returns + ------- + X_transformed : sparse matrix of shape (n_samples, n_out) + Transformed dataset. + """ + check_is_fitted(self) + return self.one_hot_encoder_.transform(self.apply(X)) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_gb.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_gb.py new file mode 100644 index 0000000000000000000000000000000000000000..7c5dd6fbdac3c563827684cefd47e4d7444f53a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_gb.py @@ -0,0 +1,2168 @@ +"""Gradient Boosted Regression Trees. + +This module contains methods for fitting gradient boosted regression trees for +both classification and regression. + +The module structure is the following: + +- The ``BaseGradientBoosting`` base class implements a common ``fit`` method + for all the estimators in the module. Regression and classification + only differ in the concrete ``LossFunction`` used. + +- ``GradientBoostingClassifier`` implements gradient boosting for + classification problems. + +- ``GradientBoostingRegressor`` implements gradient boosting for + regression problems. 
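The division of labour described in the list above can be exercised end to end. The following minimal sketch (toy datasets and hyperparameter values are illustrative, not taken from this module) fits one classifier and one regressor through the shared ``BaseGradientBoosting.fit`` path:

    from sklearn.datasets import make_classification, make_regression
    from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor

    # Both estimators run the same boosting loop; only the optimized loss differs.
    X_clf, y_clf = make_classification(n_samples=200, random_state=0)
    X_reg, y_reg = make_regression(n_samples=200, random_state=0)

    clf = GradientBoostingClassifier(loss="log_loss", n_estimators=50, random_state=0)
    reg = GradientBoostingRegressor(loss="squared_error", n_estimators=50, random_state=0)
    print(clf.fit(X_clf, y_clf).score(X_clf, y_clf))
    print(reg.fit(X_reg, y_reg).score(X_reg, y_reg))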
+"""
+
+# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
+#          Arnaud Joly, Jacob Schreiber
+# License: BSD 3 clause
+
+import math
+import warnings
+from abc import ABCMeta, abstractmethod
+from numbers import Integral, Real
+from time import time
+
+import numpy as np
+from scipy.sparse import csc_matrix, csr_matrix, issparse
+
+from .._loss.loss import (
+    _LOSSES,
+    AbsoluteError,
+    ExponentialLoss,
+    HalfBinomialLoss,
+    HalfMultinomialLoss,
+    HalfSquaredError,
+    HuberLoss,
+    PinballLoss,
+)
+from ..base import ClassifierMixin, RegressorMixin, _fit_context, is_classifier
+from ..dummy import DummyClassifier, DummyRegressor
+from ..exceptions import NotFittedError
+from ..model_selection import train_test_split
+from ..preprocessing import LabelEncoder
+from ..tree import DecisionTreeRegressor
+from ..tree._tree import DOUBLE, DTYPE, TREE_LEAF
+from ..utils import check_array, check_random_state, column_or_1d
+from ..utils._param_validation import HasMethods, Interval, StrOptions
+from ..utils.multiclass import check_classification_targets
+from ..utils.stats import _weighted_percentile
+from ..utils.validation import _check_sample_weight, check_is_fitted
+from ._base import BaseEnsemble
+from ._gradient_boosting import _random_sample_mask, predict_stage, predict_stages
+
+_LOSSES = _LOSSES.copy()
+_LOSSES.update(
+    {
+        "quantile": PinballLoss,
+        "huber": HuberLoss,
+    }
+)
+
+
+def _safe_divide(numerator, denominator):
+    """Prevents overflow and division by zero."""
+    # This is used for classifiers where the denominator might become zero exactly.
+    # For instance for log loss, HalfBinomialLoss, if proba=0 or proba=1 exactly, then
+    # denominator = hessian = 0, and we should set the node value in the line search to
+    # zero as there is no improvement of the loss possible.
+    # For numerical safety, we do this already for extremely tiny values.
+    if abs(denominator) < 1e-150:
+        return 0.0
+    else:
+        # Cast to Python float to trigger Python errors, e.g. ZeroDivisionError,
+        # without relying on `np.errstate` that is not supported by Pyodide.
+        result = float(numerator) / float(denominator)
+        if math.isinf(result):
+            warnings.warn("overflow encountered in _safe_divide", RuntimeWarning)
+        return result
+
+
+def _init_raw_predictions(X, estimator, loss, use_predict_proba):
+    """Return the initial raw predictions.
+
+    Parameters
+    ----------
+    X : ndarray of shape (n_samples, n_features)
+        The data array.
+    estimator : object
+        The estimator to use to compute the predictions.
+    loss : BaseLoss
+        An instance of a loss function class.
+    use_predict_proba : bool
+        Whether estimator.predict_proba is used instead of estimator.predict.
+
+    Returns
+    -------
+    raw_predictions : ndarray of shape (n_samples, K)
+        The initial raw predictions. K is equal to 1 for binary
+        classification and regression, and equal to the number of classes
+        for multiclass classification. ``raw_predictions`` is cast to
+        float64.
+    """
+    # TODO: Use loss.fit_intercept_only where appropriate instead of
+    # DummyRegressor which is the default given by the `init` parameter,
+    # see also _init_state.
+    if use_predict_proba:
+        # Our parameter validation, set via _fit_context and _parameter_constraints
+        # already guarantees that estimator has a predict_proba method.
+ predictions = estimator.predict_proba(X) + if not loss.is_multiclass: + predictions = predictions[:, 1] # probability of positive class + eps = np.finfo(np.float32).eps # FIXME: This is quite large! + predictions = np.clip(predictions, eps, 1 - eps, dtype=np.float64) + else: + predictions = estimator.predict(X).astype(np.float64) + + if predictions.ndim == 1: + return loss.link.link(predictions).reshape(-1, 1) + else: + return loss.link.link(predictions) + + +def _update_terminal_regions( + loss, + tree, + X, + y, + neg_gradient, + raw_prediction, + sample_weight, + sample_mask, + learning_rate=0.1, + k=0, +): + """Update the leaf values to be predicted by the tree and raw_prediction. + + The current raw predictions of the model (of this stage) are updated. + + Additionally, the terminal regions (=leaves) of the given tree are updated as well. + This corresponds to the line search step in "Greedy Function Approximation" by + Friedman, Algorithm 1 step 5. + + Update equals: + argmin_{x} loss(y_true, raw_prediction_old + x * tree.value) + + For non-trivial cases like the Binomial loss, the update has no closed formula and + is an approximation, again, see the Friedman paper. + + Also note that the update formula for the SquaredError is the identity. Therefore, + in this case, the leaf values don't need an update and only the raw_predictions are + updated (with the learning rate included). + + Parameters + ---------- + loss : BaseLoss + tree : tree.Tree + The tree object. + X : ndarray of shape (n_samples, n_features) + The data array. + y : ndarray of shape (n_samples,) + The target labels. + neg_gradient : ndarray of shape (n_samples,) + The negative gradient. + raw_prediction : ndarray of shape (n_samples, n_trees_per_iteration) + The raw predictions (i.e. values from the tree leaves) of the + tree ensemble at iteration ``i - 1``. + sample_weight : ndarray of shape (n_samples,) + The weight of each sample. + sample_mask : ndarray of shape (n_samples,) + The sample mask to be used. + learning_rate : float, default=0.1 + Learning rate shrinks the contribution of each tree by + ``learning_rate``. + k : int, default=0 + The index of the estimator being updated. + """ + # compute leaf for each sample in ``X``. + terminal_regions = tree.apply(X) + + if not isinstance(loss, HalfSquaredError): + # mask all which are not in sample mask. + masked_terminal_regions = terminal_regions.copy() + masked_terminal_regions[~sample_mask] = -1 + + if isinstance(loss, HalfBinomialLoss): + + def compute_update(y_, indices, neg_gradient, raw_prediction, k): + # Make a single Newton-Raphson step, see "Additive Logistic Regression: + # A Statistical View of Boosting" FHT00 and note that we use a slightly + # different version (factor 2) of "F" with proba=expit(raw_prediction). 
+ # Our node estimate is given by: + # sum(w * (y - prob)) / sum(w * prob * (1 - prob)) + # we take advantage that: y - prob = neg_gradient + neg_g = neg_gradient.take(indices, axis=0) + prob = y_ - neg_g + # numerator = negative gradient = y - prob + numerator = np.average(neg_g, weights=sw) + # denominator = hessian = prob * (1 - prob) + denominator = np.average(prob * (1 - prob), weights=sw) + return _safe_divide(numerator, denominator) + + elif isinstance(loss, HalfMultinomialLoss): + + def compute_update(y_, indices, neg_gradient, raw_prediction, k): + # we take advantage that: y - prob = neg_gradient + neg_g = neg_gradient.take(indices, axis=0) + prob = y_ - neg_g + K = loss.n_classes + # numerator = negative gradient * (k - 1) / k + # Note: The factor (k - 1)/k appears in the original papers "Greedy + # Function Approximation" by Friedman and "Additive Logistic + # Regression" by Friedman, Hastie, Tibshirani. This factor is, however, + # wrong or at least arbitrary as it directly multiplies the + # learning_rate. We keep it for backward compatibility. + numerator = np.average(neg_g, weights=sw) + numerator *= (K - 1) / K + # denominator = (diagonal) hessian = prob * (1 - prob) + denominator = np.average(prob * (1 - prob), weights=sw) + return _safe_divide(numerator, denominator) + + elif isinstance(loss, ExponentialLoss): + + def compute_update(y_, indices, neg_gradient, raw_prediction, k): + neg_g = neg_gradient.take(indices, axis=0) + # numerator = negative gradient = y * exp(-raw) - (1-y) * exp(raw) + numerator = np.average(neg_g, weights=sw) + # denominator = hessian = y * exp(-raw) + (1-y) * exp(raw) + # if y=0: hessian = exp(raw) = -neg_g + # y=1: hessian = exp(-raw) = neg_g + hessian = neg_g.copy() + hessian[y_ == 0] *= -1 + denominator = np.average(hessian, weights=sw) + return _safe_divide(numerator, denominator) + + else: + + def compute_update(y_, indices, neg_gradient, raw_prediction, k): + return loss.fit_intercept_only( + y_true=y_ - raw_prediction[indices, k], + sample_weight=sw, + ) + + # update each leaf (= perform line search) + for leaf in np.nonzero(tree.children_left == TREE_LEAF)[0]: + indices = np.nonzero(masked_terminal_regions == leaf)[ + 0 + ] # of terminal regions + y_ = y.take(indices, axis=0) + sw = None if sample_weight is None else sample_weight[indices] + update = compute_update(y_, indices, neg_gradient, raw_prediction, k) + + # TODO: Multiply here by learning rate instead of everywhere else. + tree.value[leaf, 0, 0] = update + + # update predictions (both in-bag and out-of-bag) + raw_prediction[:, k] += learning_rate * tree.value[:, 0, 0].take( + terminal_regions, axis=0 + ) + + +def set_huber_delta(loss, y_true, raw_prediction, sample_weight=None): + """Calculate and set self.closs.delta based on self.quantile.""" + abserr = np.abs(y_true - raw_prediction.squeeze()) + # sample_weight is always a ndarray, never None. + delta = _weighted_percentile(abserr, sample_weight, 100 * loss.quantile) + loss.closs.delta = float(delta) + + +class VerboseReporter: + """Reports verbose output to stdout. + + Parameters + ---------- + verbose : int + Verbosity level. If ``verbose==1`` output is printed once in a while + (when iteration mod verbose_mod is zero).; if larger than 1 then output + is printed for each update. 
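Returning to the per-leaf line search in ``_update_terminal_regions`` above: for the binomial loss the update is a single Newton-Raphson step. The sketch below uses made-up numbers (leaf membership, probabilities and weights are purely illustrative) to show the quantities involved:

    import numpy as np

    # Toy values for the samples that fall into one leaf.
    y = np.array([1.0, 0.0, 1.0, 1.0])       # true labels
    prob = np.array([0.6, 0.3, 0.8, 0.4])    # current predicted probabilities
    sw = np.ones_like(y)                     # sample weights

    neg_gradient = y - prob                  # negative gradient of the half binomial loss
    numerator = np.average(neg_gradient, weights=sw)
    denominator = np.average(prob * (1 - prob), weights=sw)  # (diagonal) hessian
    leaf_value = numerator / denominator     # what ends up in tree.value[leaf, 0, 0]
    print(leaf_value)                        # raw predictions then add learning_rate * leaf_value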
+ """ + + def __init__(self, verbose): + self.verbose = verbose + + def init(self, est, begin_at_stage=0): + """Initialize reporter + + Parameters + ---------- + est : Estimator + The estimator + + begin_at_stage : int, default=0 + stage at which to begin reporting + """ + # header fields and line format str + header_fields = ["Iter", "Train Loss"] + verbose_fmt = ["{iter:>10d}", "{train_score:>16.4f}"] + # do oob? + if est.subsample < 1: + header_fields.append("OOB Improve") + verbose_fmt.append("{oob_impr:>16.4f}") + header_fields.append("Remaining Time") + verbose_fmt.append("{remaining_time:>16s}") + + # print the header line + print(("%10s " + "%16s " * (len(header_fields) - 1)) % tuple(header_fields)) + + self.verbose_fmt = " ".join(verbose_fmt) + # plot verbose info each time i % verbose_mod == 0 + self.verbose_mod = 1 + self.start_time = time() + self.begin_at_stage = begin_at_stage + + def update(self, j, est): + """Update reporter with new iteration. + + Parameters + ---------- + j : int + The new iteration. + est : Estimator + The estimator. + """ + do_oob = est.subsample < 1 + # we need to take into account if we fit additional estimators. + i = j - self.begin_at_stage # iteration relative to the start iter + if (i + 1) % self.verbose_mod == 0: + oob_impr = est.oob_improvement_[j] if do_oob else 0 + remaining_time = ( + (est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1) + ) + if remaining_time > 60: + remaining_time = "{0:.2f}m".format(remaining_time / 60.0) + else: + remaining_time = "{0:.2f}s".format(remaining_time) + print( + self.verbose_fmt.format( + iter=j + 1, + train_score=est.train_score_[j], + oob_impr=oob_impr, + remaining_time=remaining_time, + ) + ) + if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0): + # adjust verbose frequency (powers of 10) + self.verbose_mod *= 10 + + +class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta): + """Abstract base class for Gradient Boosting.""" + + _parameter_constraints: dict = { + **DecisionTreeRegressor._parameter_constraints, + "learning_rate": [Interval(Real, 0.0, None, closed="left")], + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "criterion": [StrOptions({"friedman_mse", "squared_error"})], + "subsample": [Interval(Real, 0.0, 1.0, closed="right")], + "verbose": ["verbose"], + "warm_start": ["boolean"], + "validation_fraction": [Interval(Real, 0.0, 1.0, closed="neither")], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0.0, None, closed="left")], + } + _parameter_constraints.pop("splitter") + _parameter_constraints.pop("monotonic_cst") + + @abstractmethod + def __init__( + self, + *, + loss, + learning_rate, + n_estimators, + criterion, + min_samples_split, + min_samples_leaf, + min_weight_fraction_leaf, + max_depth, + min_impurity_decrease, + init, + subsample, + max_features, + ccp_alpha, + random_state, + alpha=0.9, + verbose=0, + max_leaf_nodes=None, + warm_start=False, + validation_fraction=0.1, + n_iter_no_change=None, + tol=1e-4, + ): + self.n_estimators = n_estimators + self.learning_rate = learning_rate + self.loss = loss + self.criterion = criterion + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.subsample = subsample + self.max_features = max_features + self.max_depth = max_depth + self.min_impurity_decrease = min_impurity_decrease + self.ccp_alpha = ccp_alpha + self.init = init + 
self.random_state = random_state + self.alpha = alpha + self.verbose = verbose + self.max_leaf_nodes = max_leaf_nodes + self.warm_start = warm_start + self.validation_fraction = validation_fraction + self.n_iter_no_change = n_iter_no_change + self.tol = tol + + @abstractmethod + def _encode_y(self, y=None, sample_weight=None): + """Called by fit to validate and encode y.""" + + @abstractmethod + def _get_loss(self, sample_weight): + """Get loss object from sklearn._loss.loss.""" + + def _fit_stage( + self, + i, + X, + y, + raw_predictions, + sample_weight, + sample_mask, + random_state, + X_csc=None, + X_csr=None, + ): + """Fit another stage of ``n_trees_per_iteration_`` trees.""" + original_y = y + + if isinstance(self._loss, HuberLoss): + set_huber_delta( + loss=self._loss, + y_true=y, + raw_prediction=raw_predictions, + sample_weight=sample_weight, + ) + # TODO: Without oob, i.e. with self.subsample = 1.0, we could call + # self._loss.loss_gradient and use it to set train_score_. + # But note that train_score_[i] is the score AFTER fitting the i-th tree. + # Note: We need the negative gradient! + neg_gradient = -self._loss.gradient( + y_true=y, + raw_prediction=raw_predictions, + sample_weight=None, # We pass sample_weights to the tree directly. + ) + # 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1) + # on neg_gradient to simplify the loop over n_trees_per_iteration_. + if neg_gradient.ndim == 1: + neg_g_view = neg_gradient.reshape((-1, 1)) + else: + neg_g_view = neg_gradient + + for k in range(self.n_trees_per_iteration_): + if self._loss.is_multiclass: + y = np.array(original_y == k, dtype=np.float64) + + # induce regression tree on the negative gradient + tree = DecisionTreeRegressor( + criterion=self.criterion, + splitter="best", + max_depth=self.max_depth, + min_samples_split=self.min_samples_split, + min_samples_leaf=self.min_samples_leaf, + min_weight_fraction_leaf=self.min_weight_fraction_leaf, + min_impurity_decrease=self.min_impurity_decrease, + max_features=self.max_features, + max_leaf_nodes=self.max_leaf_nodes, + random_state=random_state, + ccp_alpha=self.ccp_alpha, + ) + + if self.subsample < 1.0: + # no inplace multiplication! 
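+                # The product below builds a new array: out-of-bag rows get weight 0 for
+                # this stage's tree fit while the caller's ``sample_weight`` stays untouched.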
+ sample_weight = sample_weight * sample_mask.astype(np.float64) + + X = X_csc if X_csc is not None else X + tree.fit( + X, neg_g_view[:, k], sample_weight=sample_weight, check_input=False + ) + + # update tree leaves + X_for_tree_update = X_csr if X_csr is not None else X + _update_terminal_regions( + self._loss, + tree.tree_, + X_for_tree_update, + y, + neg_g_view[:, k], + raw_predictions, + sample_weight, + sample_mask, + learning_rate=self.learning_rate, + k=k, + ) + + # add tree to ensemble + self.estimators_[i, k] = tree + + return raw_predictions + + def _set_max_features(self): + """Set self.max_features_.""" + if isinstance(self.max_features, str): + if self.max_features == "auto": + if is_classifier(self): + max_features = max(1, int(np.sqrt(self.n_features_in_))) + else: + max_features = self.n_features_in_ + elif self.max_features == "sqrt": + max_features = max(1, int(np.sqrt(self.n_features_in_))) + else: # self.max_features == "log2" + max_features = max(1, int(np.log2(self.n_features_in_))) + elif self.max_features is None: + max_features = self.n_features_in_ + elif isinstance(self.max_features, Integral): + max_features = self.max_features + else: # float + max_features = max(1, int(self.max_features * self.n_features_in_)) + + self.max_features_ = max_features + + def _init_state(self): + """Initialize model state and allocate model state data structures.""" + + self.init_ = self.init + if self.init_ is None: + if is_classifier(self): + self.init_ = DummyClassifier(strategy="prior") + elif isinstance(self._loss, (AbsoluteError, HuberLoss)): + self.init_ = DummyRegressor(strategy="quantile", quantile=0.5) + elif isinstance(self._loss, PinballLoss): + self.init_ = DummyRegressor(strategy="quantile", quantile=self.alpha) + else: + self.init_ = DummyRegressor(strategy="mean") + + self.estimators_ = np.empty( + (self.n_estimators, self.n_trees_per_iteration_), dtype=object + ) + self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64) + # do oob? 
+ if self.subsample < 1.0: + self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64) + self.oob_scores_ = np.zeros((self.n_estimators), dtype=np.float64) + self.oob_score_ = np.nan + + def _clear_state(self): + """Clear the state of the gradient boosting model.""" + if hasattr(self, "estimators_"): + self.estimators_ = np.empty((0, 0), dtype=object) + if hasattr(self, "train_score_"): + del self.train_score_ + if hasattr(self, "oob_improvement_"): + del self.oob_improvement_ + if hasattr(self, "oob_scores_"): + del self.oob_scores_ + if hasattr(self, "oob_score_"): + del self.oob_score_ + if hasattr(self, "init_"): + del self.init_ + if hasattr(self, "_rng"): + del self._rng + + def _resize_state(self): + """Add additional ``n_estimators`` entries to all attributes.""" + # self.n_estimators is the number of additional est to fit + total_n_estimators = self.n_estimators + if total_n_estimators < self.estimators_.shape[0]: + raise ValueError( + "resize with smaller n_estimators %d < %d" + % (total_n_estimators, self.estimators_[0]) + ) + + self.estimators_ = np.resize( + self.estimators_, (total_n_estimators, self.n_trees_per_iteration_) + ) + self.train_score_ = np.resize(self.train_score_, total_n_estimators) + if self.subsample < 1 or hasattr(self, "oob_improvement_"): + # if do oob resize arrays or create new if not available + if hasattr(self, "oob_improvement_"): + self.oob_improvement_ = np.resize( + self.oob_improvement_, total_n_estimators + ) + self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators) + self.oob_score_ = np.nan + else: + self.oob_improvement_ = np.zeros( + (total_n_estimators,), dtype=np.float64 + ) + self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64) + self.oob_score_ = np.nan + + def _is_fitted(self): + return len(getattr(self, "estimators_", [])) > 0 + + def _check_initialized(self): + """Check that the estimator is initialized, raising an error if not.""" + check_is_fitted(self) + + @_fit_context( + # GradientBoosting*.init is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None, monitor=None): + """Fit the gradient boosting model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + y : array-like of shape (n_samples,) + Target values (strings or integers in classification, real numbers + in regression) + For classification, labels must correspond to classes. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. In the case of + classification, splits are also ignored if they would result in any + single class carrying a negative weight in either child node. + + monitor : callable, default=None + The monitor is called after each iteration with the current + iteration, a reference to the estimator and the local variables of + ``_fit_stages`` as keyword arguments ``callable(i, self, + locals())``. If the callable returns ``True`` the fitting procedure + is stopped. The monitor can be used for various things such as + computing held-out estimates, early stopping, model introspect, and + snapshotting. + + Returns + ------- + self : object + Fitted estimator. 
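The ``monitor`` callback documented above receives the iteration index, the estimator and the local variables of ``_fit_stages``; returning ``True`` stops boosting. A hedged sketch (the dataset and the 20-stage cut-off are illustrative choices):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier

    def stop_after_20_stages(i, est, local_vars):
        # Returning True after stage index 19 halts the boosting loop.
        return i >= 19

    X, y = make_classification(n_samples=300, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=500, random_state=0)
    clf.fit(X, y, monitor=stop_after_20_stages)
    print(clf.n_estimators_)  # expected to report 20 with this monitor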
+ """ + if not self.warm_start: + self._clear_state() + + # Check input + # Since check_array converts both X and y to the same dtype, but the + # trees use different types for X and y, checking them separately. + + X, y = self._validate_data( + X, y, accept_sparse=["csr", "csc", "coo"], dtype=DTYPE, multi_output=True + ) + sample_weight_is_none = sample_weight is None + sample_weight = _check_sample_weight(sample_weight, X) + if sample_weight_is_none: + y = self._encode_y(y=y, sample_weight=None) + else: + y = self._encode_y(y=y, sample_weight=sample_weight) + y = column_or_1d(y, warn=True) # TODO: Is this still required? + + self._set_max_features() + + # self.loss is guaranteed to be a string + self._loss = self._get_loss(sample_weight=sample_weight) + + if self.n_iter_no_change is not None: + stratify = y if is_classifier(self) else None + ( + X_train, + X_val, + y_train, + y_val, + sample_weight_train, + sample_weight_val, + ) = train_test_split( + X, + y, + sample_weight, + random_state=self.random_state, + test_size=self.validation_fraction, + stratify=stratify, + ) + if is_classifier(self): + if self.n_classes_ != np.unique(y_train).shape[0]: + # We choose to error here. The problem is that the init + # estimator would be trained on y, which has some missing + # classes now, so its predictions would not have the + # correct shape. + raise ValueError( + "The training data after the early stopping split " + "is missing some classes. Try using another random " + "seed." + ) + else: + X_train, y_train, sample_weight_train = X, y, sample_weight + X_val = y_val = sample_weight_val = None + + n_samples = X_train.shape[0] + + # First time calling fit. + if not self._is_fitted(): + # init state + self._init_state() + + # fit initial model and initialize raw predictions + if self.init_ == "zero": + raw_predictions = np.zeros( + shape=(n_samples, self.n_trees_per_iteration_), + dtype=np.float64, + ) + else: + # XXX clean this once we have a support_sample_weight tag + if sample_weight_is_none: + self.init_.fit(X_train, y_train) + else: + msg = ( + "The initial estimator {} does not support sample " + "weights.".format(self.init_.__class__.__name__) + ) + try: + self.init_.fit( + X_train, y_train, sample_weight=sample_weight_train + ) + except TypeError as e: + if "unexpected keyword argument 'sample_weight'" in str(e): + # regular estimator without SW support + raise ValueError(msg) from e + else: # regular estimator whose input checking failed + raise + except ValueError as e: + if ( + "pass parameters to specific steps of " + "your pipeline using the " + "stepname__parameter" + in str(e) + ): # pipeline + raise ValueError(msg) from e + else: # regular estimator whose input checking failed + raise + + raw_predictions = _init_raw_predictions( + X_train, self.init_, self._loss, is_classifier(self) + ) + + begin_at_stage = 0 + + # The rng state must be preserved if warm_start is True + self._rng = check_random_state(self.random_state) + + # warm start: this is not the first time fit was called + else: + # add more estimators to fitted model + # invariant: warm_start = True + if self.n_estimators < self.estimators_.shape[0]: + raise ValueError( + "n_estimators=%d must be larger or equal to " + "estimators_.shape[0]=%d when " + "warm_start==True" % (self.n_estimators, self.estimators_.shape[0]) + ) + begin_at_stage = self.estimators_.shape[0] + # The requirements of _raw_predict + # are more constrained than fit. It accepts only CSR + # matrices. 
Finite values have already been checked in _validate_data. + X_train = check_array( + X_train, + dtype=DTYPE, + order="C", + accept_sparse="csr", + force_all_finite=False, + ) + raw_predictions = self._raw_predict(X_train) + self._resize_state() + + # fit the boosting stages + n_stages = self._fit_stages( + X_train, + y_train, + raw_predictions, + sample_weight_train, + self._rng, + X_val, + y_val, + sample_weight_val, + begin_at_stage, + monitor, + ) + + # change shape of arrays after fit (early-stopping or additional ests) + if n_stages != self.estimators_.shape[0]: + self.estimators_ = self.estimators_[:n_stages] + self.train_score_ = self.train_score_[:n_stages] + if hasattr(self, "oob_improvement_"): + # OOB scores were computed + self.oob_improvement_ = self.oob_improvement_[:n_stages] + self.oob_scores_ = self.oob_scores_[:n_stages] + self.oob_score_ = self.oob_scores_[-1] + self.n_estimators_ = n_stages + return self + + def _fit_stages( + self, + X, + y, + raw_predictions, + sample_weight, + random_state, + X_val, + y_val, + sample_weight_val, + begin_at_stage=0, + monitor=None, + ): + """Iteratively fits the stages. + + For each stage it computes the progress (OOB, train score) + and delegates to ``_fit_stage``. + Returns the number of stages fit; might differ from ``n_estimators`` + due to early stopping. + """ + n_samples = X.shape[0] + do_oob = self.subsample < 1.0 + sample_mask = np.ones((n_samples,), dtype=bool) + n_inbag = max(1, int(self.subsample * n_samples)) + + if self.verbose: + verbose_reporter = VerboseReporter(verbose=self.verbose) + verbose_reporter.init(self, begin_at_stage) + + X_csc = csc_matrix(X) if issparse(X) else None + X_csr = csr_matrix(X) if issparse(X) else None + + if self.n_iter_no_change is not None: + loss_history = np.full(self.n_iter_no_change, np.inf) + # We create a generator to get the predictions for X_val after + # the addition of each successive stage + y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False) + + # Older versions of GBT had its own loss functions. With the new common + # private loss function submodule _loss, we often are a factor of 2 + # away from the old version. Here we keep backward compatibility for + # oob_scores_ and oob_improvement_, even if the old way is quite + # inconsistent (sometimes the gradient is half the gradient, sometimes + # not). 
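+        # For squared error and binomial log loss the historical deviance equals twice
+        # the new 'half' loss, hence the factor of 2 applied below.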
+ if isinstance( + self._loss, + ( + HalfSquaredError, + HalfBinomialLoss, + ), + ): + factor = 2 + else: + factor = 1 + + # perform boosting iterations + i = begin_at_stage + for i in range(begin_at_stage, self.n_estimators): + # subsampling + if do_oob: + sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) + y_oob_masked = y[~sample_mask] + sample_weight_oob_masked = sample_weight[~sample_mask] + if i == 0: # store the initial loss to compute the OOB score + initial_loss = factor * self._loss( + y_true=y_oob_masked, + raw_prediction=raw_predictions[~sample_mask], + sample_weight=sample_weight_oob_masked, + ) + + # fit next stage of trees + raw_predictions = self._fit_stage( + i, + X, + y, + raw_predictions, + sample_weight, + sample_mask, + random_state, + X_csc=X_csc, + X_csr=X_csr, + ) + + # track loss + if do_oob: + self.train_score_[i] = factor * self._loss( + y_true=y[sample_mask], + raw_prediction=raw_predictions[sample_mask], + sample_weight=sample_weight[sample_mask], + ) + self.oob_scores_[i] = factor * self._loss( + y_true=y_oob_masked, + raw_prediction=raw_predictions[~sample_mask], + sample_weight=sample_weight_oob_masked, + ) + previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1] + self.oob_improvement_[i] = previous_loss - self.oob_scores_[i] + self.oob_score_ = self.oob_scores_[-1] + else: + # no need to fancy index w/ no subsampling + self.train_score_[i] = factor * self._loss( + y_true=y, + raw_prediction=raw_predictions, + sample_weight=sample_weight, + ) + + if self.verbose > 0: + verbose_reporter.update(i, self) + + if monitor is not None: + early_stopping = monitor(i, self, locals()) + if early_stopping: + break + + # We also provide an early stopping based on the score from + # validation set (X_val, y_val), if n_iter_no_change is set + if self.n_iter_no_change is not None: + # By calling next(y_val_pred_iter), we get the predictions + # for X_val after the addition of the current stage + validation_loss = factor * self._loss( + y_val, next(y_val_pred_iter), sample_weight_val + ) + + # Require validation_score to be better (less) than at least + # one of the last n_iter_no_change evaluations + if np.any(validation_loss + self.tol < loss_history): + loss_history[i % len(loss_history)] = validation_loss + else: + break + + return i + 1 + + def _make_estimator(self, append=True): + # we don't need _make_estimator + raise NotImplementedError() + + def _raw_predict_init(self, X): + """Check input and compute raw predictions of the init estimator.""" + self._check_initialized() + X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) + if self.init_ == "zero": + raw_predictions = np.zeros( + shape=(X.shape[0], self.n_trees_per_iteration_), dtype=np.float64 + ) + else: + raw_predictions = _init_raw_predictions( + X, self.init_, self._loss, is_classifier(self) + ) + return raw_predictions + + def _raw_predict(self, X): + """Return the sum of the trees raw predictions (+ init estimator).""" + check_is_fitted(self) + raw_predictions = self._raw_predict_init(X) + predict_stages(self.estimators_, X, self.learning_rate, raw_predictions) + return raw_predictions + + def _staged_raw_predict(self, X, check_input=True): + """Compute raw predictions of ``X`` for each iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. 
Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + If False, the input arrays X will not be checked. + + Returns + ------- + raw_predictions : generator of ndarray of shape (n_samples, k) + The raw predictions of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + Regression and binary classification are special cases with + ``k == 1``, otherwise ``k==n_classes``. + """ + if check_input: + X = self._validate_data( + X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False + ) + raw_predictions = self._raw_predict_init(X) + for i in range(self.estimators_.shape[0]): + predict_stage(self.estimators_, i, X, self.learning_rate, raw_predictions) + yield raw_predictions.copy() + + @property + def feature_importances_(self): + """The impurity-based feature importances. + + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + Returns + ------- + feature_importances_ : ndarray of shape (n_features,) + The values of this array sum to 1, unless all trees are single node + trees consisting of only the root node, in which case it will be an + array of zeros. + """ + self._check_initialized() + + relevant_trees = [ + tree + for stage in self.estimators_ + for tree in stage + if tree.tree_.node_count > 1 + ] + if not relevant_trees: + # degenerate case where all trees have only one node + return np.zeros(shape=self.n_features_in_, dtype=np.float64) + + relevant_feature_importances = [ + tree.tree_.compute_feature_importances(normalize=False) + for tree in relevant_trees + ] + avg_feature_importances = np.mean( + relevant_feature_importances, axis=0, dtype=np.float64 + ) + return avg_feature_importances / np.sum(avg_feature_importances) + + def _compute_partial_dependence_recursion(self, grid, target_features): + """Fast partial dependence computation. + + Parameters + ---------- + grid : ndarray of shape (n_samples, n_target_features) + The grid points on which the partial dependence should be + evaluated. + target_features : ndarray of shape (n_target_features,) + The set of target features for which the partial dependence + should be evaluated. + + Returns + ------- + averaged_predictions : ndarray of shape \ + (n_trees_per_iteration_, n_samples) + The value of the partial dependence function on each grid point. + """ + if self.init is not None: + warnings.warn( + "Using recursion method with a non-constant init predictor " + "will lead to incorrect partial dependence values. " + "Got init=%s." 
+ % self.init, + UserWarning, + ) + grid = np.asarray(grid, dtype=DTYPE, order="C") + n_estimators, n_trees_per_stage = self.estimators_.shape + averaged_predictions = np.zeros( + (n_trees_per_stage, grid.shape[0]), dtype=np.float64, order="C" + ) + for stage in range(n_estimators): + for k in range(n_trees_per_stage): + tree = self.estimators_[stage, k].tree_ + tree.compute_partial_dependence( + grid, target_features, averaged_predictions[k] + ) + averaged_predictions *= self.learning_rate + + return averaged_predictions + + def apply(self, X): + """Apply trees in the ensemble to X, return leaf indices. + + .. versionadded:: 0.17 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will + be converted to a sparse ``csr_matrix``. + + Returns + ------- + X_leaves : array-like of shape (n_samples, n_estimators, n_classes) + For each datapoint x in X and for each tree in the ensemble, + return the index of the leaf x ends up in each estimator. + In the case of binary classification n_classes is 1. + """ + + self._check_initialized() + X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) + + # n_classes will be equal to 1 in the binary classification or the + # regression case. + n_estimators, n_classes = self.estimators_.shape + leaves = np.zeros((X.shape[0], n_estimators, n_classes)) + + for i in range(n_estimators): + for j in range(n_classes): + estimator = self.estimators_[i, j] + leaves[:, i, j] = estimator.apply(X, check_input=False) + + return leaves + + +class GradientBoostingClassifier(ClassifierMixin, BaseGradientBoosting): + """Gradient Boosting for classification. + + This algorithm builds an additive model in a forward stage-wise fashion; it + allows for the optimization of arbitrary differentiable loss functions. In + each stage ``n_classes_`` regression trees are fit on the negative gradient + of the loss function, e.g. binary or multiclass log loss. Binary + classification is a special case where only a single regression tree is + induced. + + :class:`sklearn.ensemble.HistGradientBoostingClassifier` is a much faster + variant of this algorithm for intermediate datasets (`n_samples >= 10_000`). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : {'log_loss', 'exponential'}, default='log_loss' + The loss function to be optimized. 'log_loss' refers to binomial and + multinomial deviance, the same as used in logistic regression. + It is a good choice for classification with probabilistic outputs. + For loss 'exponential', gradient boosting recovers the AdaBoost algorithm. + + learning_rate : float, default=0.1 + Learning rate shrinks the contribution of each tree by `learning_rate`. + There is a trade-off between learning_rate and n_estimators. + Values must be in the range `[0.0, inf)`. + + n_estimators : int, default=100 + The number of boosting stages to perform. Gradient boosting + is fairly robust to over-fitting so a large number usually + results in better performance. + Values must be in the range `[1, inf)`. + + subsample : float, default=1.0 + The fraction of samples to be used for fitting the individual base + learners. If smaller than 1.0 this results in Stochastic Gradient + Boosting. `subsample` interacts with the parameter `n_estimators`. + Choosing `subsample < 1.0` leads to a reduction of variance + and an increase in bias. 
+ Values must be in the range `(0.0, 1.0]`. + + criterion : {'friedman_mse', 'squared_error'}, default='friedman_mse' + The function to measure the quality of a split. Supported criteria are + 'friedman_mse' for the mean squared error with improvement score by + Friedman, 'squared_error' for mean squared error. The default value of + 'friedman_mse' is generally the best as it can provide a better + approximation in some cases. + + .. versionadded:: 0.18 + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, values must be in the range `[2, inf)`. + - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split` + will be `ceil(min_samples_split * n_samples)`. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, values must be in the range `[1, inf)`. + - If float, values must be in the range `(0.0, 1.0)` and `min_samples_leaf` + will be `ceil(min_samples_leaf * n_samples)`. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + Values must be in the range `[0.0, 0.5]`. + + max_depth : int or None, default=3 + Maximum depth of the individual regression estimators. The maximum + depth limits the number of nodes in the tree. Tune this parameter + for best performance; the best value depends on the interaction + of the input variables. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + If int, values must be in the range `[1, inf)`. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + Values must be in the range `[0.0, inf)`. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + init : estimator or 'zero', default=None + An estimator object that is used to compute the initial predictions. + ``init`` has to provide :term:`fit` and :term:`predict_proba`. If + 'zero', the initial raw predictions are set to zero. By default, a + ``DummyEstimator`` predicting the classes priors is used. + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to each Tree estimator at each + boosting iteration. + In addition, it controls the random permutation of the features at + each split (see Notes for more details). + It also controls the random splitting of the training data to obtain a + validation set if `n_iter_no_change` is not None. 
+ Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + max_features : {'sqrt', 'log2'}, int or float, default=None + The number of features to consider when looking for the best split: + + - If int, values must be in the range `[1, inf)`. + - If float, values must be in the range `(0.0, 1.0]` and the features + considered at each split will be `max(1, int(max_features * n_features_in_))`. + - If 'sqrt', then `max_features=sqrt(n_features)`. + - If 'log2', then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + Choosing `max_features < n_features` leads to a reduction of variance + and an increase in bias. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints progress and performance + once in a while (the more trees the lower the frequency). If greater + than 1 then it prints progress and performance for every tree. + Values must be in the range `[0, inf)`. + + max_leaf_nodes : int, default=None + Grow trees with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + Values must be in the range `[2, inf)`. + If `None`, then unlimited number of leaf nodes. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just erase the + previous solution. See :term:`the Glossary `. + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Values must be in the range `(0.0, 1.0)`. + Only used if ``n_iter_no_change`` is set to an integer. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=None + ``n_iter_no_change`` is used to decide if early stopping will be used + to terminate training when validation score is not improving. By + default it is set to None to disable early stopping. If set to a + number, it will set aside ``validation_fraction`` size of the training + data as validation and terminate training when validation score is not + improving in all of the previous ``n_iter_no_change`` numbers of + iterations. The split is stratified. + Values must be in the range `[1, inf)`. + See + :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`. + + .. versionadded:: 0.20 + + tol : float, default=1e-4 + Tolerance for the early stopping. When the loss is not improving + by at least tol for ``n_iter_no_change`` iterations (if set to a + number), the training stops. + Values must be in the range `[0.0, inf)`. + + .. versionadded:: 0.20 + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. + Values must be in the range `[0.0, inf)`. + See :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + Attributes + ---------- + n_estimators_ : int + The number of estimators as selected by early stopping (if + ``n_iter_no_change`` is specified). Otherwise it is set to + ``n_estimators``. + + .. versionadded:: 0.20 + + n_trees_per_iteration_ : int + The number of trees that are built at each iteration. For binary classifiers, + this is always 1. 
+ + .. versionadded:: 1.4.0 + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + oob_improvement_ : ndarray of shape (n_estimators,) + The improvement in loss on the out-of-bag samples + relative to the previous iteration. + ``oob_improvement_[0]`` is the improvement in + loss of the first stage over the ``init`` estimator. + Only available if ``subsample < 1.0``. + + oob_scores_ : ndarray of shape (n_estimators,) + The full history of the loss values on the out-of-bag + samples. Only available if `subsample < 1.0`. + + .. versionadded:: 1.3 + + oob_score_ : float + The last value of the loss on the out-of-bag samples. It is + the same as `oob_scores_[-1]`. Only available if `subsample < 1.0`. + + .. versionadded:: 1.3 + + train_score_ : ndarray of shape (n_estimators,) + The i-th score ``train_score_[i]`` is the loss of the + model at iteration ``i`` on the in-bag sample. + If ``subsample == 1`` this is the loss on the training data. + + init_ : estimator + The estimator that provides the initial predictions. Set via the ``init`` + argument. + + estimators_ : ndarray of DecisionTreeRegressor of \ + shape (n_estimators, ``n_trees_per_iteration_``) + The collection of fitted sub-estimators. ``n_trees_per_iteration_`` is 1 for + binary classification, otherwise ``n_classes``. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_classes_ : int + The number of classes. + + max_features_ : int + The inferred value of max_features. + + See Also + -------- + HistGradientBoostingClassifier : Histogram-based Gradient Boosting + Classification Tree. + sklearn.tree.DecisionTreeClassifier : A decision tree classifier. + RandomForestClassifier : A meta-estimator that fits a number of decision + tree classifiers on various sub-samples of the dataset and uses + averaging to improve the predictive accuracy and control over-fitting. + AdaBoostClassifier : A meta-estimator that begins by fitting a classifier + on the original dataset and then fits additional copies of the + classifier on the same dataset where the weights of incorrectly + classified instances are adjusted such that subsequent classifiers + focus more on difficult cases. + + Notes + ----- + The features are always randomly permuted at each split. Therefore, + the best found split may vary, even with the same training data and + ``max_features=n_features``, if the improvement of the criterion is + identical for several splits enumerated during the search of the best + split. To obtain a deterministic behaviour during fitting, + ``random_state`` has to be fixed. + + References + ---------- + J. Friedman, Greedy Function Approximation: A Gradient Boosting + Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. + + J. Friedman, Stochastic Gradient Boosting, 1999 + + T. Hastie, R. 
Tibshirani and J. Friedman. + Elements of Statistical Learning Ed. 2, Springer, 2009. + + Examples + -------- + The following example shows how to fit a gradient boosting classifier with + 100 decision stumps as weak learners. + + >>> from sklearn.datasets import make_hastie_10_2 + >>> from sklearn.ensemble import GradientBoostingClassifier + + >>> X, y = make_hastie_10_2(random_state=0) + >>> X_train, X_test = X[:2000], X[2000:] + >>> y_train, y_test = y[:2000], y[2000:] + + >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, + ... max_depth=1, random_state=0).fit(X_train, y_train) + >>> clf.score(X_test, y_test) + 0.913... + """ + + _parameter_constraints: dict = { + **BaseGradientBoosting._parameter_constraints, + "loss": [StrOptions({"log_loss", "exponential"})], + "init": [StrOptions({"zero"}), None, HasMethods(["fit", "predict_proba"])], + } + + def __init__( + self, + *, + loss="log_loss", + learning_rate=0.1, + n_estimators=100, + subsample=1.0, + criterion="friedman_mse", + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_depth=3, + min_impurity_decrease=0.0, + init=None, + random_state=None, + max_features=None, + verbose=0, + max_leaf_nodes=None, + warm_start=False, + validation_fraction=0.1, + n_iter_no_change=None, + tol=1e-4, + ccp_alpha=0.0, + ): + super().__init__( + loss=loss, + learning_rate=learning_rate, + n_estimators=n_estimators, + criterion=criterion, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_depth=max_depth, + init=init, + subsample=subsample, + max_features=max_features, + random_state=random_state, + verbose=verbose, + max_leaf_nodes=max_leaf_nodes, + min_impurity_decrease=min_impurity_decrease, + warm_start=warm_start, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + tol=tol, + ccp_alpha=ccp_alpha, + ) + + def _encode_y(self, y, sample_weight): + # encode classes into 0 ... n_classes - 1 and sets attributes classes_ + # and n_trees_per_iteration_ + check_classification_targets(y) + + label_encoder = LabelEncoder() + encoded_y_int = label_encoder.fit_transform(y) + self.classes_ = label_encoder.classes_ + n_classes = self.classes_.shape[0] + # only 1 tree for binary classification. For multiclass classification, + # we build 1 tree per class. + self.n_trees_per_iteration_ = 1 if n_classes <= 2 else n_classes + encoded_y = encoded_y_int.astype(float, copy=False) + + # From here on, it is additional to the HGBT case. + # expose n_classes_ attribute + self.n_classes_ = n_classes + if sample_weight is None: + n_trim_classes = n_classes + else: + n_trim_classes = np.count_nonzero(np.bincount(encoded_y_int, sample_weight)) + + if n_trim_classes < 2: + raise ValueError( + "y contains %d class after sample_weight " + "trimmed classes with zero weights, while a " + "minimum of 2 classes are required." % n_trim_classes + ) + return encoded_y + + def _get_loss(self, sample_weight): + if self.loss == "log_loss": + if self.n_classes_ == 2: + return HalfBinomialLoss(sample_weight=sample_weight) + else: + return HalfMultinomialLoss( + sample_weight=sample_weight, n_classes=self.n_classes_ + ) + elif self.loss == "exponential": + if self.n_classes_ > 2: + raise ValueError( + f"loss='{self.loss}' is only suitable for a binary classification " + f"problem, you have n_classes={self.n_classes_}. " + "Please use loss='log_loss' instead." 
+ ) + else: + return ExponentialLoss(sample_weight=sample_weight) + + def decision_function(self, X): + """Compute the decision function of ``X``. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + score : ndarray of shape (n_samples, n_classes) or (n_samples,) + The decision function of the input samples, which corresponds to + the raw values predicted from the trees of the ensemble . The + order of the classes corresponds to that in the attribute + :term:`classes_`. Regression and binary classification produce an + array of shape (n_samples,). + """ + X = self._validate_data( + X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False + ) + raw_predictions = self._raw_predict(X) + if raw_predictions.shape[1] == 1: + return raw_predictions.ravel() + return raw_predictions + + def staged_decision_function(self, X): + """Compute decision function of ``X`` for each iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Yields + ------ + score : generator of ndarray of shape (n_samples, k) + The decision function of the input samples, which corresponds to + the raw values predicted from the trees of the ensemble . The + classes corresponds to that in the attribute :term:`classes_`. + Regression and binary classification are special cases with + ``k == 1``, otherwise ``k==n_classes``. + """ + yield from self._staged_raw_predict(X) + + def predict(self, X): + """Predict class for X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted values. + """ + raw_predictions = self.decision_function(X) + if raw_predictions.ndim == 1: # decision_function already squeezed it + encoded_classes = (raw_predictions >= 0).astype(int) + else: + encoded_classes = np.argmax(raw_predictions, axis=1) + return self.classes_[encoded_classes] + + def staged_predict(self, X): + """Predict class at each stage for X. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted value of the input samples. + """ + if self.n_classes_ == 2: # n_trees_per_iteration_ = 1 + for raw_predictions in self._staged_raw_predict(X): + encoded_classes = (raw_predictions.squeeze() >= 0).astype(int) + yield self.classes_.take(encoded_classes, axis=0) + else: + for raw_predictions in self._staged_raw_predict(X): + encoded_classes = np.argmax(raw_predictions, axis=1) + yield self.classes_.take(encoded_classes, axis=0) + + def predict_proba(self, X): + """Predict class probabilities for X. 
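+
+        A minimal illustrative sketch (the dataset and settings below are
+        arbitrary and only chosen to keep the example fast); the columns of
+        the returned array follow the order of :term:`classes_`:
+
+        >>> from sklearn.datasets import make_hastie_10_2
+        >>> from sklearn.ensemble import GradientBoostingClassifier
+        >>> X, y = make_hastie_10_2(random_state=0)
+        >>> clf = GradientBoostingClassifier(n_estimators=10, random_state=0)
+        >>> clf = clf.fit(X[:400], y[:400])
+        >>> clf.classes_
+        array([-1.,  1.])
+        >>> clf.predict_proba(X[:2]).shape
+        (2, 2)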
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + + Raises + ------ + AttributeError + If the ``loss`` does not support probabilities. + """ + raw_predictions = self.decision_function(X) + return self._loss.predict_proba(raw_predictions) + + def predict_log_proba(self, X): + """Predict class log-probabilities for X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class log-probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + + Raises + ------ + AttributeError + If the ``loss`` does not support probabilities. + """ + proba = self.predict_proba(X) + return np.log(proba) + + def staged_predict_proba(self, X): + """Predict class probabilities at each stage for X. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted value of the input samples. + """ + try: + for raw_predictions in self._staged_raw_predict(X): + yield self._loss.predict_proba(raw_predictions) + except NotFittedError: + raise + except AttributeError as e: + raise AttributeError( + "loss=%r does not support predict_proba" % self.loss + ) from e + + +class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting): + """Gradient Boosting for regression. + + This estimator builds an additive model in a forward stage-wise fashion; it + allows for the optimization of arbitrary differentiable loss functions. In + each stage a regression tree is fit on the negative gradient of the given + loss function. + + :class:`sklearn.ensemble.HistGradientBoostingRegressor` is a much faster + variant of this algorithm for intermediate datasets (`n_samples >= 10_000`). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : {'squared_error', 'absolute_error', 'huber', 'quantile'}, \ + default='squared_error' + Loss function to be optimized. 'squared_error' refers to the squared + error for regression. 'absolute_error' refers to the absolute error of + regression and is a robust loss function. 'huber' is a + combination of the two. 'quantile' allows quantile regression (use + `alpha` to specify the quantile). + + learning_rate : float, default=0.1 + Learning rate shrinks the contribution of each tree by `learning_rate`. + There is a trade-off between learning_rate and n_estimators. + Values must be in the range `[0.0, inf)`. + + n_estimators : int, default=100 + The number of boosting stages to perform. Gradient boosting + is fairly robust to over-fitting so a large number usually + results in better performance. + Values must be in the range `[1, inf)`. 
+ + subsample : float, default=1.0 + The fraction of samples to be used for fitting the individual base + learners. If smaller than 1.0 this results in Stochastic Gradient + Boosting. `subsample` interacts with the parameter `n_estimators`. + Choosing `subsample < 1.0` leads to a reduction of variance + and an increase in bias. + Values must be in the range `(0.0, 1.0]`. + + criterion : {'friedman_mse', 'squared_error'}, default='friedman_mse' + The function to measure the quality of a split. Supported criteria are + "friedman_mse" for the mean squared error with improvement score by + Friedman, "squared_error" for mean squared error. The default value of + "friedman_mse" is generally the best as it can provide a better + approximation in some cases. + + .. versionadded:: 0.18 + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, values must be in the range `[2, inf)`. + - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split` + will be `ceil(min_samples_split * n_samples)`. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, values must be in the range `[1, inf)`. + - If float, values must be in the range `(0.0, 1.0)` and `min_samples_leaf` + will be `ceil(min_samples_leaf * n_samples)`. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + Values must be in the range `[0.0, 0.5]`. + + max_depth : int or None, default=3 + Maximum depth of the individual regression estimators. The maximum + depth limits the number of nodes in the tree. Tune this parameter + for best performance; the best value depends on the interaction + of the input variables. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + If int, values must be in the range `[1, inf)`. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + Values must be in the range `[0.0, inf)`. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + init : estimator or 'zero', default=None + An estimator object that is used to compute the initial predictions. + ``init`` has to provide :term:`fit` and :term:`predict`. If 'zero', the + initial raw predictions are set to zero. 
By default a + ``DummyEstimator`` is used, predicting either the average target value + (for loss='squared_error'), or a quantile for the other losses. + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to each Tree estimator at each + boosting iteration. + In addition, it controls the random permutation of the features at + each split (see Notes for more details). + It also controls the random splitting of the training data to obtain a + validation set if `n_iter_no_change` is not None. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + max_features : {'sqrt', 'log2'}, int or float, default=None + The number of features to consider when looking for the best split: + + - If int, values must be in the range `[1, inf)`. + - If float, values must be in the range `(0.0, 1.0]` and the features + considered at each split will be `max(1, int(max_features * n_features_in_))`. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + Choosing `max_features < n_features` leads to a reduction of variance + and an increase in bias. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + alpha : float, default=0.9 + The alpha-quantile of the huber loss function and the quantile + loss function. Only if ``loss='huber'`` or ``loss='quantile'``. + Values must be in the range `(0.0, 1.0)`. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints progress and performance + once in a while (the more trees the lower the frequency). If greater + than 1 then it prints progress and performance for every tree. + Values must be in the range `[0, inf)`. + + max_leaf_nodes : int, default=None + Grow trees with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + Values must be in the range `[2, inf)`. + If None, then unlimited number of leaf nodes. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just erase the + previous solution. See :term:`the Glossary `. + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Values must be in the range `(0.0, 1.0)`. + Only used if ``n_iter_no_change`` is set to an integer. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=None + ``n_iter_no_change`` is used to decide if early stopping will be used + to terminate training when validation score is not improving. By + default it is set to None to disable early stopping. If set to a + number, it will set aside ``validation_fraction`` size of the training + data as validation and terminate training when validation score is not + improving in all of the previous ``n_iter_no_change`` numbers of + iterations. + Values must be in the range `[1, inf)`. + See + :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`. + + .. versionadded:: 0.20 + + tol : float, default=1e-4 + Tolerance for the early stopping. When the loss is not improving + by at least tol for ``n_iter_no_change`` iterations (if set to a + number), the training stops. + Values must be in the range `[0.0, inf)`. + + .. 
versionadded:: 0.20 + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. + Values must be in the range `[0.0, inf)`. + See :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + Attributes + ---------- + n_estimators_ : int + The number of estimators as selected by early stopping (if + ``n_iter_no_change`` is specified). Otherwise it is set to + ``n_estimators``. + + n_trees_per_iteration_ : int + The number of trees that are built at each iteration. For regressors, this is + always 1. + + .. versionadded:: 1.4.0 + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + oob_improvement_ : ndarray of shape (n_estimators,) + The improvement in loss on the out-of-bag samples + relative to the previous iteration. + ``oob_improvement_[0]`` is the improvement in + loss of the first stage over the ``init`` estimator. + Only available if ``subsample < 1.0``. + + oob_scores_ : ndarray of shape (n_estimators,) + The full history of the loss values on the out-of-bag + samples. Only available if `subsample < 1.0`. + + .. versionadded:: 1.3 + + oob_score_ : float + The last value of the loss on the out-of-bag samples. It is + the same as `oob_scores_[-1]`. Only available if `subsample < 1.0`. + + .. versionadded:: 1.3 + + train_score_ : ndarray of shape (n_estimators,) + The i-th score ``train_score_[i]`` is the loss of the + model at iteration ``i`` on the in-bag sample. + If ``subsample == 1`` this is the loss on the training data. + + init_ : estimator + The estimator that provides the initial predictions. Set via the ``init`` + argument. + + estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, 1) + The collection of fitted sub-estimators. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + max_features_ : int + The inferred value of max_features. + + See Also + -------- + HistGradientBoostingRegressor : Histogram-based Gradient Boosting + Classification Tree. + sklearn.tree.DecisionTreeRegressor : A decision tree regressor. + sklearn.ensemble.RandomForestRegressor : A random forest regressor. + + Notes + ----- + The features are always randomly permuted at each split. Therefore, + the best found split may vary, even with the same training data and + ``max_features=n_features``, if the improvement of the criterion is + identical for several splits enumerated during the search of the best + split. To obtain a deterministic behaviour during fitting, + ``random_state`` has to be fixed. + + References + ---------- + J. Friedman, Greedy Function Approximation: A Gradient Boosting + Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. + + J. 
Friedman, Stochastic Gradient Boosting, 1999 + + T. Hastie, R. Tibshirani and J. Friedman. + Elements of Statistical Learning Ed. 2, Springer, 2009. + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.ensemble import GradientBoostingRegressor + >>> from sklearn.model_selection import train_test_split + >>> X, y = make_regression(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> reg = GradientBoostingRegressor(random_state=0) + >>> reg.fit(X_train, y_train) + GradientBoostingRegressor(random_state=0) + >>> reg.predict(X_test[1:2]) + array([-61...]) + >>> reg.score(X_test, y_test) + 0.4... + """ + + _parameter_constraints: dict = { + **BaseGradientBoosting._parameter_constraints, + "loss": [StrOptions({"squared_error", "absolute_error", "huber", "quantile"})], + "init": [StrOptions({"zero"}), None, HasMethods(["fit", "predict"])], + "alpha": [Interval(Real, 0.0, 1.0, closed="neither")], + } + + def __init__( + self, + *, + loss="squared_error", + learning_rate=0.1, + n_estimators=100, + subsample=1.0, + criterion="friedman_mse", + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_depth=3, + min_impurity_decrease=0.0, + init=None, + random_state=None, + max_features=None, + alpha=0.9, + verbose=0, + max_leaf_nodes=None, + warm_start=False, + validation_fraction=0.1, + n_iter_no_change=None, + tol=1e-4, + ccp_alpha=0.0, + ): + super().__init__( + loss=loss, + learning_rate=learning_rate, + n_estimators=n_estimators, + criterion=criterion, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_depth=max_depth, + init=init, + subsample=subsample, + max_features=max_features, + min_impurity_decrease=min_impurity_decrease, + random_state=random_state, + alpha=alpha, + verbose=verbose, + max_leaf_nodes=max_leaf_nodes, + warm_start=warm_start, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + tol=tol, + ccp_alpha=ccp_alpha, + ) + + def _encode_y(self, y=None, sample_weight=None): + # Just convert y to the expected dtype + self.n_trees_per_iteration_ = 1 + y = y.astype(DOUBLE, copy=False) + return y + + def _get_loss(self, sample_weight): + if self.loss in ("quantile", "huber"): + return _LOSSES[self.loss](sample_weight=sample_weight, quantile=self.alpha) + else: + return _LOSSES[self.loss](sample_weight=sample_weight) + + def predict(self, X): + """Predict regression target for X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted values. + """ + X = self._validate_data( + X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False + ) + # In regression we can directly return the raw value from the trees. + return self._raw_predict(X).ravel() + + def staged_predict(self, X): + """Predict regression target at each stage for X. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. 
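+
+        A minimal sketch of stage-wise monitoring on synthetic data (the
+        train/test split and settings below are arbitrary and only chosen to
+        keep the example fast); one prediction array is yielded per boosting
+        stage:
+
+        >>> from sklearn.datasets import make_regression
+        >>> from sklearn.ensemble import GradientBoostingRegressor
+        >>> from sklearn.metrics import mean_squared_error
+        >>> X, y = make_regression(random_state=0)
+        >>> est = GradientBoostingRegressor(n_estimators=5, random_state=0)
+        >>> est = est.fit(X[:80], y[:80])
+        >>> errors = [mean_squared_error(y[80:], y_pred)
+        ...           for y_pred in est.staged_predict(X[80:])]
+        >>> len(errors)
+        5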
+ + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted value of the input samples. + """ + for raw_predictions in self._staged_raw_predict(X): + yield raw_predictions.ravel() + + def apply(self, X): + """Apply trees in the ensemble to X, return leaf indices. + + .. versionadded:: 0.17 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will + be converted to a sparse ``csr_matrix``. + + Returns + ------- + X_leaves : array-like of shape (n_samples, n_estimators) + For each datapoint x in X and for each tree in the ensemble, + return the index of the leaf x ends up in each estimator. + """ + + leaves = super().apply(X) + leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0]) + return leaves diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fbeb4433b0148ddecf464b8b8cf9e4b5b7ef9bf5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..879fae1189f87e24b9a704f7bb713836e9debf74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py @@ -0,0 +1,5 @@ +"""This module implements histogram-based gradient boosting estimators. + +The implementation is a port from pygbm which is itself strongly inspired +from LightGBM. 
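+
+A minimal usage sketch of the public estimators built on top of this module
+(they are exposed from :mod:`sklearn.ensemble`); the toy data below is
+arbitrary:
+
+>>> from sklearn.datasets import make_regression
+>>> from sklearn.ensemble import HistGradientBoostingRegressor
+>>> X, y = make_regression(random_state=0)
+>>> est = HistGradientBoostingRegressor(max_iter=10).fit(X, y)
+>>> est.predict(X[:2]).shape
+(2,)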
+""" diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..48b8b9e6904f13e246b3e90acf958e797b9cd6dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1074e85e3084ecf78809573fbdfcdb2832ff7b3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a9223ea145648504d7834eaa6302901c2a9b4a3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py new file mode 100644 index 0000000000000000000000000000000000000000..8cf40d2a64539212afb347d72570e2c31bee6262 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py @@ -0,0 +1,798 @@ +""" +This module contains the TreeGrower class. + +TreeGrower builds a regression tree fitting a Newton-Raphson step, based on +the gradients and hessians of the training data. +""" +# Author: Nicolas Hug + +import numbers +from heapq import heappop, heappush +from timeit import default_timer as time + +import numpy as np + +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads + +from ._bitset import set_raw_bitset_from_binned_bitset +from .common import ( + PREDICTOR_RECORD_DTYPE, + X_BITSET_INNER_DTYPE, + Y_DTYPE, + MonotonicConstraint, +) +from .histogram import HistogramBuilder +from .predictor import TreePredictor +from .splitting import Splitter +from .utils import sum_parallel + +EPS = np.finfo(Y_DTYPE).eps # to avoid zero division errors + + +class TreeNode: + """Tree Node class used in TreeGrower. + + This isn't used for prediction purposes, only for training (see + TreePredictor). + + Parameters + ---------- + depth : int + The depth of the node, i.e. its distance from the root. + sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 + The indices of the samples at the node. + sum_gradients : float + The sum of the gradients of the samples at the node. + sum_hessians : float + The sum of the hessians of the samples at the node. + + Attributes + ---------- + depth : int + The depth of the node, i.e. its distance from the root. + sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 + The indices of the samples at the node. + sum_gradients : float + The sum of the gradients of the samples at the node. 
+ sum_hessians : float + The sum of the hessians of the samples at the node. + split_info : SplitInfo or None + The result of the split evaluation. + is_leaf : bool + True if node is a leaf + left_child : TreeNode or None + The left child of the node. None for leaves. + right_child : TreeNode or None + The right child of the node. None for leaves. + value : float or None + The value of the leaf, as computed in finalize_leaf(). None for + non-leaf nodes. + partition_start : int + start position of the node's sample_indices in splitter.partition. + partition_stop : int + stop position of the node's sample_indices in splitter.partition. + allowed_features : None or ndarray, dtype=int + Indices of features allowed to split for children. + interaction_cst_indices : None or list of ints + Indices of the interaction sets that have to be applied on splits of + child nodes. The fewer sets the stronger the constraint as fewer sets + contain fewer features. + children_lower_bound : float + children_upper_bound : float + """ + + split_info = None + left_child = None + right_child = None + histograms = None + + # start and stop indices of the node in the splitter.partition + # array. Concretely, + # self.sample_indices = view(self.splitter.partition[start:stop]) + # Please see the comments about splitter.partition and + # splitter.split_indices for more info about this design. + # These 2 attributes are only used in _update_raw_prediction, because we + # need to iterate over the leaves and I don't know how to efficiently + # store the sample_indices views because they're all of different sizes. + partition_start = 0 + partition_stop = 0 + + def __init__(self, depth, sample_indices, sum_gradients, sum_hessians, value=None): + self.depth = depth + self.sample_indices = sample_indices + self.n_samples = sample_indices.shape[0] + self.sum_gradients = sum_gradients + self.sum_hessians = sum_hessians + self.value = value + self.is_leaf = False + self.allowed_features = None + self.interaction_cst_indices = None + self.set_children_bounds(float("-inf"), float("+inf")) + + def set_children_bounds(self, lower, upper): + """Set children values bounds to respect monotonic constraints.""" + + # These are bounds for the node's *children* values, not the node's + # value. The bounds are used in the splitter when considering potential + # left and right child. + self.children_lower_bound = lower + self.children_upper_bound = upper + + def __lt__(self, other_node): + """Comparison for priority queue. + + Nodes with high gain are higher priority than nodes with low gain. + + heapq.heappush only need the '<' operator. + heapq.heappop take the smallest item first (smaller is higher + priority). + + Parameters + ---------- + other_node : TreeNode + The node to compare with. + """ + return self.split_info.gain > other_node.split_info.gain + + +class TreeGrower: + """Tree grower class used to build a tree. + + The tree is fitted to predict the values of a Newton-Raphson step. The + splits are considered in a best-first fashion, and the quality of a + split is defined in splitting._split_gain. + + Parameters + ---------- + X_binned : ndarray of shape (n_samples, n_features), dtype=np.uint8 + The binned input samples. Must be Fortran-aligned. + gradients : ndarray of shape (n_samples,) + The gradients of each training sample. Those are the gradients of the + loss w.r.t the predictions, evaluated at iteration ``i - 1``. + hessians : ndarray of shape (n_samples,) + The hessians of each training sample. 
Those are the hessians of the + loss w.r.t the predictions, evaluated at iteration ``i - 1``. + max_leaf_nodes : int, default=None + The maximum number of leaves for each tree. If None, there is no + maximum limit. + max_depth : int, default=None + The maximum depth of each tree. The depth of a tree is the number of + edges to go from the root to the deepest leaf. + Depth isn't constrained by default. + min_samples_leaf : int, default=20 + The minimum number of samples per leaf. + min_gain_to_split : float, default=0. + The minimum gain needed to split a node. Splits with lower gain will + be ignored. + min_hessian_to_split : float, default=1e-3 + The minimum sum of hessians needed in each node. Splits that result in + at least one child having a sum of hessians less than + ``min_hessian_to_split`` are discarded. + n_bins : int, default=256 + The total number of bins, including the bin for missing values. Used + to define the shape of the histograms. + n_bins_non_missing : ndarray, dtype=np.uint32, default=None + For each feature, gives the number of bins actually used for + non-missing values. For features with a lot of unique values, this + is equal to ``n_bins - 1``. If it's an int, all features are + considered to have the same number of bins. If None, all features + are considered to have ``n_bins - 1`` bins. + has_missing_values : bool or ndarray, dtype=bool, default=False + Whether each feature contains missing values (in the training data). + If it's a bool, the same value is used for all features. + is_categorical : ndarray of bool of shape (n_features,), default=None + Indicates categorical features. + monotonic_cst : array-like of int of shape (n_features,), dtype=int, default=None + Indicates the monotonic constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + Read more in the :ref:`User Guide `. + interaction_cst : list of sets of integers, default=None + List of interaction constraints. + l2_regularization : float, default=0. + The L2 regularization parameter. + feature_fraction_per_split : float, default=1 + Proportion of randomly chosen features in each and every node split. + This is a form of regularization, smaller values make the trees weaker + learners and might prevent overfitting. + rng : Generator + Numpy random Generator used for feature subsampling. + shrinkage : float, default=1. + The shrinkage parameter to apply to the leaves values, also known as + learning rate. + n_threads : int, default=None + Number of OpenMP threads to use. `_openmp_effective_n_threads` is called + to determine the effective number of threads use, which takes cgroups CPU + quotes into account. See the docstring of `_openmp_effective_n_threads` + for details. + + Attributes + ---------- + histogram_builder : HistogramBuilder + splitter : Splitter + root : TreeNode + finalized_leaves : list of TreeNode + splittable_nodes : list of TreeNode + missing_values_bin_idx : int + Equals n_bins - 1 + n_categorical_splits : int + n_features : int + n_nodes : int + total_find_split_time : float + Time spent finding the best splits + total_compute_hist_time : float + Time spent computing histograms + total_apply_split_time : float + Time spent splitting nodes + with_monotonic_cst : bool + Whether there are monotonic constraints that apply. False iff monotonic_cst is + None. 
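+
+    Examples
+    --------
+    TreeGrower is private API and is normally driven by the public
+    estimators. A minimal illustration of the ``monotonic_cst`` behaviour
+    described above, through
+    :class:`~sklearn.ensemble.HistGradientBoostingRegressor` (which uses this
+    grower internally); an increasing constraint on the single feature is
+    assumed for the example:
+
+    >>> import numpy as np
+    >>> from sklearn.ensemble import HistGradientBoostingRegressor
+    >>> X = np.arange(100, dtype=np.float64).reshape(-1, 1)
+    >>> y = np.arange(100, dtype=np.float64)
+    >>> est = HistGradientBoostingRegressor(max_iter=5, monotonic_cst=[1])
+    >>> est = est.fit(X, y)
+    >>> bool(np.all(np.diff(est.predict(X)) >= 0))
+    True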
+ """ + + def __init__( + self, + X_binned, + gradients, + hessians, + max_leaf_nodes=None, + max_depth=None, + min_samples_leaf=20, + min_gain_to_split=0.0, + min_hessian_to_split=1e-3, + n_bins=256, + n_bins_non_missing=None, + has_missing_values=False, + is_categorical=None, + monotonic_cst=None, + interaction_cst=None, + l2_regularization=0.0, + feature_fraction_per_split=1.0, + rng=np.random.default_rng(), + shrinkage=1.0, + n_threads=None, + ): + self._validate_parameters( + X_binned, + min_gain_to_split, + min_hessian_to_split, + ) + n_threads = _openmp_effective_n_threads(n_threads) + + if n_bins_non_missing is None: + n_bins_non_missing = n_bins - 1 + + if isinstance(n_bins_non_missing, numbers.Integral): + n_bins_non_missing = np.array( + [n_bins_non_missing] * X_binned.shape[1], dtype=np.uint32 + ) + else: + n_bins_non_missing = np.asarray(n_bins_non_missing, dtype=np.uint32) + + if isinstance(has_missing_values, bool): + has_missing_values = [has_missing_values] * X_binned.shape[1] + has_missing_values = np.asarray(has_missing_values, dtype=np.uint8) + + # `monotonic_cst` validation is done in _validate_monotonic_cst + # at the estimator level and therefore the following should not be + # needed when using the public API. + if monotonic_cst is None: + monotonic_cst = np.full( + shape=X_binned.shape[1], + fill_value=MonotonicConstraint.NO_CST, + dtype=np.int8, + ) + else: + monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8) + self.with_monotonic_cst = np.any(monotonic_cst != MonotonicConstraint.NO_CST) + + if is_categorical is None: + is_categorical = np.zeros(shape=X_binned.shape[1], dtype=np.uint8) + else: + is_categorical = np.asarray(is_categorical, dtype=np.uint8) + + if np.any( + np.logical_and( + is_categorical == 1, monotonic_cst != MonotonicConstraint.NO_CST + ) + ): + raise ValueError("Categorical features cannot have monotonic constraints.") + + hessians_are_constant = hessians.shape[0] == 1 + self.histogram_builder = HistogramBuilder( + X_binned, n_bins, gradients, hessians, hessians_are_constant, n_threads + ) + missing_values_bin_idx = n_bins - 1 + self.splitter = Splitter( + X_binned=X_binned, + n_bins_non_missing=n_bins_non_missing, + missing_values_bin_idx=missing_values_bin_idx, + has_missing_values=has_missing_values, + is_categorical=is_categorical, + monotonic_cst=monotonic_cst, + l2_regularization=l2_regularization, + min_hessian_to_split=min_hessian_to_split, + min_samples_leaf=min_samples_leaf, + min_gain_to_split=min_gain_to_split, + hessians_are_constant=hessians_are_constant, + feature_fraction_per_split=feature_fraction_per_split, + rng=rng, + n_threads=n_threads, + ) + self.X_binned = X_binned + self.max_leaf_nodes = max_leaf_nodes + self.max_depth = max_depth + self.min_samples_leaf = min_samples_leaf + self.min_gain_to_split = min_gain_to_split + self.n_bins_non_missing = n_bins_non_missing + self.missing_values_bin_idx = missing_values_bin_idx + self.has_missing_values = has_missing_values + self.is_categorical = is_categorical + self.monotonic_cst = monotonic_cst + self.interaction_cst = interaction_cst + self.l2_regularization = l2_regularization + self.shrinkage = shrinkage + self.n_features = X_binned.shape[1] + self.n_threads = n_threads + self.splittable_nodes = [] + self.finalized_leaves = [] + self.total_find_split_time = 0.0 # time spent finding the best splits + self.total_compute_hist_time = 0.0 # time spent computing histograms + self.total_apply_split_time = 0.0 # time spent splitting nodes + self.n_categorical_splits = 0 + 
self._intilialize_root(gradients, hessians, hessians_are_constant) + self.n_nodes = 1 + + def _validate_parameters( + self, + X_binned, + min_gain_to_split, + min_hessian_to_split, + ): + """Validate parameters passed to __init__. + + Also validate parameters passed to splitter. + """ + if X_binned.dtype != np.uint8: + raise NotImplementedError("X_binned must be of type uint8.") + if not X_binned.flags.f_contiguous: + raise ValueError( + "X_binned should be passed as Fortran contiguous " + "array for maximum efficiency." + ) + if min_gain_to_split < 0: + raise ValueError( + "min_gain_to_split={} must be positive.".format(min_gain_to_split) + ) + if min_hessian_to_split < 0: + raise ValueError( + "min_hessian_to_split={} must be positive.".format(min_hessian_to_split) + ) + + def grow(self): + """Grow the tree, from root to leaves.""" + while self.splittable_nodes: + self.split_next() + + self._apply_shrinkage() + + def _apply_shrinkage(self): + """Multiply leaves values by shrinkage parameter. + + This must be done at the very end of the growing process. If this were + done during the growing process e.g. in finalize_leaf(), then a leaf + would be shrunk but its sibling would potentially not be (if it's a + non-leaf), which would lead to a wrong computation of the 'middle' + value needed to enforce the monotonic constraints. + """ + for leaf in self.finalized_leaves: + leaf.value *= self.shrinkage + + def _intilialize_root(self, gradients, hessians, hessians_are_constant): + """Initialize root node and finalize it if needed.""" + n_samples = self.X_binned.shape[0] + depth = 0 + sum_gradients = sum_parallel(gradients, self.n_threads) + if self.histogram_builder.hessians_are_constant: + sum_hessians = hessians[0] * n_samples + else: + sum_hessians = sum_parallel(hessians, self.n_threads) + self.root = TreeNode( + depth=depth, + sample_indices=self.splitter.partition, + sum_gradients=sum_gradients, + sum_hessians=sum_hessians, + value=0, + ) + + self.root.partition_start = 0 + self.root.partition_stop = n_samples + + if self.root.n_samples < 2 * self.min_samples_leaf: + # Do not even bother computing any splitting statistics. + self._finalize_leaf(self.root) + return + if sum_hessians < self.splitter.min_hessian_to_split: + self._finalize_leaf(self.root) + return + + if self.interaction_cst is not None: + self.root.interaction_cst_indices = range(len(self.interaction_cst)) + allowed_features = set().union(*self.interaction_cst) + self.root.allowed_features = np.fromiter( + allowed_features, dtype=np.uint32, count=len(allowed_features) + ) + + tic = time() + self.root.histograms = self.histogram_builder.compute_histograms_brute( + self.root.sample_indices, self.root.allowed_features + ) + self.total_compute_hist_time += time() - tic + + tic = time() + self._compute_best_split_and_push(self.root) + self.total_find_split_time += time() - tic + + def _compute_best_split_and_push(self, node): + """Compute the best possible split (SplitInfo) of a given node. + + Also push it in the heap of splittable nodes if gain isn't zero. 
+ The gain of a node is 0 if either all the leaves are pure + (best gain = 0), or if no split would satisfy the constraints, + (min_hessians_to_split, min_gain_to_split, min_samples_leaf) + """ + + node.split_info = self.splitter.find_node_split( + n_samples=node.n_samples, + histograms=node.histograms, + sum_gradients=node.sum_gradients, + sum_hessians=node.sum_hessians, + value=node.value, + lower_bound=node.children_lower_bound, + upper_bound=node.children_upper_bound, + allowed_features=node.allowed_features, + ) + + if node.split_info.gain <= 0: # no valid split + self._finalize_leaf(node) + else: + heappush(self.splittable_nodes, node) + + def split_next(self): + """Split the node with highest potential gain. + + Returns + ------- + left : TreeNode + The resulting left child. + right : TreeNode + The resulting right child. + """ + # Consider the node with the highest loss reduction (a.k.a. gain) + node = heappop(self.splittable_nodes) + + tic = time() + ( + sample_indices_left, + sample_indices_right, + right_child_pos, + ) = self.splitter.split_indices(node.split_info, node.sample_indices) + self.total_apply_split_time += time() - tic + + depth = node.depth + 1 + n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes) + n_leaf_nodes += 2 + + left_child_node = TreeNode( + depth, + sample_indices_left, + node.split_info.sum_gradient_left, + node.split_info.sum_hessian_left, + value=node.split_info.value_left, + ) + right_child_node = TreeNode( + depth, + sample_indices_right, + node.split_info.sum_gradient_right, + node.split_info.sum_hessian_right, + value=node.split_info.value_right, + ) + + node.right_child = right_child_node + node.left_child = left_child_node + + # set start and stop indices + left_child_node.partition_start = node.partition_start + left_child_node.partition_stop = node.partition_start + right_child_pos + right_child_node.partition_start = left_child_node.partition_stop + right_child_node.partition_stop = node.partition_stop + + # set interaction constraints (the indices of the constraints sets) + if self.interaction_cst is not None: + # Calculate allowed_features and interaction_cst_indices only once. Child + # nodes inherit them before they get split. + ( + left_child_node.allowed_features, + left_child_node.interaction_cst_indices, + ) = self._compute_interactions(node) + right_child_node.interaction_cst_indices = ( + left_child_node.interaction_cst_indices + ) + right_child_node.allowed_features = left_child_node.allowed_features + + if not self.has_missing_values[node.split_info.feature_idx]: + # If no missing values are encountered at fit time, then samples + # with missing values during predict() will go to whichever child + # has the most samples. 
+ node.split_info.missing_go_to_left = ( + left_child_node.n_samples > right_child_node.n_samples + ) + + self.n_nodes += 2 + self.n_categorical_splits += node.split_info.is_categorical + + if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes: + self._finalize_leaf(left_child_node) + self._finalize_leaf(right_child_node) + self._finalize_splittable_nodes() + return left_child_node, right_child_node + + if self.max_depth is not None and depth == self.max_depth: + self._finalize_leaf(left_child_node) + self._finalize_leaf(right_child_node) + return left_child_node, right_child_node + + if left_child_node.n_samples < self.min_samples_leaf * 2: + self._finalize_leaf(left_child_node) + if right_child_node.n_samples < self.min_samples_leaf * 2: + self._finalize_leaf(right_child_node) + + if self.with_monotonic_cst: + # Set value bounds for respecting monotonic constraints + # See test_nodes_values() for details + if ( + self.monotonic_cst[node.split_info.feature_idx] + == MonotonicConstraint.NO_CST + ): + lower_left = lower_right = node.children_lower_bound + upper_left = upper_right = node.children_upper_bound + else: + mid = (left_child_node.value + right_child_node.value) / 2 + if ( + self.monotonic_cst[node.split_info.feature_idx] + == MonotonicConstraint.POS + ): + lower_left, upper_left = node.children_lower_bound, mid + lower_right, upper_right = mid, node.children_upper_bound + else: # NEG + lower_left, upper_left = mid, node.children_upper_bound + lower_right, upper_right = node.children_lower_bound, mid + left_child_node.set_children_bounds(lower_left, upper_left) + right_child_node.set_children_bounds(lower_right, upper_right) + + # Compute histograms of children, and compute their best possible split + # (if needed) + should_split_left = not left_child_node.is_leaf + should_split_right = not right_child_node.is_leaf + if should_split_left or should_split_right: + # We will compute the histograms of both nodes even if one of them + # is a leaf, since computing the second histogram is very cheap + # (using histogram subtraction). + n_samples_left = left_child_node.sample_indices.shape[0] + n_samples_right = right_child_node.sample_indices.shape[0] + if n_samples_left < n_samples_right: + smallest_child = left_child_node + largest_child = right_child_node + else: + smallest_child = right_child_node + largest_child = left_child_node + + # We use the brute O(n_samples) method on the child that has the + # smallest number of samples, and the subtraction trick O(n_bins) + # on the other one. + # Note that both left and right child have the same allowed_features. + tic = time() + smallest_child.histograms = self.histogram_builder.compute_histograms_brute( + smallest_child.sample_indices, smallest_child.allowed_features + ) + largest_child.histograms = ( + self.histogram_builder.compute_histograms_subtraction( + node.histograms, + smallest_child.histograms, + smallest_child.allowed_features, + ) + ) + # node.histograms is reused in largest_child.histograms. To break cyclic + # memory references and help garbage collection, we set it to None. + node.histograms = None + self.total_compute_hist_time += time() - tic + + tic = time() + if should_split_left: + self._compute_best_split_and_push(left_child_node) + if should_split_right: + self._compute_best_split_and_push(right_child_node) + self.total_find_split_time += time() - tic + + # Release memory used by histograms as they are no longer needed + # for leaf nodes since they won't be split. 
+ for child in (left_child_node, right_child_node): + if child.is_leaf: + del child.histograms + + # Release memory used by histograms as they are no longer needed for + # internal nodes once children histograms have been computed. + del node.histograms + + return left_child_node, right_child_node + + def _compute_interactions(self, node): + r"""Compute features allowed by interactions to be inherited by child nodes. + + Example: Assume constraints [{0, 1}, {1, 2}]. + 1 <- Both constraint groups could be applied from now on + / \ + 1 2 <- Left split still fulfills both constraint groups. + / \ / \ Right split at feature 2 has only group {1, 2} from now on. + + LightGBM uses the same logic for overlapping groups. See + https://github.com/microsoft/LightGBM/issues/4481 for details. + + Parameters: + ---------- + node : TreeNode + A node that might have children. Based on its feature_idx, the interaction + constraints for possible child nodes are computed. + + Returns + ------- + allowed_features : ndarray, dtype=uint32 + Indices of features allowed to split for children. + interaction_cst_indices : list of ints + Indices of the interaction sets that have to be applied on splits of + child nodes. The fewer sets the stronger the constraint as fewer sets + contain fewer features. + """ + # Note: + # - Case of no interactions is already captured before function call. + # - This is for nodes that are already split and have a + # node.split_info.feature_idx. + allowed_features = set() + interaction_cst_indices = [] + for i in node.interaction_cst_indices: + if node.split_info.feature_idx in self.interaction_cst[i]: + interaction_cst_indices.append(i) + allowed_features.update(self.interaction_cst[i]) + return ( + np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)), + interaction_cst_indices, + ) + + def _finalize_leaf(self, node): + """Make node a leaf of the tree being grown.""" + + node.is_leaf = True + self.finalized_leaves.append(node) + + def _finalize_splittable_nodes(self): + """Transform all splittable nodes into leaves. + + Used when some constraint is met e.g. maximum number of leaves or + maximum depth.""" + while len(self.splittable_nodes) > 0: + node = self.splittable_nodes.pop() + self._finalize_leaf(node) + + def make_predictor(self, binning_thresholds): + """Make a TreePredictor object out of the current tree. + + Parameters + ---------- + binning_thresholds : array-like of floats + Corresponds to the bin_thresholds_ attribute of the BinMapper. + For each feature, this stores: + + - the bin frontiers for continuous features + - the unique raw category values for categorical features + + Returns + ------- + A TreePredictor object. 
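+
+        Notes
+        -----
+        A small numeric sketch (hypothetical bin counts for a single feature)
+        of the histogram subtraction trick used while growing the tree that
+        this method converts: the parent histogram is the element-wise sum of
+        its children's histograms, so the larger child's histogram can be
+        obtained by subtraction instead of a second O(n_samples) pass:
+
+        >>> import numpy as np
+        >>> hist_parent = np.array([10, 7, 3, 5])   # hypothetical counts
+        >>> hist_smaller_child = np.array([2, 4, 1, 0])
+        >>> hist_parent - hist_smaller_child        # larger child's counts
+        array([8, 3, 2, 5])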
+ """ + predictor_nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE) + binned_left_cat_bitsets = np.zeros( + (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE + ) + raw_left_cat_bitsets = np.zeros( + (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE + ) + _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + self.root, + binning_thresholds, + self.n_bins_non_missing, + ) + return TreePredictor( + predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets + ) + + +def _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + grower_node, + binning_thresholds, + n_bins_non_missing, + next_free_node_idx=0, + next_free_bitset_idx=0, +): + """Helper used in make_predictor to set the TreePredictor fields.""" + node = predictor_nodes[next_free_node_idx] + node["count"] = grower_node.n_samples + node["depth"] = grower_node.depth + if grower_node.split_info is not None: + node["gain"] = grower_node.split_info.gain + else: + node["gain"] = -1 + + node["value"] = grower_node.value + + if grower_node.is_leaf: + # Leaf node + node["is_leaf"] = True + return next_free_node_idx + 1, next_free_bitset_idx + + split_info = grower_node.split_info + feature_idx, bin_idx = split_info.feature_idx, split_info.bin_idx + node["feature_idx"] = feature_idx + node["bin_threshold"] = bin_idx + node["missing_go_to_left"] = split_info.missing_go_to_left + node["is_categorical"] = split_info.is_categorical + + if split_info.bin_idx == n_bins_non_missing[feature_idx] - 1: + # Split is on the last non-missing bin: it's a "split on nans". + # All nans go to the right, the rest go to the left. + # Note: for categorical splits, bin_idx is 0 and we rely on the bitset + node["num_threshold"] = np.inf + elif split_info.is_categorical: + categories = binning_thresholds[feature_idx] + node["bitset_idx"] = next_free_bitset_idx + binned_left_cat_bitsets[next_free_bitset_idx] = split_info.left_cat_bitset + set_raw_bitset_from_binned_bitset( + raw_left_cat_bitsets[next_free_bitset_idx], + split_info.left_cat_bitset, + categories, + ) + next_free_bitset_idx += 1 + else: + node["num_threshold"] = binning_thresholds[feature_idx][bin_idx] + + next_free_node_idx += 1 + + node["left"] = next_free_node_idx + next_free_node_idx, next_free_bitset_idx = _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + grower_node.left_child, + binning_thresholds=binning_thresholds, + n_bins_non_missing=n_bins_non_missing, + next_free_node_idx=next_free_node_idx, + next_free_bitset_idx=next_free_bitset_idx, + ) + + node["right"] = next_free_node_idx + return _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + grower_node.right_child, + binning_thresholds=binning_thresholds, + n_bins_non_missing=n_bins_non_missing, + next_free_node_idx=next_free_node_idx, + next_free_bitset_idx=next_free_bitset_idx, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/utils.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..05e3bf052e6b5f3d6155e841e0c646e5fd57abf5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/utils.cpython-310-x86_64-linux-gnu.so differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/ensemble/_iforest.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_iforest.py new file mode 100644 index 0000000000000000000000000000000000000000..c975f121798f0e68538f17b0f25225d108424533 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_iforest.py @@ -0,0 +1,562 @@ +# Authors: Nicolas Goix +# Alexandre Gramfort +# License: BSD 3 clause + +import numbers +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from scipy.sparse import issparse + +from ..base import OutlierMixin, _fit_context +from ..tree import ExtraTreeRegressor +from ..tree._tree import DTYPE as tree_dtype +from ..utils import ( + check_array, + check_random_state, + gen_batches, + get_chunk_n_rows, +) +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils.validation import _num_samples, check_is_fitted +from ._bagging import BaseBagging + +__all__ = ["IsolationForest"] + + +class IsolationForest(OutlierMixin, BaseBagging): + """ + Isolation Forest Algorithm. + + Return the anomaly score of each sample using the IsolationForest algorithm + + The IsolationForest 'isolates' observations by randomly selecting a feature + and then randomly selecting a split value between the maximum and minimum + values of the selected feature. + + Since recursive partitioning can be represented by a tree structure, the + number of splittings required to isolate a sample is equivalent to the path + length from the root node to the terminating node. + + This path length, averaged over a forest of such random trees, is a + measure of normality and our decision function. + + Random partitioning produces noticeably shorter paths for anomalies. + Hence, when a forest of random trees collectively produce shorter path + lengths for particular samples, they are highly likely to be anomalies. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + n_estimators : int, default=100 + The number of base estimators in the ensemble. + + max_samples : "auto", int or float, default="auto" + The number of samples to draw from X to train each base estimator. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. + - If "auto", then `max_samples=min(256, n_samples)`. + + If max_samples is larger than the number of samples provided, + all samples will be used for all trees (no sampling). + + contamination : 'auto' or float, default='auto' + The amount of contamination of the data set, i.e. the proportion + of outliers in the data set. Used when fitting to define the threshold + on the scores of the samples. + + - If 'auto', the threshold is determined as in the + original paper. + - If float, the contamination should be in the range (0, 0.5]. + + .. versionchanged:: 0.22 + The default value of ``contamination`` changed from 0.1 + to ``'auto'``. + + max_features : int or float, default=1.0 + The number of features to draw from X to train each base estimator. + + - If int, then draw `max_features` features. + - If float, then draw `max(1, int(max_features * n_features_in_))` features. + + Note: using a float number less than 1.0 or integer less than number of + features will enable feature subsampling and leads to a longer runtime. + + bootstrap : bool, default=False + If True, individual trees are fit on random subsets of the training + data sampled with replacement. If False, sampling without replacement + is performed. 
+ + n_jobs : int, default=None + The number of jobs to run in parallel for both :meth:`fit` and + :meth:`predict`. ``None`` means 1 unless in a + :obj:`joblib.parallel_backend` context. ``-1`` means using all + processors. See :term:`Glossary ` for more details. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo-randomness of the selection of the feature + and split values for each branching step and each tree in the forest. + + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + verbose : int, default=0 + Controls the verbosity of the tree building process. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit a whole + new forest. See :term:`the Glossary `. + + .. versionadded:: 0.21 + + Attributes + ---------- + estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance + The child estimator template used to create the collection of + fitted sub-estimators. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of ExtraTreeRegressor instances + The collection of fitted sub-estimators. + + estimators_features_ : list of ndarray + The subset of drawn features for each base estimator. + + estimators_samples_ : list of ndarray + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. + + max_samples_ : int + The actual number of samples. + + offset_ : float + Offset used to define the decision function from the raw scores. We + have the relation: ``decision_function = score_samples - offset_``. + ``offset_`` is defined as follows. When the contamination parameter is + set to "auto", the offset is equal to -0.5 as the scores of inliers are + close to 0 and the scores of outliers are close to -1. When a + contamination parameter different than "auto" is provided, the offset + is defined in such a way we obtain the expected number of outliers + (samples with decision function < 0) in training. + + .. versionadded:: 0.20 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a + Gaussian distributed dataset. + sklearn.svm.OneClassSVM : Unsupervised Outlier Detection. + Estimate the support of a high-dimensional distribution. + The implementation is based on libsvm. + sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection + using Local Outlier Factor (LOF). + + Notes + ----- + The implementation is based on an ensemble of ExtraTreeRegressor. The + maximum depth of each tree is set to ``ceil(log_2(n))`` where + :math:`n` is the number of samples used to build the tree + (see (Liu et al., 2008) for more details). + + References + ---------- + .. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest." + Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on. + .. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based + anomaly detection." ACM Transactions on Knowledge Discovery from + Data (TKDD) 6.1 (2012): 3. 
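+
+    The depth cap mentioned in the Notes above follows directly from
+    ``max_samples``: with the default ``max_samples="auto"`` and at least 256
+    training samples, each tree is grown on 256 sub-sampled points and its
+    depth is limited to
+
+    >>> import numpy as np
+    >>> int(np.ceil(np.log2(256)))
+    8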
+ + Examples + -------- + >>> from sklearn.ensemble import IsolationForest + >>> X = [[-1.1], [0.3], [0.5], [100]] + >>> clf = IsolationForest(random_state=0).fit(X) + >>> clf.predict([[0.1], [0], [90]]) + array([ 1, 1, -1]) + + For an example of using isolation forest for anomaly detection see + :ref:`sphx_glr_auto_examples_ensemble_plot_isolation_forest.py`. + """ + + _parameter_constraints: dict = { + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "max_samples": [ + StrOptions({"auto"}), + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + ], + "contamination": [ + StrOptions({"auto"}), + Interval(Real, 0, 0.5, closed="right"), + ], + "max_features": [ + Integral, + Interval(Real, 0, 1, closed="right"), + ], + "bootstrap": ["boolean"], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "verbose": ["verbose"], + "warm_start": ["boolean"], + } + + def __init__( + self, + *, + n_estimators=100, + max_samples="auto", + contamination="auto", + max_features=1.0, + bootstrap=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + ): + super().__init__( + estimator=ExtraTreeRegressor( + max_features=1, splitter="random", random_state=random_state + ), + # here above max_features has no links with self.max_features + bootstrap=bootstrap, + bootstrap_features=False, + n_estimators=n_estimators, + max_samples=max_samples, + max_features=max_features, + warm_start=warm_start, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + ) + + self.contamination = contamination + + def _set_oob_score(self, X, y): + raise NotImplementedError("OOB score not supported by iforest") + + def _parallel_args(self): + # ExtraTreeRegressor releases the GIL, so it's more efficient to use + # a thread-based backend rather than a process-based backend so as + # to avoid suffering from communication overhead and extra memory + # copies. + return {"prefer": "threads"} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """ + Fit estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Use ``dtype=np.float32`` for maximum + efficiency. Sparse matrices are also supported, use sparse + ``csc_matrix`` for maximum efficiency. + + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + Returns + ------- + self : object + Fitted estimator. + """ + X = self._validate_data(X, accept_sparse=["csc"], dtype=tree_dtype) + if issparse(X): + # Pre-sort indices to avoid that each individual tree of the + # ensemble sorts the indices. + X.sort_indices() + + rnd = check_random_state(self.random_state) + y = rnd.uniform(size=X.shape[0]) + + # ensure that max_sample is in [1, n_samples]: + n_samples = X.shape[0] + + if isinstance(self.max_samples, str) and self.max_samples == "auto": + max_samples = min(256, n_samples) + + elif isinstance(self.max_samples, numbers.Integral): + if self.max_samples > n_samples: + warn( + "max_samples (%s) is greater than the " + "total number of samples (%s). max_samples " + "will be set to n_samples for estimation." 
+ % (self.max_samples, n_samples) + ) + max_samples = n_samples + else: + max_samples = self.max_samples + else: # max_samples is float + max_samples = int(self.max_samples * X.shape[0]) + + self.max_samples_ = max_samples + max_depth = int(np.ceil(np.log2(max(max_samples, 2)))) + super()._fit( + X, + y, + max_samples, + max_depth=max_depth, + sample_weight=sample_weight, + check_input=False, + ) + + self._average_path_length_per_tree, self._decision_path_lengths = zip( + *[ + ( + _average_path_length(tree.tree_.n_node_samples), + tree.tree_.compute_node_depths(), + ) + for tree in self.estimators_ + ] + ) + + if self.contamination == "auto": + # 0.5 plays a special role as described in the original paper. + # we take the opposite as we consider the opposite of their score. + self.offset_ = -0.5 + return self + + # Else, define offset_ wrt contamination parameter + # To avoid performing input validation a second time we call + # _score_samples rather than score_samples. + # _score_samples expects a CSR matrix, so we convert if necessary. + if issparse(X): + X = X.tocsr() + self.offset_ = np.percentile(self._score_samples(X), 100.0 * self.contamination) + + return self + + def predict(self, X): + """ + Predict if a particular sample is an outlier or not. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + For each observation, tells whether or not (+1 or -1) it should + be considered as an inlier according to the fitted model. + """ + check_is_fitted(self) + decision_func = self.decision_function(X) + is_inlier = np.ones_like(decision_func, dtype=int) + is_inlier[decision_func < 0] = -1 + return is_inlier + + def decision_function(self, X): + """ + Average anomaly score of X of the base classifiers. + + The anomaly score of an input sample is computed as + the mean anomaly score of the trees in the forest. + + The measure of normality of an observation given a tree is the depth + of the leaf containing this observation, which is equivalent to + the number of splittings required to isolate this point. In case of + several observations n_left in the leaf, the average path length of + a n_left samples isolation tree is added. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + scores : ndarray of shape (n_samples,) + The anomaly score of the input samples. + The lower, the more abnormal. Negative scores represent outliers, + positive scores represent inliers. + """ + # We subtract self.offset_ to make 0 be the threshold value for being + # an outlier: + + return self.score_samples(X) - self.offset_ + + def score_samples(self, X): + """ + Opposite of the anomaly score defined in the original paper. + + The anomaly score of an input sample is computed as + the mean anomaly score of the trees in the forest. + + The measure of normality of an observation given a tree is the depth + of the leaf containing this observation, which is equivalent to + the number of splittings required to isolate this point. In case of + several observations n_left in the leaf, the average path length of + a n_left samples isolation tree is added. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. + + Returns + ------- + scores : ndarray of shape (n_samples,) + The anomaly score of the input samples. + The lower, the more abnormal. + """ + # Check data + X = self._validate_data(X, accept_sparse="csr", dtype=tree_dtype, reset=False) + + return self._score_samples(X) + + def _score_samples(self, X): + """Private version of score_samples without input validation. + + Input validation would remove feature names, so we disable it. + """ + # Code structure from ForestClassifier/predict_proba + + check_is_fitted(self) + + # Take the opposite of the scores as bigger is better (here less abnormal) + return -self._compute_chunked_score_samples(X) + + def _compute_chunked_score_samples(self, X): + n_samples = _num_samples(X) + + if self._max_features == X.shape[1]: + subsample_features = False + else: + subsample_features = True + + # We get as many rows as possible within our working_memory budget + # (defined by sklearn.get_config()['working_memory']) to store + # self._max_features in each row during computation. + # + # Note: + # - this will get at least 1 row, even if 1 row of score will + # exceed working_memory. + # - this does only account for temporary memory usage while loading + # the data needed to compute the scores -- the returned scores + # themselves are 1D. + + chunk_n_rows = get_chunk_n_rows( + row_bytes=16 * self._max_features, max_n_rows=n_samples + ) + slices = gen_batches(n_samples, chunk_n_rows) + + scores = np.zeros(n_samples, order="f") + + for sl in slices: + # compute score on the slices of test samples: + scores[sl] = self._compute_score_samples(X[sl], subsample_features) + + return scores + + def _compute_score_samples(self, X, subsample_features): + """ + Compute the score of each samples in X going through the extra trees. + + Parameters + ---------- + X : array-like or sparse matrix + Data matrix. + + subsample_features : bool + Whether features should be subsampled. + """ + n_samples = X.shape[0] + + depths = np.zeros(n_samples, order="f") + + average_path_length_max_samples = _average_path_length([self._max_samples]) + + for tree_idx, (tree, features) in enumerate( + zip(self.estimators_, self.estimators_features_) + ): + X_subset = X[:, features] if subsample_features else X + + leaves_index = tree.apply(X_subset, check_input=False) + + depths += ( + self._decision_path_lengths[tree_idx][leaves_index] + + self._average_path_length_per_tree[tree_idx][leaves_index] + - 1.0 + ) + denominator = len(self.estimators_) * average_path_length_max_samples + scores = 2 ** ( + # For a single training sample, denominator and depth are 0. + # Therefore, we set the score manually to 1. + -np.divide( + depths, denominator, out=np.ones_like(depths), where=denominator != 0 + ) + ) + return scores + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + +def _average_path_length(n_samples_leaf): + """ + The average path length in a n_samples iTree, which is equal to + the average path length of an unsuccessful BST search since the + latter has the same structure as an isolation tree. + Parameters + ---------- + n_samples_leaf : array-like of shape (n_samples,) + The number of training samples in each test sample leaf, for + each estimators. 
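(Illustrative aside, not part of the library file: leaves holding one or two training samples map to average path lengths of 0.0 and 1.0, per the masks implemented below.)

>>> import numpy as np
>>> from sklearn.ensemble._iforest import _average_path_length
>>> _average_path_length(np.array([1, 2]))
array([0., 1.])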
+ + Returns + ------- + average_path_length : ndarray of shape (n_samples,) + """ + + n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False) + + n_samples_leaf_shape = n_samples_leaf.shape + n_samples_leaf = n_samples_leaf.reshape((1, -1)) + average_path_length = np.zeros(n_samples_leaf.shape) + + mask_1 = n_samples_leaf <= 1 + mask_2 = n_samples_leaf == 2 + not_mask = ~np.logical_or(mask_1, mask_2) + + average_path_length[mask_1] = 0.0 + average_path_length[mask_2] = 1.0 + average_path_length[not_mask] = ( + 2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma) + - 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask] + ) + + return average_path_length.reshape(n_samples_leaf_shape) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_stacking.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_stacking.py new file mode 100644 index 0000000000000000000000000000000000000000..c028e85895b14889384914a71194fcfc81e18016 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_stacking.py @@ -0,0 +1,1017 @@ +"""Stacking classifier and regressor.""" + +# Authors: Guillaume Lemaitre +# License: BSD 3 clause + +from abc import ABCMeta, abstractmethod +from copy import deepcopy +from numbers import Integral + +import numpy as np +import scipy.sparse as sparse + +from ..base import ( + ClassifierMixin, + RegressorMixin, + TransformerMixin, + _fit_context, + clone, + is_classifier, + is_regressor, +) +from ..exceptions import NotFittedError +from ..linear_model import LogisticRegression, RidgeCV +from ..model_selection import check_cv, cross_val_predict +from ..preprocessing import LabelEncoder +from ..utils import Bunch +from ..utils._estimator_html_repr import _VisualBlock +from ..utils._param_validation import HasMethods, StrOptions +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.metaestimators import available_if +from ..utils.multiclass import check_classification_targets, type_of_target +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_feature_names_in, + _check_response_method, + check_is_fitted, + column_or_1d, +) +from ._base import _BaseHeterogeneousEnsemble, _fit_single_estimator + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `final_estimator_` if available, otherwise we check the + unfitted `final_estimator`. We raise the original `AttributeError` if `attr` does + not exist. This function is used together with `available_if`. 
+ """ + + def check(self): + if hasattr(self, "final_estimator_"): + getattr(self.final_estimator_, attr) + else: + getattr(self.final_estimator, attr) + + return True + + return check + + +class _BaseStacking(TransformerMixin, _BaseHeterogeneousEnsemble, metaclass=ABCMeta): + """Base class for stacking method.""" + + _parameter_constraints: dict = { + "estimators": [list], + "final_estimator": [None, HasMethods("fit")], + "cv": ["cv_object", StrOptions({"prefit"})], + "n_jobs": [None, Integral], + "passthrough": ["boolean"], + "verbose": ["verbose"], + } + + @abstractmethod + def __init__( + self, + estimators, + final_estimator=None, + *, + cv=None, + stack_method="auto", + n_jobs=None, + verbose=0, + passthrough=False, + ): + super().__init__(estimators=estimators) + self.final_estimator = final_estimator + self.cv = cv + self.stack_method = stack_method + self.n_jobs = n_jobs + self.verbose = verbose + self.passthrough = passthrough + + def _clone_final_estimator(self, default): + if self.final_estimator is not None: + self.final_estimator_ = clone(self.final_estimator) + else: + self.final_estimator_ = clone(default) + + def _concatenate_predictions(self, X, predictions): + """Concatenate the predictions of each first layer learner and + possibly the input dataset `X`. + + If `X` is sparse and `self.passthrough` is False, the output of + `transform` will be dense (the predictions). If `X` is sparse + and `self.passthrough` is True, the output of `transform` will + be sparse. + + This helper is in charge of ensuring the predictions are 2D arrays and + it will drop one of the probability column when using probabilities + in the binary case. Indeed, the p(y|c=0) = 1 - p(y|c=1) + + When `y` type is `"multilabel-indicator"`` and the method used is + `predict_proba`, `preds` can be either a `ndarray` of shape + `(n_samples, n_class)` or for some estimators a list of `ndarray`. + This function will drop one of the probability column in this situation as well. + """ + X_meta = [] + for est_idx, preds in enumerate(predictions): + if isinstance(preds, list): + # `preds` is here a list of `n_targets` 2D ndarrays of + # `n_classes` columns. The k-th column contains the + # probabilities of the samples belonging the k-th class. + # + # Since those probabilities must sum to one for each sample, + # we can work with probabilities of `n_classes - 1` classes. + # Hence we drop the first column. + for pred in preds: + X_meta.append(pred[:, 1:]) + elif preds.ndim == 1: + # Some estimator return a 1D array for predictions + # which must be 2-dimensional arrays. + X_meta.append(preds.reshape(-1, 1)) + elif ( + self.stack_method_[est_idx] == "predict_proba" + and len(self.classes_) == 2 + ): + # Remove the first column when using probabilities in + # binary classification because both features `preds` are perfectly + # collinear. + X_meta.append(preds[:, 1:]) + else: + X_meta.append(preds) + + self._n_feature_outs = [pred.shape[1] for pred in X_meta] + if self.passthrough: + X_meta.append(X) + if sparse.issparse(X): + return sparse.hstack(X_meta, format=X.format) + + return np.hstack(X_meta) + + @staticmethod + def _method_name(name, estimator, method): + if estimator == "drop": + return None + if method == "auto": + method = ["predict_proba", "decision_function", "predict"] + try: + method_name = _check_response_method(estimator, method).__name__ + except AttributeError as e: + raise ValueError( + f"Underlying estimator {name} does not implement the method {method}." 
+ ) from e + + return method_name + + @_fit_context( + # estimators in Stacking*.estimators are not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Fit the estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,) or default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if all underlying estimators + support sample weights. + + .. versionchanged:: 0.23 + when not None, `sample_weight` is passed to all underlying + estimators + + Returns + ------- + self : object + """ + # all_estimators contains all estimators, the one to be fitted and the + # 'drop' string. + names, all_estimators = self._validate_estimators() + self._validate_final_estimator() + + stack_method = [self.stack_method] * len(all_estimators) + + if self.cv == "prefit": + self.estimators_ = [] + for estimator in all_estimators: + if estimator != "drop": + check_is_fitted(estimator) + self.estimators_.append(estimator) + else: + # Fit the base estimators on the whole training data. Those + # base estimators will be used in transform, predict, and + # predict_proba. They are exposed publicly. + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_fit_single_estimator)(clone(est), X, y, sample_weight) + for est in all_estimators + if est != "drop" + ) + + self.named_estimators_ = Bunch() + est_fitted_idx = 0 + for name_est, org_est in zip(names, all_estimators): + if org_est != "drop": + current_estimator = self.estimators_[est_fitted_idx] + self.named_estimators_[name_est] = current_estimator + est_fitted_idx += 1 + if hasattr(current_estimator, "feature_names_in_"): + self.feature_names_in_ = current_estimator.feature_names_in_ + else: + self.named_estimators_[name_est] = "drop" + + self.stack_method_ = [ + self._method_name(name, est, meth) + for name, est, meth in zip(names, all_estimators, stack_method) + ] + + if self.cv == "prefit": + # Generate predictions from prefit models + predictions = [ + getattr(estimator, predict_method)(X) + for estimator, predict_method in zip(all_estimators, self.stack_method_) + if estimator != "drop" + ] + else: + # To train the meta-classifier using the most data as possible, we use + # a cross-validation to obtain the output of the stacked estimators. + # To ensure that the data provided to each estimator are the same, + # we need to set the random state of the cv if there is one and we + # need to take a copy. + cv = check_cv(self.cv, y=y, classifier=is_classifier(self)) + if hasattr(cv, "random_state") and cv.random_state is None: + cv.random_state = np.random.RandomState() + + fit_params = ( + {"sample_weight": sample_weight} if sample_weight is not None else None + ) + predictions = Parallel(n_jobs=self.n_jobs)( + delayed(cross_val_predict)( + clone(est), + X, + y, + cv=deepcopy(cv), + method=meth, + n_jobs=self.n_jobs, + params=fit_params, + verbose=self.verbose, + ) + for est, meth in zip(all_estimators, self.stack_method_) + if est != "drop" + ) + + # Only not None or not 'drop' estimators will be used in transform. + # Remove the None from the method as well. 
+ self.stack_method_ = [ + meth + for (meth, est) in zip(self.stack_method_, all_estimators) + if est != "drop" + ] + + X_meta = self._concatenate_predictions(X, predictions) + _fit_single_estimator( + self.final_estimator_, X_meta, y, sample_weight=sample_weight + ) + + return self + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`.""" + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + f"{self.__class__.__name__} object has no attribute n_features_in_" + ) from nfe + return self.estimators_[0].n_features_in_ + + def _transform(self, X): + """Concatenate and return the predictions of the estimators.""" + check_is_fitted(self) + predictions = [ + getattr(est, meth)(X) + for est, meth in zip(self.estimators_, self.stack_method_) + if est != "drop" + ] + return self._concatenate_predictions(X, predictions) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. The input feature names are only used when `passthrough` is + `True`. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + If `passthrough` is `False`, then only the names of `estimators` are used + to generate the output feature names. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in( + self, input_features, generate_names=self.passthrough + ) + + class_name = self.__class__.__name__.lower() + non_dropped_estimators = ( + name for name, est in self.estimators if est != "drop" + ) + meta_names = [] + for est, n_features_out in zip(non_dropped_estimators, self._n_feature_outs): + if n_features_out == 1: + meta_names.append(f"{class_name}_{est}") + else: + meta_names.extend( + f"{class_name}_{est}{i}" for i in range(n_features_out) + ) + + if self.passthrough: + return np.concatenate((meta_names, input_features)) + + return np.asarray(meta_names, dtype=object) + + @available_if(_estimator_has("predict")) + def predict(self, X, **predict_params): + """Predict target for X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + **predict_params : dict of str -> obj + Parameters to the `predict` called by the `final_estimator`. Note + that this may be used to return uncertainties from some estimators + with `return_std` or `return_cov`. Be aware that it will only + accounts for uncertainty in the final estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) + Predicted targets. 
+ """ + + check_is_fitted(self) + return self.final_estimator_.predict(self.transform(X), **predict_params) + + def _sk_visual_block_with_final_estimator(self, final_estimator): + names, estimators = zip(*self.estimators) + parallel = _VisualBlock("parallel", estimators, names=names, dash_wrapped=False) + + # final estimator is wrapped in a parallel block to show the label: + # 'final_estimator' in the html repr + final_block = _VisualBlock( + "parallel", [final_estimator], names=["final_estimator"], dash_wrapped=False + ) + return _VisualBlock("serial", (parallel, final_block), dash_wrapped=False) + + +class StackingClassifier(_RoutingNotSupportedMixin, ClassifierMixin, _BaseStacking): + """Stack of estimators with a final classifier. + + Stacked generalization consists in stacking the output of individual + estimator and use a classifier to compute the final prediction. Stacking + allows to use the strength of each individual estimator by using their + output as input of a final estimator. + + Note that `estimators_` are fitted on the full `X` while `final_estimator_` + is trained using cross-validated predictions of the base estimators using + `cross_val_predict`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + estimators : list of (str, estimator) + Base estimators which will be stacked together. Each element of the + list is defined as a tuple of string (i.e. name) and an estimator + instance. An estimator can be set to 'drop' using `set_params`. + + The type of estimator is generally expected to be a classifier. + However, one can pass a regressor for some use case (e.g. ordinal + regression). + + final_estimator : estimator, default=None + A classifier which will be used to combine the base estimators. + The default classifier is a + :class:`~sklearn.linear_model.LogisticRegression`. + + cv : int, cross-validation generator, iterable, or "prefit", default=None + Determines the cross-validation splitting strategy used in + `cross_val_predict` to train `final_estimator`. Possible inputs for + cv are: + + * None, to use the default 5-fold cross validation, + * integer, to specify the number of folds in a (Stratified) KFold, + * An object to be used as a cross-validation generator, + * An iterable yielding train, test splits, + * `"prefit"` to assume the `estimators` are prefit. In this case, the + estimators will not be refitted. + + For integer/None inputs, if the estimator is a classifier and y is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. + In all other cases, :class:`~sklearn.model_selection.KFold` is used. + These splitters are instantiated with `shuffle=False` so the splits + will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + If "prefit" is passed, it is assumed that all `estimators` have + been fitted already. The `final_estimator_` is trained on the `estimators` + predictions on the full training set and are **not** cross validated + predictions. Please note that if the models have been trained on the same + data to train the stacking model, there is a very high risk of overfitting. + + .. versionadded:: 1.1 + The 'prefit' option was added in 1.1 + + .. note:: + A larger number of split will provide no benefits if the number + of training samples is large enough. Indeed, the training time + will increase. ``cv`` is not used for model evaluation but for + prediction. 
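(A minimal sketch of the `"prefit"` option; illustrative only and not part of the library file. The base estimators are fitted on the same data here purely for brevity, which the note above warns can overfit in practice.)

>>> from sklearn.datasets import make_classification
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.ensemble import StackingClassifier
>>> X, y = make_classification(random_state=0)
>>> prefit_estimators = [
...     ('lr', LogisticRegression(max_iter=1000).fit(X, y)),
...     ('dt', DecisionTreeClassifier(random_state=0).fit(X, y)),
... ]
>>> stack = StackingClassifier(estimators=prefit_estimators, cv="prefit").fit(X, y)
>>> stack.predict(X[:2]).shape
(2,)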
+ + stack_method : {'auto', 'predict_proba', 'decision_function', 'predict'}, \ + default='auto' + Methods called for each base estimator. It can be: + + * if 'auto', it will try to invoke, for each estimator, + `'predict_proba'`, `'decision_function'` or `'predict'` in that + order. + * otherwise, one of `'predict_proba'`, `'decision_function'` or + `'predict'`. If the method is not implemented by the estimator, it + will raise an error. + + n_jobs : int, default=None + The number of jobs to run in parallel all `estimators` `fit`. + `None` means 1 unless in a `joblib.parallel_backend` context. -1 means + using all processors. See Glossary for more details. + + passthrough : bool, default=False + When False, only the predictions of estimators will be used as + training data for `final_estimator`. When True, the + `final_estimator` is trained on the predictions as well as the + original training data. + + verbose : int, default=0 + Verbosity level. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) or list of ndarray if `y` \ + is of type `"multilabel-indicator"`. + Class labels. + + estimators_ : list of estimators + The elements of the `estimators` parameter, having been fitted on the + training data. If an estimator has been set to `'drop'`, it + will not appear in `estimators_`. When `cv="prefit"`, `estimators_` + is set to `estimators` and is not fitted again. + + named_estimators_ : :class:`~sklearn.utils.Bunch` + Attribute to access any fitted sub-estimators by name. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying classifier exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + final_estimator_ : estimator + The classifier which predicts given the output of `estimators_`. + + stack_method_ : list of str + The method used by each base estimator. + + See Also + -------- + StackingRegressor : Stack of estimators with a final regressor. + + Notes + ----- + When `predict_proba` is used by each estimator (i.e. most of the time for + `stack_method='auto'` or specifically for `stack_method='predict_proba'`), + The first column predicted by each estimator will be dropped in the case + of a binary classification problem. Indeed, both feature will be perfectly + collinear. + + In some cases (e.g. ordinal regression), one can pass regressors as the + first layer of the :class:`StackingClassifier`. However, note that `y` will + be internally encoded in a numerically increasing order or lexicographic + order. If this ordering is not adequate, one should manually numerically + encode the classes in the desired order. + + References + ---------- + .. [1] Wolpert, David H. "Stacked generalization." Neural networks 5.2 + (1992): 241-259. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.svm import LinearSVC + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.ensemble import StackingClassifier + >>> X, y = load_iris(return_X_y=True) + >>> estimators = [ + ... ('rf', RandomForestClassifier(n_estimators=10, random_state=42)), + ... ('svr', make_pipeline(StandardScaler(), + ... 
LinearSVC(dual="auto", random_state=42))) + ... ] + >>> clf = StackingClassifier( + ... estimators=estimators, final_estimator=LogisticRegression() + ... ) + >>> from sklearn.model_selection import train_test_split + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, stratify=y, random_state=42 + ... ) + >>> clf.fit(X_train, y_train).score(X_test, y_test) + 0.9... + """ + + _parameter_constraints: dict = { + **_BaseStacking._parameter_constraints, + "stack_method": [ + StrOptions({"auto", "predict_proba", "decision_function", "predict"}) + ], + } + + def __init__( + self, + estimators, + final_estimator=None, + *, + cv=None, + stack_method="auto", + n_jobs=None, + passthrough=False, + verbose=0, + ): + super().__init__( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + stack_method=stack_method, + n_jobs=n_jobs, + passthrough=passthrough, + verbose=verbose, + ) + + def _validate_final_estimator(self): + self._clone_final_estimator(default=LogisticRegression()) + if not is_classifier(self.final_estimator_): + raise ValueError( + "'final_estimator' parameter should be a classifier. Got {}".format( + self.final_estimator_ + ) + ) + + def _validate_estimators(self): + """Overload the method of `_BaseHeterogeneousEnsemble` to be more + lenient towards the type of `estimators`. + + Regressors can be accepted for some cases such as ordinal regression. + """ + if len(self.estimators) == 0: + raise ValueError( + "Invalid 'estimators' attribute, 'estimators' should be a " + "non-empty list of (string, estimator) tuples." + ) + names, estimators = zip(*self.estimators) + self._validate_names(names) + + has_estimator = any(est != "drop" for est in estimators) + if not has_estimator: + raise ValueError( + "All estimators are dropped. At least one is required " + "to be an estimator." + ) + + return names, estimators + + def fit(self, X, y, sample_weight=None): + """Fit the estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. Note that `y` will be internally encoded in + numerically increasing order or lexicographic order. If the order + matter (e.g. for ordinal regression), one should numerically encode + the target `y` before calling :term:`fit`. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if all underlying estimators + support sample weights. + + Returns + ------- + self : object + Returns a fitted instance of estimator. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + check_classification_targets(y) + if type_of_target(y) == "multilabel-indicator": + self._label_encoder = [LabelEncoder().fit(yk) for yk in y.T] + self.classes_ = [le.classes_ for le in self._label_encoder] + y_encoded = np.array( + [ + self._label_encoder[target_idx].transform(target) + for target_idx, target in enumerate(y.T) + ] + ).T + else: + self._label_encoder = LabelEncoder().fit(y) + self.classes_ = self._label_encoder.classes_ + y_encoded = self._label_encoder.transform(y) + return super().fit(X, y_encoded, sample_weight) + + @available_if(_estimator_has("predict")) + def predict(self, X, **predict_params): + """Predict target for X. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + **predict_params : dict of str -> obj + Parameters to the `predict` called by the `final_estimator`. Note + that this may be used to return uncertainties from some estimators + with `return_std` or `return_cov`. Be aware that it will only + accounts for uncertainty in the final estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) + Predicted targets. + """ + y_pred = super().predict(X, **predict_params) + if isinstance(self._label_encoder, list): + # Handle the multilabel-indicator case + y_pred = np.array( + [ + self._label_encoder[target_idx].inverse_transform(target) + for target_idx, target in enumerate(y_pred.T) + ] + ).T + else: + y_pred = self._label_encoder.inverse_transform(y_pred) + return y_pred + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Predict class probabilities for `X` using the final estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + probabilities : ndarray of shape (n_samples, n_classes) or \ + list of ndarray of shape (n_output,) + The class probabilities of the input samples. + """ + check_is_fitted(self) + y_pred = self.final_estimator_.predict_proba(self.transform(X)) + + if isinstance(self._label_encoder, list): + # Handle the multilabel-indicator cases + y_pred = np.array([preds[:, 0] for preds in y_pred]).T + return y_pred + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Decision function for samples in `X` using the final estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + decisions : ndarray of shape (n_samples,), (n_samples, n_classes), \ + or (n_samples, n_classes * (n_classes-1) / 2) + The decision function computed the final estimator. + """ + check_is_fitted(self) + return self.final_estimator_.decision_function(self.transform(X)) + + def transform(self, X): + """Return class labels or probabilities for X for each estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + y_preds : ndarray of shape (n_samples, n_estimators) or \ + (n_samples, n_classes * n_estimators) + Prediction outputs for each estimator. + """ + return self._transform(X) + + def _sk_visual_block_(self): + # If final_estimator's default changes then this should be + # updated. + if self.final_estimator is None: + final_estimator = LogisticRegression() + else: + final_estimator = self.final_estimator + return super()._sk_visual_block_with_final_estimator(final_estimator) + + +class StackingRegressor(_RoutingNotSupportedMixin, RegressorMixin, _BaseStacking): + """Stack of estimators with a final regressor. + + Stacked generalization consists in stacking the output of individual + estimator and use a regressor to compute the final prediction. 
Stacking + allows to use the strength of each individual estimator by using their + output as input of a final estimator. + + Note that `estimators_` are fitted on the full `X` while `final_estimator_` + is trained using cross-validated predictions of the base estimators using + `cross_val_predict`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + estimators : list of (str, estimator) + Base estimators which will be stacked together. Each element of the + list is defined as a tuple of string (i.e. name) and an estimator + instance. An estimator can be set to 'drop' using `set_params`. + + final_estimator : estimator, default=None + A regressor which will be used to combine the base estimators. + The default regressor is a :class:`~sklearn.linear_model.RidgeCV`. + + cv : int, cross-validation generator, iterable, or "prefit", default=None + Determines the cross-validation splitting strategy used in + `cross_val_predict` to train `final_estimator`. Possible inputs for + cv are: + + * None, to use the default 5-fold cross validation, + * integer, to specify the number of folds in a (Stratified) KFold, + * An object to be used as a cross-validation generator, + * An iterable yielding train, test splits. + * "prefit" to assume the `estimators` are prefit, and skip cross validation + + For integer/None inputs, if the estimator is a classifier and y is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. + In all other cases, :class:`~sklearn.model_selection.KFold` is used. + These splitters are instantiated with `shuffle=False` so the splits + will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + If "prefit" is passed, it is assumed that all `estimators` have + been fitted already. The `final_estimator_` is trained on the `estimators` + predictions on the full training set and are **not** cross validated + predictions. Please note that if the models have been trained on the same + data to train the stacking model, there is a very high risk of overfitting. + + .. versionadded:: 1.1 + The 'prefit' option was added in 1.1 + + .. note:: + A larger number of split will provide no benefits if the number + of training samples is large enough. Indeed, the training time + will increase. ``cv`` is not used for model evaluation but for + prediction. + + n_jobs : int, default=None + The number of jobs to run in parallel for `fit` of all `estimators`. + `None` means 1 unless in a `joblib.parallel_backend` context. -1 means + using all processors. See Glossary for more details. + + passthrough : bool, default=False + When False, only the predictions of estimators will be used as + training data for `final_estimator`. When True, the + `final_estimator` is trained on the predictions as well as the + original training data. + + verbose : int, default=0 + Verbosity level. + + Attributes + ---------- + estimators_ : list of estimator + The elements of the `estimators` parameter, having been fitted on the + training data. If an estimator has been set to `'drop'`, it + will not appear in `estimators_`. When `cv="prefit"`, `estimators_` + is set to `estimators` and is not fitted again. + + named_estimators_ : :class:`~sklearn.utils.Bunch` + Attribute to access any fitted sub-estimators by name. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying regressor exposes such an attribute when fit. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + final_estimator_ : estimator + The regressor to stacked the base estimators fitted. + + stack_method_ : list of str + The method used by each base estimator. + + See Also + -------- + StackingClassifier : Stack of estimators with a final classifier. + + References + ---------- + .. [1] Wolpert, David H. "Stacked generalization." Neural networks 5.2 + (1992): 241-259. + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.linear_model import RidgeCV + >>> from sklearn.svm import LinearSVR + >>> from sklearn.ensemble import RandomForestRegressor + >>> from sklearn.ensemble import StackingRegressor + >>> X, y = load_diabetes(return_X_y=True) + >>> estimators = [ + ... ('lr', RidgeCV()), + ... ('svr', LinearSVR(dual="auto", random_state=42)) + ... ] + >>> reg = StackingRegressor( + ... estimators=estimators, + ... final_estimator=RandomForestRegressor(n_estimators=10, + ... random_state=42) + ... ) + >>> from sklearn.model_selection import train_test_split + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=42 + ... ) + >>> reg.fit(X_train, y_train).score(X_test, y_test) + 0.3... + """ + + def __init__( + self, + estimators, + final_estimator=None, + *, + cv=None, + n_jobs=None, + passthrough=False, + verbose=0, + ): + super().__init__( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + stack_method="predict", + n_jobs=n_jobs, + passthrough=passthrough, + verbose=verbose, + ) + + def _validate_final_estimator(self): + self._clone_final_estimator(default=RidgeCV()) + if not is_regressor(self.final_estimator_): + raise ValueError( + "'final_estimator' parameter should be a regressor. Got {}".format( + self.final_estimator_ + ) + ) + + def fit(self, X, y, sample_weight=None): + """Fit the estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if all underlying estimators + support sample weights. + + Returns + ------- + self : object + Returns a fitted instance. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + y = column_or_1d(y, warn=True) + return super().fit(X, y, sample_weight) + + def transform(self, X): + """Return the predictions for X for each estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + y_preds : ndarray of shape (n_samples, n_estimators) + Prediction outputs for each estimator. + """ + return self._transform(X) + + def fit_transform(self, X, y, sample_weight=None): + """Fit the estimators and return the predictions for X for each estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. 
+ + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if all underlying estimators + support sample weights. + + Returns + ------- + y_preds : ndarray of shape (n_samples, n_estimators) + Prediction outputs for each estimator. + """ + return super().fit_transform(X, y, sample_weight=sample_weight) + + def _sk_visual_block_(self): + # If final_estimator's default changes then this should be + # updated. + if self.final_estimator is None: + final_estimator = RidgeCV() + else: + final_estimator = self.final_estimator + return super()._sk_visual_block_with_final_estimator(final_estimator) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_voting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_voting.py new file mode 100644 index 0000000000000000000000000000000000000000..48cb104019e858f78a8680cb9d7db39c546417c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_voting.py @@ -0,0 +1,679 @@ +""" +Soft Voting/Majority Rule classifier and Voting regressor. + +This module contains: + - A Soft Voting/Majority Rule classifier for classification estimators. + - A Voting regressor for regression estimators. +""" + +# Authors: Sebastian Raschka , +# Gilles Louppe , +# Ramil Nugmanov +# Mohamed Ali Jamaoui +# +# License: BSD 3 clause + +from abc import abstractmethod +from numbers import Integral + +import numpy as np + +from ..base import ( + ClassifierMixin, + RegressorMixin, + TransformerMixin, + _fit_context, + clone, +) +from ..exceptions import NotFittedError +from ..preprocessing import LabelEncoder +from ..utils import Bunch +from ..utils._estimator_html_repr import _VisualBlock +from ..utils._param_validation import StrOptions +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.metaestimators import available_if +from ..utils.multiclass import type_of_target +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_feature_names_in, + check_is_fitted, + column_or_1d, +) +from ._base import _BaseHeterogeneousEnsemble, _fit_single_estimator + + +class _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble): + """Base class for voting. + + Warning: This class should not be used directly. Use derived classes + instead. 
+ """ + + _parameter_constraints: dict = { + "estimators": [list], + "weights": ["array-like", None], + "n_jobs": [None, Integral], + "verbose": ["verbose"], + } + + def _log_message(self, name, idx, total): + if not self.verbose: + return None + return f"({idx} of {total}) Processing {name}" + + @property + def _weights_not_none(self): + """Get the weights of not `None` estimators.""" + if self.weights is None: + return None + return [w for est, w in zip(self.estimators, self.weights) if est[1] != "drop"] + + def _predict(self, X): + """Collect results from clf.predict calls.""" + return np.asarray([est.predict(X) for est in self.estimators_]).T + + @abstractmethod + def fit(self, X, y, sample_weight=None): + """Get common fit operations.""" + names, clfs = self._validate_estimators() + + if self.weights is not None and len(self.weights) != len(self.estimators): + raise ValueError( + "Number of `estimators` and weights must be equal; got" + f" {len(self.weights)} weights, {len(self.estimators)} estimators" + ) + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_fit_single_estimator)( + clone(clf), + X, + y, + sample_weight=sample_weight, + message_clsname="Voting", + message=self._log_message(names[idx], idx + 1, len(clfs)), + ) + for idx, clf in enumerate(clfs) + if clf != "drop" + ) + + self.named_estimators_ = Bunch() + + # Uses 'drop' as placeholder for dropped estimators + est_iter = iter(self.estimators_) + for name, est in self.estimators: + current_est = est if est == "drop" else next(est_iter) + self.named_estimators_[name] = current_est + + if hasattr(current_est, "feature_names_in_"): + self.feature_names_in_ = current_est.feature_names_in_ + + return self + + def fit_transform(self, X, y=None, **fit_params): + """Return class labels or probabilities for each estimator. + + Return predictions for X for each estimator. + + Parameters + ---------- + X : {array-like, sparse matrix, dataframe} of shape \ + (n_samples, n_features) + Input samples. + + y : ndarray of shape (n_samples,), default=None + Target values (None for unsupervised transformations). + + **fit_params : dict + Additional fit parameters. + + Returns + ------- + X_new : ndarray array of shape (n_samples, n_features_new) + Transformed array. + """ + return super().fit_transform(X, y, **fit_params) + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`.""" + # For consistency with other estimators we raise a AttributeError so + # that hasattr() fails if the estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.estimators_[0].n_features_in_ + + def _sk_visual_block_(self): + names, estimators = zip(*self.estimators) + return _VisualBlock("parallel", estimators, names=names) + + +class VotingClassifier(_RoutingNotSupportedMixin, ClassifierMixin, _BaseVoting): + """Soft Voting/Majority Rule classifier for unfitted estimators. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.17 + + Parameters + ---------- + estimators : list of (str, estimator) tuples + Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones + of those original estimators that will be stored in the class attribute + ``self.estimators_``. An estimator can be set to ``'drop'`` using + :meth:`set_params`. + + .. versionchanged:: 0.21 + ``'drop'`` is accepted. 
Using None was deprecated in 0.22 and + support was removed in 0.24. + + voting : {'hard', 'soft'}, default='hard' + If 'hard', uses predicted class labels for majority rule voting. + Else if 'soft', predicts the class label based on the argmax of + the sums of the predicted probabilities, which is recommended for + an ensemble of well-calibrated classifiers. + + weights : array-like of shape (n_classifiers,), default=None + Sequence of weights (`float` or `int`) to weight the occurrences of + predicted class labels (`hard` voting) or class probabilities + before averaging (`soft` voting). Uses uniform weights if `None`. + + n_jobs : int, default=None + The number of jobs to run in parallel for ``fit``. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.18 + + flatten_transform : bool, default=True + Affects shape of transform output only when voting='soft' + If voting='soft' and flatten_transform=True, transform method returns + matrix with shape (n_samples, n_classifiers * n_classes). If + flatten_transform=False, it returns + (n_classifiers, n_samples, n_classes). + + verbose : bool, default=False + If True, the time elapsed while fitting will be printed as it + is completed. + + .. versionadded:: 0.23 + + Attributes + ---------- + estimators_ : list of classifiers + The collection of fitted sub-estimators as defined in ``estimators`` + that are not 'drop'. + + named_estimators_ : :class:`~sklearn.utils.Bunch` + Attribute to access any fitted sub-estimators by name. + + .. versionadded:: 0.20 + + le_ : :class:`~sklearn.preprocessing.LabelEncoder` + Transformer used to encode the labels during fit and decode during + prediction. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying classifier exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + VotingRegressor : Prediction voting regressor. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.naive_bayes import GaussianNB + >>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier + >>> clf1 = LogisticRegression(multi_class='multinomial', random_state=1) + >>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1) + >>> clf3 = GaussianNB() + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> eclf1 = VotingClassifier(estimators=[ + ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') + >>> eclf1 = eclf1.fit(X, y) + >>> print(eclf1.predict(X)) + [1 1 1 2 2 2] + >>> np.array_equal(eclf1.named_estimators_.lr.predict(X), + ... eclf1.named_estimators_['lr'].predict(X)) + True + >>> eclf2 = VotingClassifier(estimators=[ + ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], + ... voting='soft') + >>> eclf2 = eclf2.fit(X, y) + >>> print(eclf2.predict(X)) + [1 1 1 2 2 2] + + To drop an estimator, :meth:`set_params` can be used to remove it. 
Here we + dropped one of the estimators, resulting in 2 fitted estimators: + + >>> eclf2 = eclf2.set_params(lr='drop') + >>> eclf2 = eclf2.fit(X, y) + >>> len(eclf2.estimators_) + 2 + + Setting `flatten_transform=True` with `voting='soft'` flattens output shape of + `transform`: + + >>> eclf3 = VotingClassifier(estimators=[ + ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], + ... voting='soft', weights=[2,1,1], + ... flatten_transform=True) + >>> eclf3 = eclf3.fit(X, y) + >>> print(eclf3.predict(X)) + [1 1 1 2 2 2] + >>> print(eclf3.transform(X).shape) + (6, 6) + """ + + _parameter_constraints: dict = { + **_BaseVoting._parameter_constraints, + "voting": [StrOptions({"hard", "soft"})], + "flatten_transform": ["boolean"], + } + + def __init__( + self, + estimators, + *, + voting="hard", + weights=None, + n_jobs=None, + flatten_transform=True, + verbose=False, + ): + super().__init__(estimators=estimators) + self.voting = voting + self.weights = weights + self.n_jobs = n_jobs + self.flatten_transform = flatten_transform + self.verbose = verbose + + @_fit_context( + # estimators in VotingClassifier.estimators are not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Fit the estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if all underlying estimators + support sample weights. + + .. versionadded:: 0.18 + + Returns + ------- + self : object + Returns the instance itself. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + y_type = type_of_target(y, input_name="y") + if y_type in ("unknown", "continuous"): + # raise a specific ValueError for non-classification tasks + raise ValueError( + f"Unknown label type: {y_type}. Maybe you are trying to fit a " + "classifier, which expects discrete classes on a " + "regression target with continuous values." + ) + elif y_type not in ("binary", "multiclass"): + # raise a NotImplementedError for backward compatibility for non-supported + # classification tasks + raise NotImplementedError( + f"{self.__class__.__name__} only supports binary or multiclass " + "classification. Multilabel and multi-output classification are not " + "supported." + ) + + self.le_ = LabelEncoder().fit(y) + self.classes_ = self.le_.classes_ + transformed_y = self.le_.transform(y) + + return super().fit(X, transformed_y, sample_weight) + + def predict(self, X): + """Predict class labels for X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. + + Returns + ------- + maj : array-like of shape (n_samples,) + Predicted class labels. 
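(A small illustration, not from the library, of the weighted majority vote computed in the method body below, for a single sample scored by three classifiers.)

>>> import numpy as np
>>> votes = np.array([0, 0, 1])   # label-encoded predictions from three classifiers
>>> int(np.argmax(np.bincount(votes, weights=[1, 1, 3])))
1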
+ """ + check_is_fitted(self) + if self.voting == "soft": + maj = np.argmax(self.predict_proba(X), axis=1) + + else: # 'hard' voting + predictions = self._predict(X) + maj = np.apply_along_axis( + lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)), + axis=1, + arr=predictions, + ) + + maj = self.le_.inverse_transform(maj) + + return maj + + def _collect_probas(self, X): + """Collect results from clf.predict calls.""" + return np.asarray([clf.predict_proba(X) for clf in self.estimators_]) + + def _check_voting(self): + if self.voting == "hard": + raise AttributeError( + f"predict_proba is not available when voting={repr(self.voting)}" + ) + return True + + @available_if(_check_voting) + def predict_proba(self, X): + """Compute probabilities of possible outcomes for samples in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. + + Returns + ------- + avg : array-like of shape (n_samples, n_classes) + Weighted average probability for each class per sample. + """ + check_is_fitted(self) + avg = np.average( + self._collect_probas(X), axis=0, weights=self._weights_not_none + ) + return avg + + def transform(self, X): + """Return class labels or probabilities for X for each estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + probabilities_or_labels + If `voting='soft'` and `flatten_transform=True`: + returns ndarray of shape (n_samples, n_classifiers * n_classes), + being class probabilities calculated by each classifier. + If `voting='soft' and `flatten_transform=False`: + ndarray of shape (n_classifiers, n_samples, n_classes) + If `voting='hard'`: + ndarray of shape (n_samples, n_classifiers), being + class labels predicted by each classifier. + """ + check_is_fitted(self) + + if self.voting == "soft": + probas = self._collect_probas(X) + if not self.flatten_transform: + return probas + return np.hstack(probas) + + else: + return self._predict(X) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + if self.voting == "soft" and not self.flatten_transform: + raise ValueError( + "get_feature_names_out is not supported when `voting='soft'` and " + "`flatten_transform=False`" + ) + + _check_feature_names_in(self, input_features, generate_names=False) + class_name = self.__class__.__name__.lower() + + active_names = [name for name, est in self.estimators if est != "drop"] + + if self.voting == "hard": + return np.asarray( + [f"{class_name}_{name}" for name in active_names], dtype=object + ) + + # voting == "soft" + n_classes = len(self.classes_) + names_out = [ + f"{class_name}_{name}{i}" for name in active_names for i in range(n_classes) + ] + return np.asarray(names_out, dtype=object) + + +class VotingRegressor(_RoutingNotSupportedMixin, RegressorMixin, _BaseVoting): + """Prediction voting regressor for unfitted estimators. + + A voting regressor is an ensemble meta-estimator that fits several base + regressors, each on the whole dataset. 
Then it averages the individual + predictions to form a final prediction. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.21 + + Parameters + ---------- + estimators : list of (str, estimator) tuples + Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones + of those original estimators that will be stored in the class attribute + ``self.estimators_``. An estimator can be set to ``'drop'`` using + :meth:`set_params`. + + .. versionchanged:: 0.21 + ``'drop'`` is accepted. Using None was deprecated in 0.22 and + support was removed in 0.24. + + weights : array-like of shape (n_regressors,), default=None + Sequence of weights (`float` or `int`) to weight the occurrences of + predicted values before averaging. Uses uniform weights if `None`. + + n_jobs : int, default=None + The number of jobs to run in parallel for ``fit``. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + If True, the time elapsed while fitting will be printed as it + is completed. + + .. versionadded:: 0.23 + + Attributes + ---------- + estimators_ : list of regressors + The collection of fitted sub-estimators as defined in ``estimators`` + that are not 'drop'. + + named_estimators_ : :class:`~sklearn.utils.Bunch` + Attribute to access any fitted sub-estimators by name. + + .. versionadded:: 0.20 + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying regressor exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + VotingClassifier : Soft Voting/Majority Rule classifier. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import LinearRegression + >>> from sklearn.ensemble import RandomForestRegressor + >>> from sklearn.ensemble import VotingRegressor + >>> from sklearn.neighbors import KNeighborsRegressor + >>> r1 = LinearRegression() + >>> r2 = RandomForestRegressor(n_estimators=10, random_state=1) + >>> r3 = KNeighborsRegressor() + >>> X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]]) + >>> y = np.array([2, 6, 12, 20, 30, 42]) + >>> er = VotingRegressor([('lr', r1), ('rf', r2), ('r3', r3)]) + >>> print(er.fit(X, y).predict(X)) + [ 6.8... 8.4... 12.5... 17.8... 26... 34...] + + In the following example, we drop the `'lr'` estimator with + :meth:`~VotingRegressor.set_params` and fit the remaining two estimators: + + >>> er = er.set_params(lr='drop') + >>> er = er.fit(X, y) + >>> len(er.estimators_) + 2 + """ + + def __init__(self, estimators, *, weights=None, n_jobs=None, verbose=False): + super().__init__(estimators=estimators) + self.weights = weights + self.n_jobs = n_jobs + self.verbose = verbose + + @_fit_context( + # estimators in VotingRegressor.estimators are not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Fit the estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. 
+ + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + Note that this is supported only if all underlying estimators + support sample weights. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + y = column_or_1d(y, warn=True) + return super().fit(X, y, sample_weight) + + def predict(self, X): + """Predict regression target for X. + + The predicted regression target of an input sample is computed as the + mean predicted regression targets of the estimators in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted values. + """ + check_is_fitted(self) + return np.average(self._predict(X), axis=1, weights=self._weights_not_none) + + def transform(self, X): + """Return predictions for X for each estimator. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. + + Returns + ------- + predictions : ndarray of shape (n_samples, n_classifiers) + Values predicted by each regressor. + """ + check_is_fitted(self) + return self._predict(X) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + _check_feature_names_in(self, input_features, generate_names=False) + class_name = self.__class__.__name__.lower() + return np.asarray( + [f"{class_name}_{name}" for name, est in self.estimators if est != "drop"], + dtype=object, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..af731892880ee5e735986af47659d00a8b17f877 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py @@ -0,0 +1,1266 @@ +"""Weight Boosting. + +This module contains weight boosting estimators for both classification and +regression. + +The module structure is the following: + +- The `BaseWeightBoosting` base class implements a common ``fit`` method + for all the estimators in the module. Regression and classification + only differ from each other in the loss function that is optimized. + +- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting + (AdaBoost-SAMME) for classification problems. + +- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting + (AdaBoost.R2) for regression problems. 
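A hedged quick-start sketch (editorial addition, mirroring the Examples sections further down in this file) for the two estimators listed above; dataset sizes and hyperparameters are arbitrary.

    from sklearn.datasets import make_classification, make_regression
    from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor

    X_c, y_c = make_classification(n_samples=200, n_features=8, random_state=0)
    clf = AdaBoostClassifier(n_estimators=50, algorithm="SAMME", random_state=0)
    clf.fit(X_c, y_c)
    print(clf.score(X_c, y_c))

    X_r, y_r = make_regression(n_samples=200, n_features=8, random_state=0)
    reg = AdaBoostRegressor(n_estimators=50, loss="linear", random_state=0)
    reg.fit(X_r, y_r)
    print(reg.score(X_r, y_r))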
+""" + +# Authors: Noel Dawe +# Gilles Louppe +# Hamzeh Alsalhi +# Arnaud Joly +# +# License: BSD 3 clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +from scipy.special import xlogy + +from ..base import ( + ClassifierMixin, + RegressorMixin, + _fit_context, + is_classifier, + is_regressor, +) +from ..metrics import accuracy_score, r2_score +from ..tree import DecisionTreeClassifier, DecisionTreeRegressor +from ..utils import _safe_indexing, check_random_state +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils.extmath import softmax, stable_cumsum +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.validation import ( + _check_sample_weight, + _num_samples, + check_is_fitted, + has_fit_parameter, +) +from ._base import BaseEnsemble + +__all__ = [ + "AdaBoostClassifier", + "AdaBoostRegressor", +] + + +class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta): + """Base class for AdaBoost estimators. + + Warning: This class should not be used directly. Use derived classes + instead. + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "predict"]), None], + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "learning_rate": [Interval(Real, 0, None, closed="neither")], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + estimator=None, + *, + n_estimators=50, + estimator_params=tuple(), + learning_rate=1.0, + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + estimator_params=estimator_params, + ) + + self.learning_rate = learning_rate + self.random_state = random_state + + def _check_X(self, X): + # Only called to validate X in non-fit methods, therefore reset=False + return self._validate_data( + X, + accept_sparse=["csr", "csc"], + ensure_2d=True, + allow_nd=True, + dtype=None, + reset=False, + ) + + @_fit_context( + # AdaBoost*.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Build a boosted classifier/regressor from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + The target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, the sample weights are initialized to + 1 / n_samples. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc"], + ensure_2d=True, + allow_nd=True, + dtype=None, + y_numeric=is_regressor(self), + ) + + sample_weight = _check_sample_weight( + sample_weight, X, np.float64, copy=True, only_non_negative=True + ) + sample_weight /= sample_weight.sum() + + # Check parameters + self._validate_estimator() + + # Clear any previous fit results + self.estimators_ = [] + self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64) + self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64) + + # Initialization of the random number instance that will be used to + # generate a seed at each iteration + random_state = check_random_state(self.random_state) + epsilon = np.finfo(sample_weight.dtype).eps + + zero_weight_mask = sample_weight == 0.0 + for iboost in range(self.n_estimators): + # avoid extremely small sample weight, for details see issue #20320 + sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None) + # do not clip sample weights that were exactly zero originally + sample_weight[zero_weight_mask] = 0.0 + + # Boosting step + sample_weight, estimator_weight, estimator_error = self._boost( + iboost, X, y, sample_weight, random_state + ) + + # Early termination + if sample_weight is None: + break + self.estimator_weights_[iboost] = estimator_weight + self.estimator_errors_[iboost] = estimator_error + + # Stop if error is zero + if estimator_error == 0: + break + + sample_weight_sum = np.sum(sample_weight) + + if not np.isfinite(sample_weight_sum): + warnings.warn( + ( + "Sample weights have reached infinite values," + f" at iteration {iboost}, causing overflow. " + "Iterations stopped. Try lowering the learning rate." + ), + stacklevel=2, + ) + break + + # Stop if the sum of sample weights has become non-positive + if sample_weight_sum <= 0: + break + + if iboost < self.n_estimators - 1: + # Normalize + sample_weight /= sample_weight_sum + + return self + + @abstractmethod + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost. + + Warning: This method needs to be overridden by subclasses. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + The target values (class labels). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState + The current random number generator + + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. + + error : float + The classification error for the current boost. + If None then boosting has terminated early. + """ + pass + + def staged_score(self, X, y, sample_weight=None): + """Return staged scores for X, y. + + This generator method yields the ensemble score after each iteration of + boosting and therefore allows monitoring, such as to determine the + score on a test set after each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. 
Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + Labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Yields + ------ + z : float + """ + X = self._check_X(X) + + for y_pred in self.staged_predict(X): + if is_classifier(self): + yield accuracy_score(y, y_pred, sample_weight=sample_weight) + else: + yield r2_score(y, y_pred, sample_weight=sample_weight) + + @property + def feature_importances_(self): + """The impurity-based feature importances. + + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + Returns + ------- + feature_importances_ : ndarray of shape (n_features,) + The feature importances. + """ + if self.estimators_ is None or len(self.estimators_) == 0: + raise ValueError( + "Estimator not fitted, call `fit` before `feature_importances_`." + ) + + try: + norm = self.estimator_weights_.sum() + return ( + sum( + weight * clf.feature_importances_ + for weight, clf in zip(self.estimator_weights_, self.estimators_) + ) + / norm + ) + + except AttributeError as e: + raise AttributeError( + "Unable to compute feature importances " + "since estimator does not have a " + "feature_importances_ attribute" + ) from e + + +def _samme_proba(estimator, n_classes, X): + """Calculate algorithm 4, step 2, equation c) of Zhu et al [1]. + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. + + """ + proba = estimator.predict_proba(X) + + # Displace zero probabilities so the log is defined. + # Also fix negative elements which may occur with + # negative sample weights. + np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba) + log_proba = np.log(proba) + + return (n_classes - 1) * ( + log_proba - (1.0 / n_classes) * log_proba.sum(axis=1)[:, np.newaxis] + ) + + +class AdaBoostClassifier( + _RoutingNotSupportedMixin, ClassifierMixin, BaseWeightBoosting +): + """An AdaBoost classifier. + + An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a + classifier on the original dataset and then fits additional copies of the + classifier on the same dataset but where the weights of incorrectly + classified instances are adjusted such that subsequent classifiers focus + more on difficult cases. + + This class implements the algorithm based on [2]_. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : object, default=None + The base estimator from which the boosted ensemble is built. + Support for sample weighting is required, as well as proper + ``classes_`` and ``n_classes_`` attributes. If ``None``, then + the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier` + initialized with `max_depth=1`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=50 + The maximum number of estimators at which boosting is terminated. + In case of perfect fit, the learning procedure is stopped early. + Values must be in the range `[1, inf)`. + + learning_rate : float, default=1.0 + Weight applied to each classifier at each boosting iteration. 
A higher + learning rate increases the contribution of each classifier. There is + a trade-off between the `learning_rate` and `n_estimators` parameters. + Values must be in the range `(0.0, inf)`. + + algorithm : {'SAMME', 'SAMME.R'}, default='SAMME.R' + If 'SAMME.R' then use the SAMME.R real boosting algorithm. + ``estimator`` must support calculation of class probabilities. + If 'SAMME' then use the SAMME discrete boosting algorithm. + The SAMME.R algorithm typically converges faster than SAMME, + achieving a lower test error with fewer boosting iterations. + + .. deprecated:: 1.4 + `"SAMME.R"` is deprecated and will be removed in version 1.6. + '"SAMME"' will become the default. + + random_state : int, RandomState instance or None, default=None + Controls the random seed given at each `estimator` at each + boosting iteration. + Thus, it is only used when `estimator` exposes a `random_state`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of classifiers + The collection of fitted sub-estimators. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_classes_ : int + The number of classes. + + estimator_weights_ : ndarray of floats + Weights for each estimator in the boosted ensemble. + + estimator_errors_ : ndarray of floats + Classification error for each estimator in the boosted + ensemble. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances if supported by the + ``estimator`` (when based on decision trees). + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdaBoostRegressor : An AdaBoost regressor that begins by fitting a + regressor on the original dataset and then fits additional copies of + the regressor on the same dataset but where the weights of instances + are adjusted according to the error of the current prediction. + + GradientBoostingClassifier : GB builds an additive model in a forward + stage-wise fashion. Regression trees are fit on the negative gradient + of the binomial or multinomial deviance loss function. Binary + classification is a special case where only a single regression tree is + induced. + + sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning + method used for classification. + Creates a model that predicts the value of a target variable by + learning simple decision rules inferred from the data features. + + References + ---------- + .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of + on-Line Learning and an Application to Boosting", 1995. + + .. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost." + Statistics and its Interface 2.3 (2009): 349-360. 
+ <10.4310/SII.2009.v2.n3.a8>` + + Examples + -------- + >>> from sklearn.ensemble import AdaBoostClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=1000, n_features=4, + ... n_informative=2, n_redundant=0, + ... random_state=0, shuffle=False) + >>> clf = AdaBoostClassifier(n_estimators=100, algorithm="SAMME", random_state=0) + >>> clf.fit(X, y) + AdaBoostClassifier(algorithm='SAMME', n_estimators=100, random_state=0) + >>> clf.predict([[0, 0, 0, 0]]) + array([1]) + >>> clf.score(X, y) + 0.96... + """ + + # TODO(1.6): Modify _parameter_constraints for "algorithm" to only check + # for "SAMME" + _parameter_constraints: dict = { + **BaseWeightBoosting._parameter_constraints, + "algorithm": [ + StrOptions({"SAMME", "SAMME.R"}), + ], + } + + # TODO(1.6): Change default "algorithm" value to "SAMME" + def __init__( + self, + estimator=None, + *, + n_estimators=50, + learning_rate=1.0, + algorithm="SAMME.R", + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + learning_rate=learning_rate, + random_state=random_state, + ) + + self.algorithm = algorithm + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1)) + + # TODO(1.6): Remove, as "SAMME.R" value for "algorithm" param will be + # removed in 1.6 + # SAMME-R requires predict_proba-enabled base estimators + if self.algorithm != "SAMME": + warnings.warn( + ( + "The SAMME.R algorithm (the default) is deprecated and will be" + " removed in 1.6. Use the SAMME algorithm to circumvent this" + " warning." + ), + FutureWarning, + ) + if not hasattr(self.estimator_, "predict_proba"): + raise TypeError( + "AdaBoostClassifier with algorithm='SAMME.R' requires " + "that the weak learner supports the calculation of class " + "probabilities with a predict_proba method.\n" + "Please change the base estimator or set " + "algorithm='SAMME' instead." + ) + + if not has_fit_parameter(self.estimator_, "sample_weight"): + raise ValueError( + f"{self.estimator.__class__.__name__} doesn't support sample_weight." + ) + + # TODO(1.6): Redefine the scope of the `_boost` and `_boost_discrete` + # functions to be the same since SAMME will be the default value for the + # "algorithm" parameter in version 1.6. Thus, a distinguishing function is + # no longer needed. (Or adjust code here, if another algorithm, shall be + # used instead of SAMME.R.) + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost. + + Perform a single boost according to the real multi-class SAMME.R + algorithm or to the discrete SAMME algorithm and return the updated + sample weights. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) + The target values (class labels). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState instance + The RandomState instance used if the base estimator accepts a + `random_state` attribute. + + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. 
+ + estimator_error : float + The classification error for the current boost. + If None then boosting has terminated early. + """ + if self.algorithm == "SAMME.R": + return self._boost_real(iboost, X, y, sample_weight, random_state) + + else: # elif self.algorithm == "SAMME": + return self._boost_discrete(iboost, X, y, sample_weight, random_state) + + # TODO(1.6): Remove function. The `_boost_real` function won't be used any + # longer, because the SAMME.R algorithm will be deprecated in 1.6. + def _boost_real(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost using the SAMME.R real algorithm.""" + estimator = self._make_estimator(random_state=random_state) + + estimator.fit(X, y, sample_weight=sample_weight) + + y_predict_proba = estimator.predict_proba(X) + + if iboost == 0: + self.classes_ = getattr(estimator, "classes_", None) + self.n_classes_ = len(self.classes_) + + y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0) + + # Instances incorrectly classified + incorrect = y_predict != y + + # Error fraction + estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0)) + + # Stop if classification is perfect + if estimator_error <= 0: + return sample_weight, 1.0, 0.0 + + # Construct y coding as described in Zhu et al [2]: + # + # y_k = 1 if c == k else -1 / (K - 1) + # + # where K == n_classes_ and c, k in [0, K) are indices along the second + # axis of the y coding with c being the index corresponding to the true + # class label. + n_classes = self.n_classes_ + classes = self.classes_ + y_codes = np.array([-1.0 / (n_classes - 1), 1.0]) + y_coding = y_codes.take(classes == y[:, np.newaxis]) + + # Displace zero probabilities so the log is defined. + # Also fix negative elements which may occur with + # negative sample weights. + proba = y_predict_proba # alias for readability + np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba) + + # Boost weight using multi-class AdaBoost SAMME.R alg + estimator_weight = ( + -1.0 + * self.learning_rate + * ((n_classes - 1.0) / n_classes) + * xlogy(y_coding, y_predict_proba).sum(axis=1) + ) + + # Only boost the weights if it will fit again + if not iboost == self.n_estimators - 1: + # Only boost positive weights + sample_weight *= np.exp( + estimator_weight * ((sample_weight > 0) | (estimator_weight < 0)) + ) + + return sample_weight, 1.0, estimator_error + + def _boost_discrete(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost using the SAMME discrete algorithm.""" + estimator = self._make_estimator(random_state=random_state) + + estimator.fit(X, y, sample_weight=sample_weight) + + y_predict = estimator.predict(X) + + if iboost == 0: + self.classes_ = getattr(estimator, "classes_", None) + self.n_classes_ = len(self.classes_) + + # Instances incorrectly classified + incorrect = y_predict != y + + # Error fraction + estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0)) + + # Stop if classification is perfect + if estimator_error <= 0: + return sample_weight, 1.0, 0.0 + + n_classes = self.n_classes_ + + # Stop if the error is at least as bad as random guessing + if estimator_error >= 1.0 - (1.0 / n_classes): + self.estimators_.pop(-1) + if len(self.estimators_) == 0: + raise ValueError( + "BaseClassifier in AdaBoostClassifier " + "ensemble is worse than random, ensemble " + "can not be fit." 
+ ) + return None, None, None + + # Boost weight using multi-class AdaBoost SAMME alg + estimator_weight = self.learning_rate * ( + np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0) + ) + + # Only boost the weights if it will fit again + if not iboost == self.n_estimators - 1: + # Only boost positive weights + sample_weight = np.exp( + np.log(sample_weight) + + estimator_weight * incorrect * (sample_weight > 0) + ) + + return sample_weight, estimator_weight, estimator_error + + def predict(self, X): + """Predict classes for X. + + The predicted class of an input sample is computed as the weighted mean + prediction of the classifiers in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted classes. + """ + pred = self.decision_function(X) + + if self.n_classes_ == 2: + return self.classes_.take(pred > 0, axis=0) + + return self.classes_.take(np.argmax(pred, axis=1), axis=0) + + def staged_predict(self, X): + """Return staged predictions for X. + + The predicted class of an input sample is computed as the weighted mean + prediction of the classifiers in the ensemble. + + This generator method yields the ensemble prediction after each + iteration of boosting and therefore allows monitoring, such as to + determine the prediction on a test set after each boost. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted classes. + """ + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_ + + if n_classes == 2: + for pred in self.staged_decision_function(X): + yield np.array(classes.take(pred > 0, axis=0)) + + else: + for pred in self.staged_decision_function(X): + yield np.array(classes.take(np.argmax(pred, axis=1), axis=0)) + + def decision_function(self, X): + """Compute the decision function of ``X``. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + score : ndarray of shape of (n_samples, k) + The decision function of the input samples. The order of + outputs is the same as that of the :term:`classes_` attribute. + Binary classification is a special cases with ``k == 1``, + otherwise ``k==n_classes``. For binary classification, + values closer to -1 or 1 mean more like the first or second + class in ``classes_``, respectively. + """ + check_is_fitted(self) + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_[:, np.newaxis] + + # TODO(1.6): Remove, because "algorithm" param will be deprecated in 1.6 + if self.algorithm == "SAMME.R": + # The weights are all 1. 
for SAMME.R + pred = sum( + _samme_proba(estimator, n_classes, X) for estimator in self.estimators_ + ) + else: # self.algorithm == "SAMME" + pred = sum( + np.where( + (estimator.predict(X) == classes).T, + w, + -1 / (n_classes - 1) * w, + ) + for estimator, w in zip(self.estimators_, self.estimator_weights_) + ) + + pred /= self.estimator_weights_.sum() + if n_classes == 2: + pred[:, 0] *= -1 + return pred.sum(axis=1) + return pred + + def staged_decision_function(self, X): + """Compute decision function of ``X`` for each boosting iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each boosting iteration. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + score : generator of ndarray of shape (n_samples, k) + The decision function of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. + Binary classification is a special cases with ``k == 1``, + otherwise ``k==n_classes``. For binary classification, + values closer to -1 or 1 mean more like the first or second + class in ``classes_``, respectively. + """ + check_is_fitted(self) + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_[:, np.newaxis] + pred = None + norm = 0.0 + + for weight, estimator in zip(self.estimator_weights_, self.estimators_): + norm += weight + + # TODO(1.6): Remove, because "algorithm" param will be deprecated in + # 1.6 + if self.algorithm == "SAMME.R": + # The weights are all 1. for SAMME.R + current_pred = _samme_proba(estimator, n_classes, X) + else: # elif self.algorithm == "SAMME": + current_pred = np.where( + (estimator.predict(X) == classes).T, + weight, + -1 / (n_classes - 1) * weight, + ) + + if pred is None: + pred = current_pred + else: + pred += current_pred + + if n_classes == 2: + tmp_pred = np.copy(pred) + tmp_pred[:, 0] *= -1 + yield (tmp_pred / norm).sum(axis=1) + else: + yield pred / norm + + @staticmethod + def _compute_proba_from_decision(decision, n_classes): + """Compute probabilities from the decision function. + + This is based eq. (15) of [1] where: + p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X))) + = softmax((1 / K-1) * f(X)) + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", + 2009. + """ + if n_classes == 2: + decision = np.vstack([-decision, decision]).T / 2 + else: + decision /= n_classes - 1 + return softmax(decision, copy=False) + + def predict_proba(self, X): + """Predict class probabilities for X. + + The predicted class probabilities of an input sample is computed as + the weighted mean predicted class probabilities of the classifiers + in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. 
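Aside (editorial): the decision-to-probability mapping in `_compute_proba_from_decision` above is a plain softmax of the decision scores divided by `n_classes - 1` (in the binary case the 1-D score is first stacked as `[-decision, decision] / 2`, as the code shows). A standalone check with made-up scores, using SciPy's softmax in place of the internal helper:

    import numpy as np
    from scipy.special import softmax

    decision = np.array([[1.2, -0.3, -0.9],        # hypothetical (n_samples, n_classes) scores
                         [-0.5, 0.1, 0.4]])
    n_classes = decision.shape[1]
    proba = softmax(decision / (n_classes - 1), axis=1)
    print(proba.sum(axis=1))                        # each row sums to 1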
+ """ + check_is_fitted(self) + n_classes = self.n_classes_ + + if n_classes == 1: + return np.ones((_num_samples(X), 1)) + + decision = self.decision_function(X) + return self._compute_proba_from_decision(decision, n_classes) + + def staged_predict_proba(self, X): + """Predict class probabilities for X. + + The predicted class probabilities of an input sample is computed as + the weighted mean predicted class probabilities of the classifiers + in the ensemble. + + This generator method yields the ensemble predicted class probabilities + after each iteration of boosting and therefore allows monitoring, such + as to determine the predicted class probabilities on a test set after + each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + p : generator of ndarray of shape (n_samples,) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. + """ + + n_classes = self.n_classes_ + + for decision in self.staged_decision_function(X): + yield self._compute_proba_from_decision(decision, n_classes) + + def predict_log_proba(self, X): + """Predict class log-probabilities for X. + + The predicted class log-probabilities of an input sample is computed as + the weighted mean predicted class log-probabilities of the classifiers + in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. + """ + return np.log(self.predict_proba(X)) + + +class AdaBoostRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseWeightBoosting): + """An AdaBoost regressor. + + An AdaBoost [1] regressor is a meta-estimator that begins by fitting a + regressor on the original dataset and then fits additional copies of the + regressor on the same dataset but where the weights of instances are + adjusted according to the error of the current prediction. As such, + subsequent regressors focus more on difficult cases. + + This class implements the algorithm known as AdaBoost.R2 [2]. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : object, default=None + The base estimator from which the boosted ensemble is built. + If ``None``, then the base estimator is + :class:`~sklearn.tree.DecisionTreeRegressor` initialized with + `max_depth=3`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=50 + The maximum number of estimators at which boosting is terminated. + In case of perfect fit, the learning procedure is stopped early. + Values must be in the range `[1, inf)`. + + learning_rate : float, default=1.0 + Weight applied to each regressor at each boosting iteration. A higher + learning rate increases the contribution of each regressor. There is + a trade-off between the `learning_rate` and `n_estimators` parameters. + Values must be in the range `(0.0, inf)`. + + loss : {'linear', 'square', 'exponential'}, default='linear' + The loss function to use when updating the weights after each + boosting iteration. 
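Aside (editorial): the three `loss` options above are simple transforms of the normalized absolute error, as applied in `_boost` further down; a toy illustration with made-up errors:

    import numpy as np

    error = np.array([0.0, 0.1, 0.4, 1.0])   # |y_pred - y| already rescaled to [0, 1]
    linear = error
    square = error ** 2
    exponential = 1.0 - np.exp(-error)
    print(linear, square, exponential, sep="\n")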
+ + random_state : int, RandomState instance or None, default=None + Controls the random seed given at each `estimator` at each + boosting iteration. + Thus, it is only used when `estimator` exposes a `random_state`. + In addition, it controls the bootstrap of the weights used to train the + `estimator` at each boosting iteration. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of regressors + The collection of fitted sub-estimators. + + estimator_weights_ : ndarray of floats + Weights for each estimator in the boosted ensemble. + + estimator_errors_ : ndarray of floats + Regression error for each estimator in the boosted ensemble. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances if supported by the + ``estimator`` (when based on decision trees). + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdaBoostClassifier : An AdaBoost classifier. + GradientBoostingRegressor : Gradient Boosting Classification Tree. + sklearn.tree.DecisionTreeRegressor : A decision tree regressor. + + References + ---------- + .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of + on-Line Learning and an Application to Boosting", 1995. + + .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. + + Examples + -------- + >>> from sklearn.ensemble import AdaBoostRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_features=4, n_informative=2, + ... random_state=0, shuffle=False) + >>> regr = AdaBoostRegressor(random_state=0, n_estimators=100) + >>> regr.fit(X, y) + AdaBoostRegressor(n_estimators=100, random_state=0) + >>> regr.predict([[0, 0, 0, 0]]) + array([4.7972...]) + >>> regr.score(X, y) + 0.9771... + """ + + _parameter_constraints: dict = { + **BaseWeightBoosting._parameter_constraints, + "loss": [StrOptions({"linear", "square", "exponential"})], + } + + def __init__( + self, + estimator=None, + *, + n_estimators=50, + learning_rate=1.0, + loss="linear", + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + learning_rate=learning_rate, + random_state=random_state, + ) + + self.loss = loss + self.random_state = random_state + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3)) + + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost for regression + + Perform a single boost according to the AdaBoost.R2 algorithm and + return the updated sample weights. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. 
+ + y : array-like of shape (n_samples,) + The target values (class labels in classification, real numbers in + regression). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState + The RandomState instance used if the base estimator accepts a + `random_state` attribute. + Controls also the bootstrap of the weights used to train the weak + learner. + replacement. + + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. + + estimator_error : float + The regression error for the current boost. + If None then boosting has terminated early. + """ + estimator = self._make_estimator(random_state=random_state) + + # Weighted sampling of the training set with replacement + bootstrap_idx = random_state.choice( + np.arange(_num_samples(X)), + size=_num_samples(X), + replace=True, + p=sample_weight, + ) + + # Fit on the bootstrapped sample and obtain a prediction + # for all samples in the training set + X_ = _safe_indexing(X, bootstrap_idx) + y_ = _safe_indexing(y, bootstrap_idx) + estimator.fit(X_, y_) + y_predict = estimator.predict(X) + + error_vect = np.abs(y_predict - y) + sample_mask = sample_weight > 0 + masked_sample_weight = sample_weight[sample_mask] + masked_error_vector = error_vect[sample_mask] + + error_max = masked_error_vector.max() + if error_max != 0: + masked_error_vector /= error_max + + if self.loss == "square": + masked_error_vector **= 2 + elif self.loss == "exponential": + masked_error_vector = 1.0 - np.exp(-masked_error_vector) + + # Calculate the average loss + estimator_error = (masked_sample_weight * masked_error_vector).sum() + + if estimator_error <= 0: + # Stop if fit is perfect + return sample_weight, 1.0, 0.0 + + elif estimator_error >= 0.5: + # Discard current estimator only if it isn't the only one + if len(self.estimators_) > 1: + self.estimators_.pop(-1) + return None, None, None + + beta = estimator_error / (1.0 - estimator_error) + + # Boost weight using AdaBoost.R2 alg + estimator_weight = self.learning_rate * np.log(1.0 / beta) + + if not iboost == self.n_estimators - 1: + sample_weight[sample_mask] *= np.power( + beta, (1.0 - masked_error_vector) * self.learning_rate + ) + + return sample_weight, estimator_weight, estimator_error + + def _get_median_predict(self, X, limit): + # Evaluate predictions of all estimators + predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T + + # Sort the predictions + sorted_idx = np.argsort(predictions, axis=1) + + # Find index of median prediction for each sample + weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1) + median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis] + median_idx = median_or_above.argmax(axis=1) + + median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx] + + # Return median predictions + return predictions[np.arange(_num_samples(X)), median_estimators] + + def predict(self, X): + """Predict regression value for X. + + The predicted regression value of an input sample is computed + as the weighted median prediction of the regressors in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. 
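Aside (editorial): `_get_median_predict` above picks, for each sample, the prediction at which the cumulative estimator weight first reaches half of the total weight, i.e. a weighted median. A standalone sketch with made-up predictions and weights; `np.cumsum` stands in for `stable_cumsum`:

    import numpy as np

    predictions = np.array([[1.0, 3.0, 10.0],      # (n_samples, n_estimators)
                            [2.0, 2.5, 2.6]])
    weights = np.array([0.5, 1.0, 0.25])           # hypothetical estimator_weights_

    sorted_idx = np.argsort(predictions, axis=1)
    weight_cdf = np.cumsum(weights[sorted_idx], axis=1)
    median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
    median_idx = median_or_above.argmax(axis=1)
    rows = np.arange(predictions.shape[0])
    print(predictions[rows, sorted_idx[rows, median_idx]])   # weighted medians: 3.0 and 2.5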
+ + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted regression values. + """ + check_is_fitted(self) + X = self._check_X(X) + + return self._get_median_predict(X, len(self.estimators_)) + + def staged_predict(self, X): + """Return staged predictions for X. + + The predicted regression value of an input sample is computed + as the weighted median prediction of the regressors in the ensemble. + + This generator method yields the ensemble prediction after each + iteration of boosting and therefore allows monitoring, such as to + determine the prediction on a test set after each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted regression values. + """ + check_is_fitted(self) + X = self._check_X(X) + + for i, _ in enumerate(self.estimators_, 1): + yield self._get_median_predict(X, limit=i) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12c17c87b94e98df349c71b8a44400ac05ac1fa8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8511645964a5562bfbefbaabc54ef4552c11c8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b8f1d147fd5c68fb69fce8b1c00e06e13a4af1d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_encoders.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_encoders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9488cc2b4f487450a2aae818806a32f59ebd7acf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_encoders.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_function_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_function_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83b1bd58451f8e390609bd73d9ebcd90ec20dfce Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_function_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0b7fcb5320265bb08f412fa7ccebd323bf321af Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21db11d7abffeebf2aa541a8f7e8c2dd5c9c242f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_target_encoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_target_encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4983e21baaf1eaa491d01220754eda05e7f0391 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_target_encoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b544555279e2ddeb5906e797d990c2d6dac08774 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dfb6d78beadbb93bf29096910654984777ace97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_discretization.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_discretization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e97b1eb54f8318bbd82e6ebfb8cd0cae919478a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_discretization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_encoders.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_encoders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6516106c5ff6ea36cf7135937063575b158da308 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_encoders.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98970d083ecc131234b934bc49395919c589935e Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_label.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_label.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc2f04956bbfcabd5d0dfee18cbe4108b4b7d8f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_label.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_polynomial.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_polynomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c8e9ec3ed659992238c712b648037e3443d7450 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_polynomial.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0250713f3c8124157aa34b35570320c0710341d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..09f702f64ce2367ef6fe47fdb789e0475bf11def --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py @@ -0,0 +1,187 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.base import clone +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import ( + MaxAbsScaler, + MinMaxScaler, + PowerTransformer, + QuantileTransformer, + RobustScaler, + StandardScaler, + maxabs_scale, + minmax_scale, + power_transform, + quantile_transform, + robust_scale, + scale, +) +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import ( + BSR_CONTAINERS, + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DIA_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +iris = load_iris() + + +def _get_valid_samples_by_column(X, col): + """Get non NaN samples in column of X""" + return X[:, [col]][~np.isnan(X[:, col])] + + +@pytest.mark.parametrize( + "est, func, support_sparse, strictly_positive, omit_kwargs", + [ + (MaxAbsScaler(), maxabs_scale, True, False, []), + (MinMaxScaler(), minmax_scale, False, False, ["clip"]), + (StandardScaler(), scale, False, False, []), + (StandardScaler(with_mean=False), scale, True, False, []), + (PowerTransformer("yeo-johnson"), power_transform, False, False, []), + (PowerTransformer("box-cox"), power_transform, False, True, []), + (QuantileTransformer(n_quantiles=10), quantile_transform, True, False, []), + (RobustScaler(), robust_scale, False, False, []), + (RobustScaler(with_centering=False), robust_scale, True, False, []), + ], +) +def test_missing_value_handling( + est, func, support_sparse, strictly_positive, omit_kwargs +): 
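    # Editorial summary (added comments, not from upstream): this test
    # 1) injects NaNs into a copy of the iris data and checks that fitting and
    #    transforming raise no RuntimeWarning and keep NaNs exactly where the
    #    input had them,
    # 2) checks that the functional API (e.g. ``scale`` vs ``StandardScaler``)
    #    yields the same NaN mask and the same non-NaN values,
    # 3) checks that ``inverse_transform`` preserves the NaN mask and recovers
    #    the non-NaN values,
    # 4) re-fits column by column on NaN-free samples and checks that
    #    transforming NaN-containing columns still matches,
    # 5) for sparse-capable estimators, checks that every sparse container
    #    gives the same results as dense input.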
+ # check that the preprocessing method let pass nan + rng = np.random.RandomState(42) + X = iris.data.copy() + n_missing = 50 + X[ + rng.randint(X.shape[0], size=n_missing), rng.randint(X.shape[1], size=n_missing) + ] = np.nan + if strictly_positive: + X += np.nanmin(X) + 0.1 + X_train, X_test = train_test_split(X, random_state=1) + # sanity check + assert not np.all(np.isnan(X_train), axis=0).any() + assert np.any(np.isnan(X_train), axis=0).all() + assert np.any(np.isnan(X_test), axis=0).all() + X_test[:, 0] = np.nan # make sure this boundary case is tested + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt = est.fit(X_train).transform(X_test) + # ensure no warnings are raised + # missing values should still be missing, and only them + assert_array_equal(np.isnan(Xt), np.isnan(X_test)) + + # check that the function leads to the same results as the class + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_class = est.transform(X_train) + kwargs = est.get_params() + # remove the parameters which should be omitted because they + # are not defined in the counterpart function of the preprocessing class + for kwarg in omit_kwargs: + _ = kwargs.pop(kwarg) + Xt_func = func(X_train, **kwargs) + assert_array_equal(np.isnan(Xt_func), np.isnan(Xt_class)) + assert_allclose(Xt_func[~np.isnan(Xt_func)], Xt_class[~np.isnan(Xt_class)]) + + # check that the inverse transform keep NaN + Xt_inv = est.inverse_transform(Xt) + assert_array_equal(np.isnan(Xt_inv), np.isnan(X_test)) + # FIXME: we can introduce equal_nan=True in recent version of numpy. + # For the moment which just check that non-NaN values are almost equal. + assert_allclose(Xt_inv[~np.isnan(Xt_inv)], X_test[~np.isnan(X_test)]) + + for i in range(X.shape[1]): + # train only on non-NaN + est.fit(_get_valid_samples_by_column(X_train, i)) + # check transforming with NaN works even when training without NaN + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_col = est.transform(X_test[:, [i]]) + assert_allclose(Xt_col, Xt[:, [i]]) + # check non-NaN is handled as before - the 1st column is all nan + if not np.isnan(X_test[:, i]).all(): + Xt_col_nonan = est.transform(_get_valid_samples_by_column(X_test, i)) + assert_array_equal(Xt_col_nonan, Xt_col[~np.isnan(Xt_col.squeeze())]) + + if support_sparse: + est_dense = clone(est) + est_sparse = clone(est) + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_dense = est_dense.fit(X_train).transform(X_test) + Xt_inv_dense = est_dense.inverse_transform(Xt_dense) + + for sparse_container in ( + BSR_CONTAINERS + + COO_CONTAINERS + + CSC_CONTAINERS + + CSR_CONTAINERS + + DIA_CONTAINERS + + DOK_CONTAINERS + + LIL_CONTAINERS + ): + # check that the dense and sparse inputs lead to the same results + # precompute the matrix to avoid catching side warnings + X_train_sp = sparse_container(X_train) + X_test_sp = sparse_container(X_test) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", PendingDeprecationWarning) + warnings.simplefilter("error", RuntimeWarning) + Xt_sp = est_sparse.fit(X_train_sp).transform(X_test_sp) + + assert_allclose(Xt_sp.toarray(), Xt_dense) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", PendingDeprecationWarning) + warnings.simplefilter("error", RuntimeWarning) + Xt_inv_sp = est_sparse.inverse_transform(Xt_sp) + + assert_allclose(Xt_inv_sp.toarray(), Xt_inv_dense) + + +@pytest.mark.parametrize( + "est, func", + 
[ + (MaxAbsScaler(), maxabs_scale), + (MinMaxScaler(), minmax_scale), + (StandardScaler(), scale), + (StandardScaler(with_mean=False), scale), + (PowerTransformer("yeo-johnson"), power_transform), + ( + PowerTransformer("box-cox"), + power_transform, + ), + (QuantileTransformer(n_quantiles=3), quantile_transform), + (RobustScaler(), robust_scale), + (RobustScaler(with_centering=False), robust_scale), + ], +) +def test_missing_value_pandas_na_support(est, func): + # Test pandas IntegerArray with pd.NA + pd = pytest.importorskip("pandas") + + X = np.array( + [ + [1, 2, 3, np.nan, np.nan, 4, 5, 1], + [np.nan, np.nan, 8, 4, 6, np.nan, np.nan, 8], + [1, 2, 3, 4, 5, 6, 7, 8], + ] + ).T + + # Creates dataframe with IntegerArrays with pd.NA + X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c"]) + X_df["c"] = X_df["c"].astype("int") + + X_trans = est.fit_transform(X) + X_df_trans = est.fit_transform(X_df) + + assert_allclose(X_trans, X_df_trans) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py new file mode 100644 index 0000000000000000000000000000000000000000..46ec86f7a75d43378614639f04e3c72e7e69aede --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py @@ -0,0 +1,503 @@ +import warnings + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn import clone +from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_array_almost_equal, + assert_array_equal, +) + +X = [[-2, 1.5, -4, -1], [-1, 2.5, -3, -0.5], [0, 3.5, -2, 0.5], [1, 4.5, -1, 2]] + + +@pytest.mark.parametrize( + "strategy, expected, sample_weight", + [ + ("uniform", [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]], None), + ("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], None), + ("quantile", [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], None), + ( + "quantile", + [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], + [1, 1, 2, 1], + ), + ( + "quantile", + [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], + [1, 1, 1, 1], + ), + ( + "quantile", + [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]], + [0, 1, 1, 1], + ), + ( + "kmeans", + [[0, 0, 0, 0], [1, 1, 1, 0], [1, 1, 1, 1], [2, 2, 2, 2]], + [1, 0, 3, 1], + ), + ( + "kmeans", + [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], + [1, 1, 1, 1], + ), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +def test_fit_transform(strategy, expected, sample_weight): + est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy) + est.fit(X, sample_weight=sample_weight) + assert_array_equal(expected, est.transform(X)) + + +def test_valid_n_bins(): + KBinsDiscretizer(n_bins=2).fit_transform(X) + KBinsDiscretizer(n_bins=np.array([2])[0]).fit_transform(X) + assert KBinsDiscretizer(n_bins=2).fit(X).n_bins_.dtype == np.dtype(int) + + +@pytest.mark.parametrize("strategy", ["uniform"]) +def test_kbinsdiscretizer_wrong_strategy_with_weights(strategy): + """Check that we raise an error when the wrong strategy is used.""" + sample_weight = np.ones(shape=(len(X))) + est = KBinsDiscretizer(n_bins=3, strategy=strategy) + err_msg = ( + "`sample_weight` was provided but it cannot be used with 
strategy='uniform'." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, sample_weight=sample_weight) + + +def test_invalid_n_bins_array(): + # Bad shape + n_bins = np.full((2, 4), 2.0) + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)." + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + # Incorrect number of features + n_bins = [1, 2, 2] + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)." + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + # Bad bin values + n_bins = [1, 2, 2, 1] + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = ( + "KBinsDiscretizer received an invalid number of bins " + "at indices 0, 3. Number of bins must be at least 2, " + "and must be an int." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + # Float bin values + n_bins = [2.1, 2, 2.1, 2] + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = ( + "KBinsDiscretizer received an invalid number of bins " + "at indices 0, 2. Number of bins must be at least 2, " + "and must be an int." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + +@pytest.mark.parametrize( + "strategy, expected, sample_weight", + [ + ("uniform", [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]], None), + ("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]], None), + ("quantile", [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], None), + ( + "quantile", + [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], + [1, 1, 3, 1], + ), + ( + "quantile", + [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]], + [0, 1, 3, 1], + ), + # ( + # "quantile", + # [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], + # [1, 1, 1, 1], + # ), + # + # TODO: This test case above aims to test if the case where an array of + # ones passed in sample_weight parameter is equal to the case when + # sample_weight is None. + # Unfortunately, the behavior of `_weighted_percentile` when + # `sample_weight = [1, 1, 1, 1]` are currently not equivalent. 
+ # This problem has been addressed in issue : + # https://github.com/scikit-learn/scikit-learn/issues/17370 + ( + "kmeans", + [[0, 0, 0, 0], [0, 1, 1, 0], [1, 1, 1, 1], [1, 2, 2, 2]], + [1, 0, 3, 1], + ), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +def test_fit_transform_n_bins_array(strategy, expected, sample_weight): + est = KBinsDiscretizer( + n_bins=[2, 3, 3, 3], encode="ordinal", strategy=strategy + ).fit(X, sample_weight=sample_weight) + assert_array_equal(expected, est.transform(X)) + + # test the shape of bin_edges_ + n_features = np.array(X).shape[1] + assert est.bin_edges_.shape == (n_features,) + for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_): + assert bin_edges.shape == (n_bins + 1,) + + +@pytest.mark.filterwarnings("ignore: Bins whose width are too small") +def test_kbinsdiscretizer_effect_sample_weight(): + """Check the impact of `sample_weight` one computed quantiles.""" + X = np.array([[-2], [-1], [1], [3], [500], [1000]]) + # add a large number of bins such that each sample with a non-null weight + # will be used as bin edge + est = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile") + est.fit(X, sample_weight=[1, 1, 1, 1, 0, 0]) + assert_allclose(est.bin_edges_[0], [-2, -1, 1, 3]) + assert_allclose(est.transform(X), [[0.0], [1.0], [2.0], [2.0], [2.0], [2.0]]) + + +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("strategy", ["kmeans", "quantile"]) +def test_kbinsdiscretizer_no_mutating_sample_weight(strategy): + """Make sure that `sample_weight` is not changed in place.""" + est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy) + sample_weight = np.array([1, 3, 1, 2], dtype=np.float64) + sample_weight_copy = np.copy(sample_weight) + est.fit(X, sample_weight=sample_weight) + assert_allclose(sample_weight, sample_weight_copy) + + +@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) +def test_same_min_max(strategy): + warnings.simplefilter("always") + X = np.array([[1, -2], [1, -1], [1, 0], [1, 1]]) + est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode="ordinal") + warning_message = "Feature 0 is constant and will be replaced with 0." 
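+    # Illustrative note (editorial, not part of the original test): a constant
+    # feature cannot be split into bins, so KBinsDiscretizer warns, keeps a
+    # single bin for that column and always encodes it as 0; the assertions
+    # below check exactly that:
+    #
+    #   est.fit(X)
+    #   est.n_bins_[0]          # -> 1
+    #   est.transform(X)[:, 0]  # -> array of zeros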
+ with pytest.warns(UserWarning, match=warning_message): + est.fit(X) + assert est.n_bins_[0] == 1 + # replace the feature with zeros + Xt = est.transform(X) + assert_array_equal(Xt[:, 0], np.zeros(X.shape[0])) + + +def test_transform_1d_behavior(): + X = np.arange(4) + est = KBinsDiscretizer(n_bins=2) + with pytest.raises(ValueError): + est.fit(X) + + est = KBinsDiscretizer(n_bins=2) + est.fit(X.reshape(-1, 1)) + with pytest.raises(ValueError): + est.transform(X) + + +@pytest.mark.parametrize("i", range(1, 9)) +def test_numeric_stability(i): + X_init = np.array([2.0, 4.0, 6.0, 8.0, 10.0]).reshape(-1, 1) + Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1) + + # Test up to discretizing nano units + X = X_init / 10**i + Xt = KBinsDiscretizer(n_bins=2, encode="ordinal").fit_transform(X) + assert_array_equal(Xt_expected, Xt) + + +def test_encode_options(): + est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="ordinal").fit(X) + Xt_1 = est.transform(X) + est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot-dense").fit(X) + Xt_2 = est.transform(X) + assert not sp.issparse(Xt_2) + assert_array_equal( + OneHotEncoder( + categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=False + ).fit_transform(Xt_1), + Xt_2, + ) + est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot").fit(X) + Xt_3 = est.transform(X) + assert sp.issparse(Xt_3) + assert_array_equal( + OneHotEncoder( + categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=True + ) + .fit_transform(Xt_1) + .toarray(), + Xt_3.toarray(), + ) + + +@pytest.mark.parametrize( + "strategy, expected_2bins, expected_3bins, expected_5bins", + [ + ("uniform", [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]), + ("kmeans", [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]), + ("quantile", [0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4]), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +def test_nonuniform_strategies( + strategy, expected_2bins, expected_3bins, expected_5bins +): + X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1) + + # with 2 bins + est = KBinsDiscretizer(n_bins=2, strategy=strategy, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(expected_2bins, Xt.ravel()) + + # with 3 bins + est = KBinsDiscretizer(n_bins=3, strategy=strategy, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(expected_3bins, Xt.ravel()) + + # with 5 bins + est = KBinsDiscretizer(n_bins=5, strategy=strategy, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(expected_5bins, Xt.ravel()) + + +@pytest.mark.parametrize( + "strategy, expected_inv", + [ + ( + "uniform", + [ + [-1.5, 2.0, -3.5, -0.5], + [-0.5, 3.0, -2.5, -0.5], + [0.5, 4.0, -1.5, 0.5], + [0.5, 4.0, -1.5, 1.5], + ], + ), + ( + "kmeans", + [ + [-1.375, 2.125, -3.375, -0.5625], + [-1.375, 2.125, -3.375, -0.5625], + [-0.125, 3.375, -2.125, 0.5625], + [0.75, 4.25, -1.25, 1.625], + ], + ), + ( + "quantile", + [ + [-1.5, 2.0, -3.5, -0.75], + [-0.5, 3.0, -2.5, 0.0], + [0.5, 4.0, -1.5, 1.25], + [0.5, 4.0, -1.5, 1.25], + ], + ), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) +def test_inverse_transform(strategy, encode, expected_inv): + kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, encode=encode) + Xt 
= kbd.fit_transform(X) + Xinv = kbd.inverse_transform(Xt) + assert_array_almost_equal(expected_inv, Xinv) + + +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) +def test_transform_outside_fit_range(strategy): + X = np.array([0, 1, 2, 3])[:, None] + kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode="ordinal") + kbd.fit(X) + + X2 = np.array([-2, 5])[:, None] + X2t = kbd.transform(X2) + assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_) + assert_array_equal(X2t.min(axis=0), [0]) + + +def test_overwrite(): + X = np.array([0, 1, 2, 3])[:, None] + X_before = X.copy() + + est = KBinsDiscretizer(n_bins=3, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(X, X_before) + + Xt_before = Xt.copy() + Xinv = est.inverse_transform(Xt) + assert_array_equal(Xt, Xt_before) + assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]])) + + +@pytest.mark.parametrize( + "strategy, expected_bin_edges", [("quantile", [0, 1, 3]), ("kmeans", [0, 1.5, 3])] +) +def test_redundant_bins(strategy, expected_bin_edges): + X = [[0], [0], [0], [0], [3], [3]] + kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, subsample=None) + warning_message = "Consider decreasing the number of bins." + with pytest.warns(UserWarning, match=warning_message): + kbd.fit(X) + assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges) + + +def test_percentile_numeric_stability(): + X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1) + bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95]) + Xt = np.array([0, 0, 4]).reshape(-1, 1) + kbd = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile") + warning_message = "Consider decreasing the number of bins." 
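+    # Illustrative note (editorial, not part of the original test): with only
+    # three samples, most of the 10 requested quantile edges coincide at 0.05;
+    # the duplicated edges are dropped, leaving 5 bins (6 edges) instead of 10,
+    # and the "Consider decreasing the number of bins." warning is emitted:
+    #
+    #   kbd.fit(X)
+    #   kbd.bin_edges_[0]  # -> [0.05, 0.23, 0.41, 0.59, 0.77, 0.95]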
+ with pytest.warns(UserWarning, match=warning_message): + kbd.fit(X) + + assert_array_almost_equal(kbd.bin_edges_[0], bin_edges) + assert_array_almost_equal(kbd.transform(X), Xt) + + +@pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64]) +@pytest.mark.parametrize("out_dtype", [None, np.float32, np.float64]) +@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) +def test_consistent_dtype(in_dtype, out_dtype, encode): + X_input = np.array(X, dtype=in_dtype) + kbd = KBinsDiscretizer(n_bins=3, encode=encode, dtype=out_dtype) + kbd.fit(X_input) + + # test output dtype + if out_dtype is not None: + expected_dtype = out_dtype + elif out_dtype is None and X_input.dtype == np.float16: + # wrong numeric input dtype are cast in np.float64 + expected_dtype = np.float64 + else: + expected_dtype = X_input.dtype + Xt = kbd.transform(X_input) + assert Xt.dtype == expected_dtype + + +@pytest.mark.parametrize("input_dtype", [np.float16, np.float32, np.float64]) +@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) +def test_32_equal_64(input_dtype, encode): + # TODO this check is redundant with common checks and can be removed + # once #16290 is merged + X_input = np.array(X, dtype=input_dtype) + + # 32 bit output + kbd_32 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float32) + kbd_32.fit(X_input) + Xt_32 = kbd_32.transform(X_input) + + # 64 bit output + kbd_64 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float64) + kbd_64.fit(X_input) + Xt_64 = kbd_64.transform(X_input) + + assert_allclose_dense_sparse(Xt_32, Xt_64) + + +def test_kbinsdiscretizer_subsample_default(): + # Since the size of X is small (< 2e5), subsampling will not take place. + X = np.array([-2, 1.5, -4, -1]).reshape(-1, 1) + kbd_default = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile") + kbd_default.fit(X) + + kbd_without_subsampling = clone(kbd_default) + kbd_without_subsampling.set_params(subsample=None) + kbd_without_subsampling.fit(X) + + for bin_kbd_default, bin_kbd_with_subsampling in zip( + kbd_default.bin_edges_[0], kbd_without_subsampling.bin_edges_[0] + ): + np.testing.assert_allclose(bin_kbd_default, bin_kbd_with_subsampling) + assert kbd_default.bin_edges_.shape == kbd_without_subsampling.bin_edges_.shape + + +@pytest.mark.parametrize( + "encode, expected_names", + [ + ( + "onehot", + [ + f"feat{col_id}_{float(bin_id)}" + for col_id in range(3) + for bin_id in range(4) + ], + ), + ( + "onehot-dense", + [ + f"feat{col_id}_{float(bin_id)}" + for col_id in range(3) + for bin_id in range(4) + ], + ), + ("ordinal", [f"feat{col_id}" for col_id in range(3)]), + ], +) +def test_kbinsdiscrtizer_get_feature_names_out(encode, expected_names): + """Check get_feature_names_out for different settings. + Non-regression test for #22731 + """ + X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]] + + kbd = KBinsDiscretizer(n_bins=4, encode=encode).fit(X) + Xt = kbd.transform(X) + + input_features = [f"feat{i}" for i in range(3)] + output_names = kbd.get_feature_names_out(input_features) + assert Xt.shape[1] == output_names.shape[0] + + assert_array_equal(output_names, expected_names) + + +@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) +def test_kbinsdiscretizer_subsample(strategy, global_random_seed): + # Check that the bin edges are almost the same when subsampling is used. 
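+    # Illustrative note (editorial, not part of the original test): fitting on
+    # a 50_000-row random subsample of the 100_000-row X should only perturb
+    # the estimated bin edges slightly, which is why the comparison at the end
+    # of this test uses a loose rtol=1e-2:
+    #
+    #   kbd_subsampling.bin_edges_[0]  ~=  kbd_no_subsampling.bin_edges_[0]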
+ X = np.random.RandomState(global_random_seed).random_sample((100000, 1)) + 1 + + kbd_subsampling = KBinsDiscretizer( + strategy=strategy, subsample=50000, random_state=global_random_seed + ) + kbd_subsampling.fit(X) + + kbd_no_subsampling = clone(kbd_subsampling) + kbd_no_subsampling.set_params(subsample=None) + kbd_no_subsampling.fit(X) + + # We use a large tolerance because we can't expect the bin edges to be exactly the + # same when subsampling is used. + assert_allclose( + kbd_subsampling.bin_edges_[0], kbd_no_subsampling.bin_edges_[0], rtol=1e-2 + ) + + +# TODO(1.5) remove this test +@pytest.mark.parametrize("strategy", ["uniform", "kmeans"]) +def test_kbd_subsample_warning(strategy): + # Check the future warning for the change of default of subsample + X = np.random.RandomState(0).random_sample((100, 1)) + + kbd = KBinsDiscretizer(strategy=strategy, random_state=0) + with pytest.warns(FutureWarning, match="subsample=200_000 will be used by default"): + kbd.fit(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..ee5e1152fc710e5791e446ca8ffe0bc87beb001b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py @@ -0,0 +1,2338 @@ +import re + +import numpy as np +import pytest +from scipy import sparse + +from sklearn.exceptions import NotFittedError +from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder +from sklearn.utils import is_scalar_nan +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_one_hot_encoder_sparse_dense(): + # check that sparse and dense will give the same results + + X = np.array([[3, 2, 1], [0, 1, 1]]) + enc_sparse = OneHotEncoder() + enc_dense = OneHotEncoder(sparse_output=False) + + X_trans_sparse = enc_sparse.fit_transform(X) + X_trans_dense = enc_dense.fit_transform(X) + + assert X_trans_sparse.shape == (2, 5) + assert X_trans_dense.shape == (2, 5) + + assert sparse.issparse(X_trans_sparse) + assert not sparse.issparse(X_trans_dense) + + # check outcome + assert_array_equal( + X_trans_sparse.toarray(), [[0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0, 1.0]] + ) + assert_array_equal(X_trans_sparse.toarray(), X_trans_dense) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_one_hot_encoder_handle_unknown(handle_unknown): + X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) + X2 = np.array([[4, 1, 1]]) + + # Test that one hot encoder raises error for unknown features + # present during transform. 
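+    # Illustrative note (editorial, not part of the original test):
+    #
+    #   OneHotEncoder(handle_unknown="error").fit(X).transform(X2)
+    #   # -> ValueError: Found unknown categories ...
+    #   OneHotEncoder(handle_unknown=handle_unknown).fit(X).transform(X2)
+    #   # -> the unseen value 4 in the first column becomes an all-zero block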
+ oh = OneHotEncoder(handle_unknown="error") + oh.fit(X) + with pytest.raises(ValueError, match="Found unknown categories"): + oh.transform(X2) + + # Test the ignore option, ignores unknown features (giving all 0's) + oh = OneHotEncoder(handle_unknown=handle_unknown) + oh.fit(X) + X2_passed = X2.copy() + assert_array_equal( + oh.transform(X2_passed).toarray(), + np.array([[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]), + ) + # ensure transformed data was not modified in place + assert_allclose(X2, X2_passed) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_one_hot_encoder_handle_unknown_strings(handle_unknown): + X = np.array(["11111111", "22", "333", "4444"]).reshape((-1, 1)) + X2 = np.array(["55555", "22"]).reshape((-1, 1)) + # Non Regression test for the issue #12470 + # Test the ignore option, when categories are numpy string dtype + # particularly when the known category strings are larger + # than the unknown category strings + oh = OneHotEncoder(handle_unknown=handle_unknown) + oh.fit(X) + X2_passed = X2.copy() + assert_array_equal( + oh.transform(X2_passed).toarray(), + np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]), + ) + # ensure transformed data was not modified in place + assert_array_equal(X2, X2_passed) + + +@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) +@pytest.mark.parametrize("input_dtype", [np.int32, np.float32, np.float64]) +def test_one_hot_encoder_dtype(input_dtype, output_dtype): + X = np.asarray([[0, 1]], dtype=input_dtype).T + X_expected = np.asarray([[1, 0], [0, 1]], dtype=output_dtype) + + oh = OneHotEncoder(categories="auto", dtype=output_dtype) + assert_array_equal(oh.fit_transform(X).toarray(), X_expected) + assert_array_equal(oh.fit(X).transform(X).toarray(), X_expected) + + oh = OneHotEncoder(categories="auto", dtype=output_dtype, sparse_output=False) + assert_array_equal(oh.fit_transform(X), X_expected) + assert_array_equal(oh.fit(X).transform(X), X_expected) + + +@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) +def test_one_hot_encoder_dtype_pandas(output_dtype): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + X_expected = np.array([[1, 0, 1, 0], [0, 1, 0, 1]], dtype=output_dtype) + + oh = OneHotEncoder(dtype=output_dtype) + assert_array_equal(oh.fit_transform(X_df).toarray(), X_expected) + assert_array_equal(oh.fit(X_df).transform(X_df).toarray(), X_expected) + + oh = OneHotEncoder(dtype=output_dtype, sparse_output=False) + assert_array_equal(oh.fit_transform(X_df), X_expected) + assert_array_equal(oh.fit(X_df).transform(X_df), X_expected) + + +def test_one_hot_encoder_feature_names(): + enc = OneHotEncoder() + X = [ + ["Male", 1, "girl", 2, 3], + ["Female", 41, "girl", 1, 10], + ["Male", 51, "boy", 12, 3], + ["Male", 91, "girl", 21, 30], + ] + + enc.fit(X) + feature_names = enc.get_feature_names_out() + + assert_array_equal( + [ + "x0_Female", + "x0_Male", + "x1_1", + "x1_41", + "x1_51", + "x1_91", + "x2_boy", + "x2_girl", + "x3_1", + "x3_2", + "x3_12", + "x3_21", + "x4_3", + "x4_10", + "x4_30", + ], + feature_names, + ) + + feature_names2 = enc.get_feature_names_out(["one", "two", "three", "four", "five"]) + + assert_array_equal( + [ + "one_Female", + "one_Male", + "two_1", + "two_41", + "two_51", + "two_91", + "three_boy", + "three_girl", + "four_1", + "four_2", + "four_12", + "four_21", + "five_3", + "five_10", + "five_30", + ], + feature_names2, + ) + + with pytest.raises(ValueError, 
match="input_features should have length"): + enc.get_feature_names_out(["one", "two"]) + + +def test_one_hot_encoder_feature_names_unicode(): + enc = OneHotEncoder() + X = np.array([["c❤t1", "dat2"]], dtype=object).T + enc.fit(X) + feature_names = enc.get_feature_names_out() + assert_array_equal(["x0_c❤t1", "x0_dat2"], feature_names) + feature_names = enc.get_feature_names_out(input_features=["n👍me"]) + assert_array_equal(["n👍me_c❤t1", "n👍me_dat2"], feature_names) + + +def test_one_hot_encoder_custom_feature_name_combiner(): + """Check the behaviour of `feature_name_combiner` as a callable.""" + + def name_combiner(feature, category): + return feature + "_" + repr(category) + + enc = OneHotEncoder(feature_name_combiner=name_combiner) + X = np.array([["None", None]], dtype=object).T + enc.fit(X) + feature_names = enc.get_feature_names_out() + assert_array_equal(["x0_'None'", "x0_None"], feature_names) + feature_names = enc.get_feature_names_out(input_features=["a"]) + assert_array_equal(["a_'None'", "a_None"], feature_names) + + def wrong_combiner(feature, category): + # we should be returning a Python string + return 0 + + enc = OneHotEncoder(feature_name_combiner=wrong_combiner).fit(X) + err_msg = ( + "When `feature_name_combiner` is a callable, it should return a Python string." + ) + with pytest.raises(TypeError, match=err_msg): + enc.get_feature_names_out() + + +def test_one_hot_encoder_set_params(): + X = np.array([[1, 2]]).T + oh = OneHotEncoder() + # set params on not yet fitted object + oh.set_params(categories=[[0, 1, 2, 3]]) + assert oh.get_params()["categories"] == [[0, 1, 2, 3]] + assert oh.fit_transform(X).toarray().shape == (2, 4) + # set params on already fitted object + oh.set_params(categories=[[0, 1, 2, 3, 4]]) + assert oh.fit_transform(X).toarray().shape == (2, 5) + + +def check_categorical_onehot(X): + enc = OneHotEncoder(categories="auto") + Xtr1 = enc.fit_transform(X) + + enc = OneHotEncoder(categories="auto", sparse_output=False) + Xtr2 = enc.fit_transform(X) + + assert_allclose(Xtr1.toarray(), Xtr2) + + assert sparse.issparse(Xtr1) and Xtr1.format == "csr" + return Xtr1.toarray() + + +@pytest.mark.parametrize( + "X", + [ + [["def", 1, 55], ["abc", 2, 55]], + np.array([[10, 1, 55], [5, 2, 55]]), + np.array([["b", "A", "cat"], ["a", "B", "cat"]], dtype=object), + np.array([["b", 1, "cat"], ["a", np.nan, "cat"]], dtype=object), + np.array([["b", 1, "cat"], ["a", float("nan"), "cat"]], dtype=object), + np.array([[None, 1, "cat"], ["a", 2, "cat"]], dtype=object), + np.array([[None, 1, None], ["a", np.nan, None]], dtype=object), + np.array([[None, 1, None], ["a", float("nan"), None]], dtype=object), + ], + ids=[ + "mixed", + "numeric", + "object", + "mixed-nan", + "mixed-float-nan", + "mixed-None", + "mixed-None-nan", + "mixed-None-float-nan", + ], +) +def test_one_hot_encoder(X): + Xtr = check_categorical_onehot(np.array(X)[:, [0]]) + assert_allclose(Xtr, [[0, 1], [1, 0]]) + + Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]]) + assert_allclose(Xtr, [[0, 1, 1, 0], [1, 0, 0, 1]]) + + Xtr = OneHotEncoder(categories="auto").fit_transform(X) + assert_allclose(Xtr.toarray(), [[0, 1, 1, 0, 1], [1, 0, 0, 1, 1]]) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +@pytest.mark.parametrize("sparse_", [False, True]) +@pytest.mark.parametrize("drop", [None, "first"]) +def test_one_hot_encoder_inverse(handle_unknown, sparse_, drop): + X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] + enc = OneHotEncoder(sparse_output=sparse_, drop=drop) + 
X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + X = [[2, 55], [1, 55], [3, 55]] + enc = OneHotEncoder(sparse_output=sparse_, categories="auto", drop=drop) + X_tr = enc.fit_transform(X) + exp = np.array(X) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + if drop is None: + # with unknown categories + # drop is incompatible with handle_unknown=ignore + X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] + enc = OneHotEncoder( + sparse_output=sparse_, + handle_unknown=handle_unknown, + categories=[["abc", "def"], [1, 2], [54, 55, 56]], + ) + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + exp[2, 1] = None + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # with an otherwise numerical output, still object if unknown + X = [[2, 55], [1, 55], [3, 55]] + enc = OneHotEncoder( + sparse_output=sparse_, + categories=[[1, 2], [54, 56]], + handle_unknown=handle_unknown, + ) + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + exp[2, 0] = None + exp[:, 1] = None + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # incorrect shape raises + X_tr = np.array([[0, 1, 1], [1, 0, 1]]) + msg = re.escape("Shape of the passed X data is not correct") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_tr) + + +@pytest.mark.parametrize("sparse_", [False, True]) +@pytest.mark.parametrize( + "X, X_trans", + [ + ([[2, 55], [1, 55], [2, 55]], [[0, 1, 1], [0, 0, 0], [0, 1, 1]]), + ( + [["one", "a"], ["two", "a"], ["three", "b"], ["two", "a"]], + [[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0]], + ), + ], +) +def test_one_hot_encoder_inverse_transform_raise_error_with_unknown( + X, X_trans, sparse_ +): + """Check that `inverse_transform` raise an error with unknown samples, no + dropped feature, and `handle_unknow="error`. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/14934 + """ + enc = OneHotEncoder(sparse_output=sparse_).fit(X) + msg = ( + r"Samples \[(\d )*\d\] can not be inverted when drop=None and " + r"handle_unknown='error' because they contain all zeros" + ) + + if sparse_: + # emulate sparse data transform by a one-hot encoder sparse. 
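+        # Illustrative note (editorial, not part of the original test): with
+        # drop=None and handle_unknown="error", every valid encoded sample has
+        # exactly one 1 per feature block, so an all-zero block cannot be
+        # mapped back to any category and inverse_transform has to raise.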
+ X_trans = _convert_container(X_trans, "sparse") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_trans) + + +def test_one_hot_encoder_inverse_if_binary(): + X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + X_tr = ohe.fit_transform(X) + assert_array_equal(ohe.inverse_transform(X_tr), X) + + +@pytest.mark.parametrize("drop", ["if_binary", "first", None]) +@pytest.mark.parametrize("reset_drop", ["if_binary", "first", None]) +def test_one_hot_encoder_drop_reset(drop, reset_drop): + # check that resetting drop option without refitting does not throw an error + X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) + ohe = OneHotEncoder(drop=drop, sparse_output=False) + ohe.fit(X) + X_tr = ohe.transform(X) + feature_names = ohe.get_feature_names_out() + ohe.set_params(drop=reset_drop) + assert_array_equal(ohe.inverse_transform(X_tr), X) + assert_allclose(ohe.transform(X), X_tr) + assert_array_equal(ohe.get_feature_names_out(), feature_names) + + +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +@pytest.mark.parametrize("X", [[1, 2], np.array([3.0, 4.0])]) +def test_X_is_not_1D(X, method): + oh = OneHotEncoder() + + msg = "Expected 2D array, got 1D array instead" + with pytest.raises(ValueError, match=msg): + getattr(oh, method)(X) + + +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +def test_X_is_not_1D_pandas(method): + pd = pytest.importorskip("pandas") + X = pd.Series([6, 3, 4, 6]) + oh = OneHotEncoder() + + msg = f"Expected a 2-dimensional container but got {type(X)} instead." + with pytest.raises(ValueError, match=msg): + getattr(oh, method)(X) + + +@pytest.mark.parametrize( + "X, cat_exp, cat_dtype", + [ + ([["abc", 55], ["def", 55]], [["abc", "def"], [55]], np.object_), + (np.array([[1, 2], [3, 2]]), [[1, 3], [2]], np.integer), + ( + np.array([["A", "cat"], ["B", "cat"]], dtype=object), + [["A", "B"], ["cat"]], + np.object_, + ), + (np.array([["A", "cat"], ["B", "cat"]]), [["A", "B"], ["cat"]], np.str_), + (np.array([[1, 2], [np.nan, 2]]), [[1, np.nan], [2]], np.float64), + ( + np.array([["A", np.nan], [None, np.nan]], dtype=object), + [["A", None], [np.nan]], + np.object_, + ), + ( + np.array([["A", float("nan")], [None, float("nan")]], dtype=object), + [["A", None], [float("nan")]], + np.object_, + ), + ], + ids=[ + "mixed", + "numeric", + "object", + "string", + "missing-float", + "missing-np.nan-object", + "missing-float-nan-object", + ], +) +def test_one_hot_encoder_categories(X, cat_exp, cat_dtype): + # order of categories should not depend on order of samples + for Xi in [X, X[::-1]]: + enc = OneHotEncoder(categories="auto") + enc.fit(Xi) + # assert enc.categories == 'auto' + assert isinstance(enc.categories_, list) + for res, exp in zip(enc.categories_, cat_exp): + res_list = res.tolist() + if is_scalar_nan(exp[-1]): + assert is_scalar_nan(res_list[-1]) + assert res_list[:-1] == exp[:-1] + else: + assert res.tolist() == exp + assert np.issubdtype(res.dtype, cat_dtype) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [["a", "b", "c"]], + np.object_, + ), + ( + np.array([[1, 2]], dtype="int64").T, + np.array([[1, 4]], dtype="int64").T, + [[1, 2, 3]], + np.int64, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], 
dtype=object).T, + [np.array(["a", "b", "c"])], + np.object_, + ), + ( + np.array([[None, "a"]], dtype=object).T, + np.array([[None, "b"]], dtype=object).T, + [[None, "a", "z"]], + object, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", np.nan]], dtype=object).T, + [["a", "b", "z"]], + object, + ), + ( + np.array([["a", None]], dtype=object).T, + np.array([["a", np.nan]], dtype=object).T, + [["a", None, "z"]], + object, + ), + ], + ids=[ + "object", + "numeric", + "object-string", + "object-string-none", + "object-string-nan", + "object-None-and-nan", + ], +) +def test_one_hot_encoder_specified_categories(X, X2, cats, cat_dtype, handle_unknown): + enc = OneHotEncoder(categories=cats) + exp = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert list(enc.categories[0]) == list(cats[0]) + assert enc.categories_[0].tolist() == list(cats[0]) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert enc.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories should already + # raise when fitting + enc = OneHotEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + enc.fit(X2) + enc = OneHotEncoder(categories=cats, handle_unknown=handle_unknown) + exp = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) + assert_array_equal(enc.fit(X2).transform(X2).toarray(), exp) + + +def test_one_hot_encoder_unsorted_categories(): + X = np.array([["a", "b"]], dtype=object).T + + enc = OneHotEncoder(categories=[["b", "a", "c"]]) + exp = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) + assert_array_equal(enc.fit(X).transform(X).toarray(), exp) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert enc.categories_[0].tolist() == ["b", "a", "c"] + assert np.issubdtype(enc.categories_[0].dtype, np.object_) + + # unsorted passed categories still raise for numerical values + X = np.array([[1, 2]]).T + enc = OneHotEncoder(categories=[[2, 1, 3]]) + msg = "Unsorted categories are not supported" + with pytest.raises(ValueError, match=msg): + enc.fit_transform(X) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_nan_ending_specified_categories(Encoder): + """Test encoder for specified categories that nan is at the end. 
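+    (Editorial clarification: the user-supplied `categories` below contain NaN
+    in the middle of the array, and fitting must raise
+    "Nan should be the last element".)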
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27088 + """ + cats = [np.array([0, np.nan, 1])] + enc = Encoder(categories=cats) + X = np.array([[0, 1]], dtype=object).T + with pytest.raises(ValueError, match="Nan should be the last element"): + enc.fit(X) + + +def test_one_hot_encoder_specified_categories_mixed_columns(): + # multiple columns + X = np.array([["a", "b"], [0, 2]], dtype=object).T + enc = OneHotEncoder(categories=[["a", "b", "c"], [0, 1, 2]]) + exp = np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]]) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert enc.categories_[0].tolist() == ["a", "b", "c"] + assert np.issubdtype(enc.categories_[0].dtype, np.object_) + assert enc.categories_[1].tolist() == [0, 1, 2] + # integer categories but from object dtype data + assert np.issubdtype(enc.categories_[1].dtype, np.object_) + + +def test_one_hot_encoder_pandas(): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + + Xtr = check_categorical_onehot(X_df) + assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]]) + + +@pytest.mark.parametrize( + "drop, expected_names", + [ + ("first", ["x0_c", "x2_b"]), + ("if_binary", ["x0_c", "x1_2", "x2_b"]), + (["c", 2, "b"], ["x0_b", "x2_a"]), + ], + ids=["first", "binary", "manual"], +) +def test_one_hot_encoder_feature_names_drop(drop, expected_names): + X = [["c", 2, "a"], ["b", 2, "b"]] + + ohe = OneHotEncoder(drop=drop) + ohe.fit(X) + feature_names = ohe.get_feature_names_out() + assert_array_equal(expected_names, feature_names) + + +def test_one_hot_encoder_drop_equals_if_binary(): + # Canonical case + X = [[10, "yes"], [20, "no"], [30, "yes"]] + expected = np.array( + [[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]] + ) + expected_drop_idx = np.array([None, 0]) + + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + result = ohe.fit_transform(X) + assert_array_equal(ohe.drop_idx_, expected_drop_idx) + assert_allclose(result, expected) + + # with only one cat, the behaviour is equivalent to drop=None + X = [["true", "a"], ["false", "a"], ["false", "a"]] + expected = np.array([[1.0, 1.0], [0.0, 1.0], [0.0, 1.0]]) + expected_drop_idx = np.array([0, None]) + + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + result = ohe.fit_transform(X) + assert_array_equal(ohe.drop_idx_, expected_drop_idx) + assert_allclose(result, expected) + + +@pytest.mark.parametrize( + "X", + [ + [["abc", 2, 55], ["def", 1, 55]], + np.array([[10, 2, 55], [20, 1, 55]]), + np.array([["a", "B", "cat"], ["b", "A", "cat"]], dtype=object), + ], + ids=["mixed", "numeric", "object"], +) +def test_ordinal_encoder(X): + enc = OrdinalEncoder() + exp = np.array([[0, 1, 0], [1, 0, 0]], dtype="int64") + assert_array_equal(enc.fit_transform(X), exp.astype("float64")) + enc = OrdinalEncoder(dtype="int64") + assert_array_equal(enc.fit_transform(X), exp) + + +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [["a", "b", "c"]], + np.object_, + ), + ( + np.array([[1, 2]], dtype="int64").T, + np.array([[1, 4]], dtype="int64").T, + [[1, 2, 3]], + np.int64, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [np.array(["a", "b", "c"])], + np.object_, + ), + ], + ids=["object", "numeric", "object-string-cat"], +) +def test_ordinal_encoder_specified_categories(X, X2, cats, cat_dtype): + enc = 
OrdinalEncoder(categories=cats) + exp = np.array([[0.0], [1.0]]) + assert_array_equal(enc.fit_transform(X), exp) + assert list(enc.categories[0]) == list(cats[0]) + assert enc.categories_[0].tolist() == list(cats[0]) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert enc.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories should already + # raise when fitting + enc = OrdinalEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + enc.fit(X2) + + +def test_ordinal_encoder_inverse(): + X = [["abc", 2, 55], ["def", 1, 55]] + enc = OrdinalEncoder() + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # incorrect shape raises + X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]]) + msg = re.escape("Shape of the passed X data is not correct") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_tr) + + +def test_ordinal_encoder_handle_unknowns_string(): + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-2) + X_fit = np.array([["a", "x"], ["b", "y"], ["c", "z"]], dtype=object) + X_trans = np.array([["c", "xy"], ["bla", "y"], ["a", "x"]], dtype=object) + enc.fit(X_fit) + + X_trans_enc = enc.transform(X_trans) + exp = np.array([[2, -2], [-2, 1], [0, 0]], dtype="int64") + assert_array_equal(X_trans_enc, exp) + + X_trans_inv = enc.inverse_transform(X_trans_enc) + inv_exp = np.array([["c", None], [None, "y"], ["a", "x"]], dtype=object) + assert_array_equal(X_trans_inv, inv_exp) + + +@pytest.mark.parametrize("dtype", [float, int]) +def test_ordinal_encoder_handle_unknowns_numeric(dtype): + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-999) + X_fit = np.array([[1, 7], [2, 8], [3, 9]], dtype=dtype) + X_trans = np.array([[3, 12], [23, 8], [1, 7]], dtype=dtype) + enc.fit(X_fit) + + X_trans_enc = enc.transform(X_trans) + exp = np.array([[2, -999], [-999, 1], [0, 0]], dtype="int64") + assert_array_equal(X_trans_enc, exp) + + X_trans_inv = enc.inverse_transform(X_trans_enc) + inv_exp = np.array([[3, None], [None, 8], [1, 7]], dtype=object) + assert_array_equal(X_trans_inv, inv_exp) + + +def test_ordinal_encoder_handle_unknowns_nan(): + # Make sure unknown_value=np.nan properly works + + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan) + + X_fit = np.array([[1], [2], [3]]) + enc.fit(X_fit) + X_trans = enc.transform([[1], [2], [4]]) + assert_array_equal(X_trans, [[0], [1], [np.nan]]) + + +def test_ordinal_encoder_handle_unknowns_nan_non_float_dtype(): + # Make sure an error is raised when unknown_value=np.nan and the dtype + # isn't a float dtype + enc = OrdinalEncoder( + handle_unknown="use_encoded_value", unknown_value=np.nan, dtype=int + ) + + X_fit = np.array([[1], [2], [3]]) + with pytest.raises(ValueError, match="dtype parameter should be a float dtype"): + enc.fit(X_fit) + + +def test_ordinal_encoder_raise_categories_shape(): + X = np.array([["Low", "Medium", "High", "Medium", "Low"]], dtype=object).T + cats = ["Low", "Medium", "High"] + enc = OrdinalEncoder(categories=cats) + msg = "Shape mismatch: if categories is an array," + + with pytest.raises(ValueError, match=msg): + enc.fit(X) + + +def test_encoder_dtypes(): + # check that dtypes are preserved when determining categories + enc = OneHotEncoder(categories="auto") + exp = np.array([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]], dtype="float64") + + for X in [ 
+ np.array([[1, 2], [3, 4]], dtype="int64"), + np.array([[1, 2], [3, 4]], dtype="float64"), + np.array([["a", "b"], ["c", "d"]]), # str dtype + np.array([[b"a", b"b"], [b"c", b"d"]]), # bytes dtype + np.array([[1, "a"], [3, "b"]], dtype="object"), + ]: + enc.fit(X) + assert all([enc.categories_[i].dtype == X.dtype for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = [[1, 2], [3, 4]] + enc.fit(X) + assert all([np.issubdtype(enc.categories_[i].dtype, np.integer) for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = [[1, "a"], [3, "b"]] + enc.fit(X) + assert all([enc.categories_[i].dtype == "object" for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + +def test_encoder_dtypes_pandas(): + # check dtype (similar to test_categorical_encoder_dtypes for dataframes) + pd = pytest.importorskip("pandas") + + enc = OneHotEncoder(categories="auto") + exp = np.array( + [[1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]], + dtype="float64", + ) + + X = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}, dtype="int64") + enc.fit(X) + assert all([enc.categories_[i].dtype == "int64" for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = pd.DataFrame({"A": [1, 2], "B": ["a", "b"], "C": [3.0, 4.0]}) + X_type = [X["A"].dtype, X["B"].dtype, X["C"].dtype] + enc.fit(X) + assert all([enc.categories_[i].dtype == X_type[i] for i in range(3)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + +def test_one_hot_encoder_warning(): + enc = OneHotEncoder() + X = [["Male", 1], ["Female", 3]] + np.testing.assert_no_warnings(enc.fit_transform, X) + + +@pytest.mark.parametrize("missing_value", [np.nan, None, float("nan")]) +def test_one_hot_encoder_drop_manual(missing_value): + cats_to_drop = ["def", 12, 3, 56, missing_value] + enc = OneHotEncoder(drop=cats_to_drop) + X = [ + ["abc", 12, 2, 55, "a"], + ["def", 12, 1, 55, "a"], + ["def", 12, 3, 56, missing_value], + ] + trans = enc.fit_transform(X).toarray() + exp = [[1, 0, 1, 1, 1], [0, 1, 0, 1, 1], [0, 0, 0, 0, 0]] + assert_array_equal(trans, exp) + assert enc.drop is cats_to_drop + + dropped_cats = [ + cat[feature] for cat, feature in zip(enc.categories_, enc.drop_idx_) + ] + X_inv_trans = enc.inverse_transform(trans) + X_array = np.array(X, dtype=object) + + # last value is np.nan + if is_scalar_nan(cats_to_drop[-1]): + assert_array_equal(dropped_cats[:-1], cats_to_drop[:-1]) + assert is_scalar_nan(dropped_cats[-1]) + assert is_scalar_nan(cats_to_drop[-1]) + # do not include the last column which includes missing values + assert_array_equal(X_array[:, :-1], X_inv_trans[:, :-1]) + + # check last column is the missing value + assert_array_equal(X_array[-1, :-1], X_inv_trans[-1, :-1]) + assert is_scalar_nan(X_array[-1, -1]) + assert is_scalar_nan(X_inv_trans[-1, -1]) + else: + assert_array_equal(dropped_cats, cats_to_drop) + assert_array_equal(X_array, X_inv_trans) + + +@pytest.mark.parametrize("drop", [["abc", 3], ["abc", 3, 41, "a"]]) +def test_invalid_drop_length(drop): + enc = OneHotEncoder(drop=drop) + err_msg = "`drop` should have length equal to the number" + with pytest.raises(ValueError, match=err_msg): + enc.fit([["abc", 2, 55], ["def", 1, 55], ["def", 3, 59]]) + + +@pytest.mark.parametrize("density", [True, False], ids=["sparse", "dense"]) +@pytest.mark.parametrize("drop", ["first", ["a", 2, "b"]], ids=["first", "manual"]) +def test_categories(density, drop): + ohe_base = OneHotEncoder(sparse_output=density) + ohe_test = 
OneHotEncoder(sparse_output=density, drop=drop) + X = [["c", 1, "a"], ["a", 2, "b"]] + ohe_base.fit(X) + ohe_test.fit(X) + assert_array_equal(ohe_base.categories_, ohe_test.categories_) + if drop == "first": + assert_array_equal(ohe_test.drop_idx_, 0) + else: + for drop_cat, drop_idx, cat_list in zip( + drop, ohe_test.drop_idx_, ohe_test.categories_ + ): + assert cat_list[int(drop_idx)] == drop_cat + assert isinstance(ohe_test.drop_idx_, np.ndarray) + assert ohe_test.drop_idx_.dtype == object + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoders_has_categorical_tags(Encoder): + assert "categorical" in Encoder()._get_tags()["X_types"] + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 2}, + {"min_frequency": 11}, + {"min_frequency": 0.29}, + {"max_categories": 2, "min_frequency": 6}, + {"max_categories": 4, "min_frequency": 12}, + ], +) +@pytest.mark.parametrize("categories", ["auto", [["a", "b", "c", "d"]]]) +def test_ohe_infrequent_two_levels(kwargs, categories): + """Test that different parameters for combine 'a', 'c', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + categories=categories, + handle_unknown="infrequent_if_exist", + sparse_output=False, + **kwargs, + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "c", "d"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_b", "x0_infrequent_sklearn"], feature_names) + + +@pytest.mark.parametrize("drop", ["if_binary", "first", ["b"]]) +def test_ohe_infrequent_two_levels_drop_frequent(drop): + """Test two levels and dropping the frequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=2, + drop=drop, + ).fit(X_train) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "b" + + X_test = np.array([["b"], ["c"]]) + X_trans = ohe.transform(X_test) + assert_allclose([[0], [1]], X_trans) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_infrequent_sklearn"], feature_names) + + X_inverse = ohe.inverse_transform(X_trans) + assert_array_equal([["b"], ["infrequent_sklearn"]], X_inverse) + + +@pytest.mark.parametrize("drop", [["a"], ["d"]]) +def test_ohe_infrequent_two_levels_drop_infrequent_errors(drop): + """Test two levels and dropping any infrequent category removes the + whole infrequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=2, + drop=drop, + ) + + msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" + with pytest.raises(ValueError, match=msg): + ohe.fit(X_train) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 3}, + {"min_frequency": 6}, + {"min_frequency": 9}, + {"min_frequency": 0.24}, + {"min_frequency": 0.16}, + {"max_categories": 3, "min_frequency": 8}, + {"max_categories": 4, "min_frequency": 6}, + ], +) +def 
test_ohe_infrequent_three_levels(kwargs): + """Test that different parameters for combing 'a', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + expected_inv = [ + ["b"], + ["infrequent_sklearn"], + ["c"], + ["infrequent_sklearn"], + ["infrequent_sklearn"], + ] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_b", "x0_c", "x0_infrequent_sklearn"], feature_names) + + +@pytest.mark.parametrize("drop", ["first", ["b"]]) +def test_ohe_infrequent_three_levels_drop_frequent(drop): + """Test three levels and dropping the frequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=3, + drop=drop, + ).fit(X_train) + + X_test = np.array([["b"], ["c"], ["d"]]) + assert_allclose([[0, 0], [1, 0], [0, 1]], ohe.transform(X_test)) + + # Check handle_unknown="ignore" + ohe.set_params(handle_unknown="ignore").fit(X_train) + msg = "Found unknown categories" + with pytest.warns(UserWarning, match=msg): + X_trans = ohe.transform([["b"], ["e"]]) + + assert_allclose([[0, 0], [0, 0]], X_trans) + + +@pytest.mark.parametrize("drop", [["a"], ["d"]]) +def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop): + """Test three levels and dropping the infrequent category.""" + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=3, + drop=drop, + ) + + msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" + with pytest.raises(ValueError, match=msg): + ohe.fit(X_train) + + +def test_ohe_infrequent_handle_unknown_error(): + """Test that different parameters for combining 'a', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="error", sparse_output=False, max_categories=3 + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) + + # all categories are known + X_test = [["b"], ["a"], ["c"], ["d"]] + expected = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'bad' is not known and will error + X_test = [["bad"]] + msg = r"Found unknown categories \['bad'\] in column 0" + with pytest.raises(ValueError, match=msg): + ohe.transform(X_test) + + +@pytest.mark.parametrize( + "kwargs", [{"max_categories": 3, "min_frequency": 1}, {"min_frequency": 4}] +) +def test_ohe_infrequent_two_levels_user_cats_one_frequent(kwargs): + """'a' is the only frequent category, all other categories are infrequent.""" + + X_train = np.array([["a"] * 5 + ["e"] * 30], dtype=object).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + **kwargs, + ).fit(X_train) + + X_test = [["a"], ["b"], 
["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'a' is dropped + drops = ["first", "if_binary", ["a"]] + X_test = [["a"], ["c"]] + for drop in drops: + ohe.set_params(drop=drop).fit(X_train) + assert_allclose([[0], [1]], ohe.transform(X_test)) + + +def test_ohe_infrequent_two_levels_user_cats(): + """Test that the order of the categories provided by a user is respected.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + max_categories=2, + ).fit(X_train) + + assert_array_equal(ohe.infrequent_categories_, [["c", "d", "a"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'infrequent' is used to denote the infrequent categories for + # `inverse_transform` + expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_three_levels_user_cats(): + """Test that the order of the categories provided by a user is respected. + In this case 'c' is encoded as the first category and 'b' is encoded + as the second one.""" + + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ohe = OneHotEncoder( + categories=[["c", "d", "b", "a"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + max_categories=3, + ).fit(X_train) + + assert_array_equal(ohe.infrequent_categories_, [["d", "a"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'infrequent' is used to denote the infrequent categories for + # `inverse_transform` + expected_inv = [ + ["b"], + ["infrequent_sklearn"], + ["c"], + ["infrequent_sklearn"], + ["infrequent_sklearn"], + ] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_mixed(): + """Test infrequent categories where feature 0 has infrequent categories, + and feature 1 does not.""" + + # X[:, 0] 1 and 2 are infrequent + # X[:, 1] nothing is infrequent + X = np.c_[[0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1]] + + ohe = OneHotEncoder(max_categories=3, drop="if_binary", sparse_output=False) + ohe.fit(X) + + X_test = [[3, 0], [1, 1]] + X_trans = ohe.transform(X_test) + + # feature 1 is binary so it drops a category 0 + assert_allclose(X_trans, [[0, 1, 0, 0], [0, 0, 1, 1]]) + + +def test_ohe_infrequent_multiple_categories(): + """Test infrequent categories with feature matrix with 3 features.""" + + X = np.c_[ + [0, 1, 3, 3, 3, 3, 2, 0, 3], + [0, 0, 5, 1, 1, 10, 5, 5, 0], + [1, 0, 1, 0, 1, 0, 1, 0, 1], + ] + + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" + ) + # X[:, 0] 1 and 2 are infrequent + # X[:, 1] 1 and 10 are infrequent + # X[:, 2] nothing is infrequent + + X_trans = ohe.fit_transform(X).toarray() + assert_array_equal(ohe.infrequent_categories_[0], [1, 2]) + assert_array_equal(ohe.infrequent_categories_[1], [1, 10]) + assert_array_equal(ohe.infrequent_categories_[2], None) + + # 'infrequent' is used to denote the 
infrequent categories + # For the first column, 1 and 2 have the same frequency. In this case, + # 1 will be chosen to be the feature name because is smaller lexiconically + feature_names = ohe.get_feature_names_out() + assert_array_equal( + [ + "x0_0", + "x0_3", + "x0_infrequent_sklearn", + "x1_0", + "x1_5", + "x1_infrequent_sklearn", + "x2_0", + "x2_1", + ], + feature_names, + ) + + expected = [ + [1, 0, 0, 1, 0, 0, 0, 1], + [0, 0, 1, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 1, 0, 0, 1], + [0, 1, 0, 0, 0, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0, 1], + [0, 1, 0, 0, 0, 1, 1, 0], + [0, 0, 1, 0, 1, 0, 0, 1], + [1, 0, 0, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 0, 0, 1], + ] + + assert_allclose(expected, X_trans) + + X_test = [[3, 1, 2], [4, 0, 3]] + + X_test_trans = ohe.transform(X_test) + + # X[:, 2] does not have an infrequent category, thus it is encoded as all + # zeros + expected = [[0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0]] + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [[3, "infrequent_sklearn", None], ["infrequent_sklearn", 0, None]], dtype=object + ) + assert_array_equal(expected_inv, X_inv) + + # error for unknown categories + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="error" + ).fit(X) + with pytest.raises(ValueError, match="Found unknown categories"): + ohe.transform(X_test) + + # only infrequent or known categories + X_test = [[1, 1, 1], [3, 10, 0]] + X_test_trans = ohe.transform(X_test) + + expected = [[0, 0, 1, 0, 0, 1, 0, 1], [0, 1, 0, 0, 0, 1, 1, 0]] + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + + expected_inv = np.array( + [["infrequent_sklearn", "infrequent_sklearn", 1], [3, "infrequent_sklearn", 0]], + dtype=object, + ) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_multiple_categories_dtypes(): + """Test infrequent categories with a pandas dataframe with multiple dtypes.""" + + pd = pytest.importorskip("pandas") + X = pd.DataFrame( + { + "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], + "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], + }, + columns=["str", "int"], + ) + + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" + ) + # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be + # considered infrequent because they are greater + + # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. 
+ # 0, 3, 12 will be considered infrequent + + X_trans = ohe.fit_transform(X).toarray() + assert_array_equal(ohe.infrequent_categories_[0], ["a", "b"]) + assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12]) + + expected = [ + [0, 0, 1, 1, 0, 0], + [0, 1, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 1], + [0, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1], + [1, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 0, 1], + [0, 0, 1, 1, 0, 0], + ] + + assert_allclose(expected, X_trans) + + X_test = pd.DataFrame({"str": ["b", "f"], "int": [14, 12]}, columns=["str", "int"]) + + expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]] + X_test_trans = ohe.transform(X_test) + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [["infrequent_sklearn", "infrequent_sklearn"], ["f", "infrequent_sklearn"]], + dtype=object, + ) + assert_array_equal(expected_inv, X_inv) + + # only infrequent or known categories + X_test = pd.DataFrame({"str": ["c", "b"], "int": [12, 5]}, columns=["str", "int"]) + X_test_trans = ohe.transform(X_test).toarray() + expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]] + assert_allclose(expected, X_test_trans) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [["c", "infrequent_sklearn"], ["infrequent_sklearn", 5]], dtype=object + ) + assert_array_equal(expected_inv, X_inv) + + +@pytest.mark.parametrize("kwargs", [{"min_frequency": 21, "max_categories": 1}]) +def test_ohe_infrequent_one_level_errors(kwargs): + """All user provided categories are infrequent.""" + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 2]).T + + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs + ) + ohe.fit(X_train) + + X_trans = ohe.transform([["a"]]) + assert_allclose(X_trans, [[1]]) + + +@pytest.mark.parametrize("kwargs", [{"min_frequency": 2, "max_categories": 3}]) +def test_ohe_infrequent_user_cats_unknown_training_errors(kwargs): + """All user provided categories are infrequent.""" + + X_train = np.array([["e"] * 3], dtype=object).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + **kwargs, + ).fit(X_train) + + X_trans = ohe.transform([["a"], ["e"]]) + assert_allclose(X_trans, [[1], [1]]) + + +# deliberately omit 'OS' as an invalid combo +@pytest.mark.parametrize( + "input_dtype, category_dtype", ["OO", "OU", "UO", "UU", "SO", "SU", "SS"] +) +@pytest.mark.parametrize("array_type", ["list", "array", "dataframe"]) +def test_encoders_string_categories(input_dtype, category_dtype, array_type): + """Check that encoding work with object, unicode, and byte string dtypes. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15616 + https://github.com/scikit-learn/scikit-learn/issues/15726 + https://github.com/scikit-learn/scikit-learn/issues/19677 + """ + + X = np.array([["b"], ["a"]], dtype=input_dtype) + categories = [np.array(["b", "a"], dtype=category_dtype)] + ohe = OneHotEncoder(categories=categories, sparse_output=False).fit(X) + + X_test = _convert_container( + [["a"], ["a"], ["b"], ["a"]], array_type, dtype=input_dtype + ) + X_trans = ohe.transform(X_test) + + expected = np.array([[0, 1], [0, 1], [1, 0], [0, 1]]) + assert_allclose(X_trans, expected) + + oe = OrdinalEncoder(categories=categories).fit(X) + X_trans = oe.transform(X_test) + + expected = np.array([[1], [1], [0], [1]]) + assert_array_equal(X_trans, expected) + + +def test_mixed_string_bytes_categoricals(): + """Check that this mixture of predefined categories and X raises an error. + + Categories defined as bytes can not easily be compared to data that is + a string. + """ + # data as unicode + X = np.array([["b"], ["a"]], dtype="U") + # predefined categories as bytes + categories = [np.array(["b", "a"], dtype="S")] + ohe = OneHotEncoder(categories=categories, sparse_output=False) + + msg = re.escape( + "In column 0, the predefined categories have type 'bytes' which is incompatible" + " with values of type 'str_'." + ) + + with pytest.raises(ValueError, match=msg): + ohe.fit(X) + + +@pytest.mark.parametrize("missing_value", [np.nan, None]) +def test_ohe_missing_values_get_feature_names(missing_value): + # encoder with missing values with object dtypes + X = np.array([["a", "b", missing_value, "a", missing_value]], dtype=object).T + ohe = OneHotEncoder(sparse_output=False, handle_unknown="ignore").fit(X) + names = ohe.get_feature_names_out() + assert_array_equal(names, ["x0_a", "x0_b", f"x0_{missing_value}"]) + + +def test_ohe_missing_value_support_pandas(): + # check support for pandas with mixed dtypes and missing values + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "col1": ["dog", "cat", None, "cat"], + "col2": np.array([3, 0, 4, np.nan], dtype=float), + }, + columns=["col1", "col2"], + ) + expected_df_trans = np.array( + [ + [0, 1, 0, 0, 1, 0, 0], + [1, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0, 1], + ] + ) + + Xtr = check_categorical_onehot(df) + assert_allclose(Xtr, expected_df_trans) + + +@pytest.mark.parametrize("handle_unknown", ["infrequent_if_exist", "ignore"]) +@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"]) +def test_ohe_missing_value_support_pandas_categorical(pd_nan_type, handle_unknown): + # checks pandas dataframe with categorical features + pd = pytest.importorskip("pandas") + + pd_missing_value = pd.NA if pd_nan_type == "pd.NA" else np.nan + + df = pd.DataFrame( + { + "col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"), + } + ) + expected_df_trans = np.array( + [ + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [1, 0, 0, 0], + ] + ) + + ohe = OneHotEncoder(sparse_output=False, handle_unknown=handle_unknown) + df_trans = ohe.fit_transform(df) + assert_allclose(expected_df_trans, df_trans) + + assert len(ohe.categories_) == 1 + assert_array_equal(ohe.categories_[0][:-1], ["a", "b", "c"]) + assert np.isnan(ohe.categories_[0][-1]) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_first_handle_unknown_ignore_warns(handle_unknown): + """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist' 
+ during transform.""" + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="first", sparse_output=False, handle_unknown=handle_unknown + ) + X_trans = ohe.fit_transform(X) + + X_expected = np.array( + [ + [0, 0, 0], + [1, 0, 1], + [1, 1, 0], + ] + ) + assert_allclose(X_trans, X_expected) + + # Both categories are unknown + X_test = [["c", 3]] + X_expected = np.array([[0, 0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0, 1\] during " + "transform. These unknown categories will be encoded as all " + "zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + # inverse_transform maps to None + X_inv = ohe.inverse_transform(X_expected) + assert_array_equal(X_inv, np.array([["a", 0]], dtype=object)) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_if_binary_handle_unknown_ignore_warns(handle_unknown): + """Check drop='if_binary' and handle_unknown='ignore' during transform.""" + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="if_binary", sparse_output=False, handle_unknown=handle_unknown + ) + X_trans = ohe.fit_transform(X) + + X_expected = np.array( + [ + [0, 1, 0, 0], + [1, 0, 0, 1], + [1, 0, 1, 0], + ] + ) + assert_allclose(X_trans, X_expected) + + # Both categories are unknown + X_test = [["c", 3]] + X_expected = np.array([[0, 0, 0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0, 1\] during " + "transform. These unknown categories will be encoded as all " + "zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + # inverse_transform maps to None + X_inv = ohe.inverse_transform(X_expected) + assert_array_equal(X_inv, np.array([["a", None]], dtype=object)) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_first_explicit_categories(handle_unknown): + """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist' + during fit with categories passed in.""" + + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="first", + sparse_output=False, + handle_unknown=handle_unknown, + categories=[["b", "a"], [1, 2]], + ) + ohe.fit(X) + + X_test = [["c", 1]] + X_expected = np.array([[0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0\] during transform. " + r"These unknown categories will be encoded as all zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + +def test_ohe_more_informative_error_message(): + """Raise informative error message when pandas output and sparse_output=True.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": [1, 2, 3], "b": ["z", "b", "b"]}, columns=["a", "b"]) + + ohe = OneHotEncoder(sparse_output=True) + ohe.set_output(transform="pandas") + + msg = ( + "Pandas output does not support sparse data. 
Set " + "sparse_output=False to output pandas dataframes or disable Pandas output" + ) + with pytest.raises(ValueError, match=msg): + ohe.fit_transform(df) + + ohe.fit(df) + with pytest.raises(ValueError, match=msg): + ohe.transform(df) + + +def test_ordinal_encoder_passthrough_missing_values_float_errors_dtype(): + """Test ordinal encoder with nan passthrough fails when dtype=np.int32.""" + + X = np.array([[np.nan, 3.0, 1.0, 3.0]]).T + oe = OrdinalEncoder(dtype=np.int32) + + msg = ( + r"There are missing values in features \[0\]. For OrdinalEncoder " + f"to encode missing values with dtype: {np.int32}" + ) + with pytest.raises(ValueError, match=msg): + oe.fit(X) + + +@pytest.mark.parametrize("encoded_missing_value", [np.nan, -2]) +def test_ordinal_encoder_passthrough_missing_values_float(encoded_missing_value): + """Test ordinal encoder with nan on float dtypes.""" + + X = np.array([[np.nan, 3.0, 1.0, 3.0]], dtype=np.float64).T + oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(X) + + assert len(oe.categories_) == 1 + + assert_allclose(oe.categories_[0], [1.0, 3.0, np.nan]) + + X_trans = oe.transform(X) + assert_allclose(X_trans, [[encoded_missing_value], [1.0], [0.0], [1.0]]) + + X_inverse = oe.inverse_transform(X_trans) + assert_allclose(X_inverse, X) + + +@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"]) +@pytest.mark.parametrize("encoded_missing_value", [np.nan, -2]) +def test_ordinal_encoder_missing_value_support_pandas_categorical( + pd_nan_type, encoded_missing_value +): + """Check ordinal encoder is compatible with pandas.""" + # checks pandas dataframe with categorical features + pd = pytest.importorskip("pandas") + + pd_missing_value = pd.NA if pd_nan_type == "pd.NA" else np.nan + + df = pd.DataFrame( + { + "col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"), + } + ) + + oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(df) + assert len(oe.categories_) == 1 + assert_array_equal(oe.categories_[0][:3], ["a", "b", "c"]) + assert np.isnan(oe.categories_[0][-1]) + + df_trans = oe.transform(df) + + assert_allclose(df_trans, [[2.0], [0.0], [encoded_missing_value], [1.0], [0.0]]) + + X_inverse = oe.inverse_transform(df_trans) + assert X_inverse.shape == (5, 1) + assert_array_equal(X_inverse[:2, 0], ["c", "a"]) + assert_array_equal(X_inverse[3:, 0], ["b", "a"]) + assert np.isnan(X_inverse[2, 0]) + + +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + ( + np.array([["a", np.nan]], dtype=object).T, + np.array([["a", "b"]], dtype=object).T, + [np.array(["a", "d", np.nan], dtype=object)], + np.object_, + ) + ), + ( + ( + np.array([["a", np.nan]], dtype=object).T, + np.array([["a", "b"]], dtype=object).T, + [np.array(["a", "d", np.nan], dtype=object)], + np.object_, + ) + ), + ( + ( + np.array([[2.0, np.nan]], dtype=np.float64).T, + np.array([[3.0]], dtype=np.float64).T, + [np.array([2.0, 4.0, np.nan])], + np.float64, + ) + ), + ], + ids=[ + "object-None-missing-value", + "object-nan-missing_value", + "numeric-missing-value", + ], +) +def test_ordinal_encoder_specified_categories_missing_passthrough( + X, X2, cats, cat_dtype +): + """Test ordinal encoder for specified categories.""" + oe = OrdinalEncoder(categories=cats) + exp = np.array([[0.0], [np.nan]]) + assert_array_equal(oe.fit_transform(X), exp) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert oe.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories 
should already + # raise when fitting + oe = OrdinalEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + oe.fit(X2) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_duplicate_specified_categories(Encoder): + """Test encoder for specified categories have duplicate values. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27088 + """ + cats = [np.array(["a", "b", "a"], dtype=object)] + enc = Encoder(categories=cats) + X = np.array([["a", "b"]], dtype=object).T + with pytest.raises( + ValueError, match="the predefined categories contain duplicate elements." + ): + enc.fit(X) + + +@pytest.mark.parametrize( + "X, expected_X_trans, X_test", + [ + ( + np.array([[1.0, np.nan, 3.0]]).T, + np.array([[0.0, np.nan, 1.0]]).T, + np.array([[4.0]]), + ), + ( + np.array([[1.0, 4.0, 3.0]]).T, + np.array([[0.0, 2.0, 1.0]]).T, + np.array([[np.nan]]), + ), + ( + np.array([["c", np.nan, "b"]], dtype=object).T, + np.array([[1.0, np.nan, 0.0]]).T, + np.array([["d"]], dtype=object), + ), + ( + np.array([["c", "a", "b"]], dtype=object).T, + np.array([[2.0, 0.0, 1.0]]).T, + np.array([[np.nan]], dtype=object), + ), + ], +) +def test_ordinal_encoder_handle_missing_and_unknown(X, expected_X_trans, X_test): + """Test the interaction between missing values and handle_unknown""" + + oe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1) + + X_trans = oe.fit_transform(X) + assert_allclose(X_trans, expected_X_trans) + + assert_allclose(oe.transform(X_test), [[-1.0]]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ordinal_encoder_sparse(csr_container): + """Check that we raise proper error with sparse input in OrdinalEncoder. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19878 + """ + X = np.array([[3, 2, 1], [0, 1, 1]]) + X_sparse = csr_container(X) + + encoder = OrdinalEncoder() + + err_msg = "Sparse data was passed, but dense data is required" + with pytest.raises(TypeError, match=err_msg): + encoder.fit(X_sparse) + with pytest.raises(TypeError, match=err_msg): + encoder.fit_transform(X_sparse) + + X_trans = encoder.fit_transform(X) + X_trans_sparse = csr_container(X_trans) + with pytest.raises(TypeError, match=err_msg): + encoder.inverse_transform(X_trans_sparse) + + +def test_ordinal_encoder_fit_with_unseen_category(): + """Check OrdinalEncoder.fit works with unseen category when + `handle_unknown="use_encoded_value"`. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19872 + """ + X = np.array([0, 0, 1, 0, 2, 5])[:, np.newaxis] + oe = OrdinalEncoder( + categories=[[-1, 0, 1]], handle_unknown="use_encoded_value", unknown_value=-999 + ) + oe.fit(X) + + oe = OrdinalEncoder(categories=[[-1, 0, 1]], handle_unknown="error") + with pytest.raises(ValueError, match="Found unknown categories"): + oe.fit(X) + + +@pytest.mark.parametrize( + "X_train", + [ + [["AA", "B"]], + np.array([["AA", "B"]], dtype="O"), + np.array([["AA", "B"]], dtype="U"), + ], +) +@pytest.mark.parametrize( + "X_test", + [ + [["A", "B"]], + np.array([["A", "B"]], dtype="O"), + np.array([["A", "B"]], dtype="U"), + ], +) +def test_ordinal_encoder_handle_unknown_string_dtypes(X_train, X_test): + """Checks that `OrdinalEncoder` transforms string dtypes. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19872 + """ + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-9) + enc.fit(X_train) + + X_trans = enc.transform(X_test) + assert_allclose(X_trans, [[-9, 0]]) + + +def test_ordinal_encoder_python_integer(): + """Check that `OrdinalEncoder` accepts Python integers that are potentially + larger than 64 bits. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20721 + """ + X = np.array( + [ + 44253463435747313673, + 9867966753463435747313673, + 44253462342215747313673, + 442534634357764313673, + ] + ).reshape(-1, 1) + encoder = OrdinalEncoder().fit(X) + assert_array_equal(encoder.categories_, np.sort(X, axis=0).T) + X_trans = encoder.transform(X) + assert_array_equal(X_trans, [[0], [3], [2], [1]]) + + +def test_ordinal_encoder_features_names_out_pandas(): + """Check feature names out is same as the input.""" + pd = pytest.importorskip("pandas") + + names = ["b", "c", "a"] + X = pd.DataFrame([[1, 2, 3]], columns=names) + enc = OrdinalEncoder().fit(X) + + feature_names_out = enc.get_feature_names_out() + assert_array_equal(names, feature_names_out) + + +def test_ordinal_encoder_unknown_missing_interaction(): + """Check interactions between encode_unknown and missing value encoding.""" + + X = np.array([["a"], ["b"], [np.nan]], dtype=object) + + oe = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=np.nan, + encoded_missing_value=-3, + ).fit(X) + + X_trans = oe.transform(X) + assert_allclose(X_trans, [[0], [1], [-3]]) + + # "c" is unknown and is mapped to np.nan + # "None" is a missing value and is set to -3 + X_test = np.array([["c"], [np.nan]], dtype=object) + X_test_trans = oe.transform(X_test) + assert_allclose(X_test_trans, [[np.nan], [-3]]) + + # Non-regression test for #24082 + X_roundtrip = oe.inverse_transform(X_test_trans) + + # np.nan is unknown so it maps to None + assert X_roundtrip[0][0] is None + + # -3 is the encoded missing value so it maps back to nan + assert np.isnan(X_roundtrip[1][0]) + + +@pytest.mark.parametrize("with_pandas", [True, False]) +def test_ordinal_encoder_encoded_missing_value_error(with_pandas): + """Check OrdinalEncoder errors when encoded_missing_value is used by + an known category.""" + X = np.array([["a", "dog"], ["b", "cat"], ["c", np.nan]], dtype=object) + + # The 0-th feature has no missing values so it is not included in the list of + # features + error_msg = ( + r"encoded_missing_value \(1\) is already used to encode a known category " + r"in features: " + ) + + if with_pandas: + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X, columns=["letter", "pet"]) + error_msg = error_msg + r"\['pet'\]" + else: + error_msg = error_msg + r"\[1\]" + + oe = OrdinalEncoder(encoded_missing_value=1) + + with pytest.raises(ValueError, match=error_msg): + oe.fit(X) + + +@pytest.mark.parametrize( + "X_train, X_test_trans_expected, X_roundtrip_expected", + [ + ( + # missing value is not in training set + # inverse transform will considering encoded nan as unknown + np.array([["a"], ["1"]], dtype=object), + [[0], [np.nan], [np.nan]], + np.asarray([["1"], [None], [None]], dtype=object), + ), + ( + # missing value in training set, + # inverse transform will considering encoded nan as missing + np.array([[np.nan], ["1"], ["a"]], dtype=object), + [[0], [np.nan], [np.nan]], + np.asarray([["1"], [np.nan], [np.nan]], dtype=object), + ), + ], +) +def test_ordinal_encoder_unknown_missing_interaction_both_nan( + X_train, 
X_test_trans_expected, X_roundtrip_expected +): + """Check transform when unknown_value and encoded_missing_value is nan. + + Non-regression test for #24082. + """ + oe = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=np.nan, + encoded_missing_value=np.nan, + ).fit(X_train) + + X_test = np.array([["1"], [np.nan], ["b"]]) + X_test_trans = oe.transform(X_test) + + # both nan and unknown are encoded as nan + assert_allclose(X_test_trans, X_test_trans_expected) + X_roundtrip = oe.inverse_transform(X_test_trans) + + n_samples = X_roundtrip_expected.shape[0] + for i in range(n_samples): + expected_val = X_roundtrip_expected[i, 0] + val = X_roundtrip[i, 0] + + if expected_val is None: + assert val is None + elif is_scalar_nan(expected_val): + assert np.isnan(val) + else: + assert val == expected_val + + +def test_one_hot_encoder_set_output(): + """Check OneHotEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + ohe = OneHotEncoder() + + ohe.set_output(transform="pandas") + + match = "Pandas output does not support sparse data. Set sparse_output=False" + with pytest.raises(ValueError, match=match): + ohe.fit_transform(X_df) + + ohe_default = OneHotEncoder(sparse_output=False).set_output(transform="default") + ohe_pandas = OneHotEncoder(sparse_output=False).set_output(transform="pandas") + + X_default = ohe_default.fit_transform(X_df) + X_pandas = ohe_pandas.fit_transform(X_df) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(ohe_pandas.get_feature_names_out(), X_pandas.columns) + + +def test_ordinal_set_output(): + """Check OrdinalEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + + ord_default = OrdinalEncoder().set_output(transform="default") + ord_pandas = OrdinalEncoder().set_output(transform="pandas") + + X_default = ord_default.fit_transform(X_df) + X_pandas = ord_pandas.fit_transform(X_df) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(ord_pandas.get_feature_names_out(), X_pandas.columns) + + +def test_predefined_categories_dtype(): + """Check that the categories_ dtype is `object` for string categories + + Regression test for gh-25171. + """ + categories = [["as", "mmas", "eas", "ras", "acs"], ["1", "2"]] + + enc = OneHotEncoder(categories=categories) + + enc.fit([["as", "1"]]) + + assert len(categories) == len(enc.categories_) + for n, cat in enumerate(enc.categories_): + assert cat.dtype == object + assert_array_equal(categories[n], cat) + + +def test_ordinal_encoder_missing_unknown_encoding_max(): + """Check missing value or unknown encoding can equal the cardinality.""" + X = np.array([["dog"], ["cat"], [np.nan]], dtype=object) + X_trans = OrdinalEncoder(encoded_missing_value=2).fit_transform(X) + assert_allclose(X_trans, [[1], [0], [2]]) + + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=2).fit(X) + X_test = np.array([["snake"]]) + X_trans = enc.transform(X_test) + assert_allclose(X_trans, [[2]]) + + +def test_drop_idx_infrequent_categories(): + """Check drop_idx is defined correctly with infrequent categories. + + Non-regression test for gh-25550. 
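+
+    With infrequent categories enabled, `drop_idx_` still indexes into
+    `categories_` (the original categories), not into the transformed output
+    columns; the assertions on `ohe.categories_[0][ohe.drop_idx_[0]]` below
+    rely on this.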
+ """ + X = np.array( + [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object + ).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="first").fit(X) + assert_array_equal( + ohe.get_feature_names_out(), ["x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"] + ) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "b" + + X = np.array([["a"] * 2 + ["b"] * 2 + ["c"] * 10], dtype=object).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="if_binary").fit(X) + assert_array_equal(ohe.get_feature_names_out(), ["x0_infrequent_sklearn"]) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "c" + + X = np.array( + [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object + ).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=["d"]).fit(X) + assert_array_equal( + ohe.get_feature_names_out(), ["x0_b", "x0_c", "x0_e", "x0_infrequent_sklearn"] + ) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "d" + + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=None).fit(X) + assert_array_equal( + ohe.get_feature_names_out(), + ["x0_b", "x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"], + ) + assert ohe.drop_idx_ is None + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 3}, + {"min_frequency": 6}, + {"min_frequency": 9}, + {"min_frequency": 0.24}, + {"min_frequency": 0.16}, + {"max_categories": 3, "min_frequency": 8}, + {"max_categories": 4, "min_frequency": 6}, + ], +) +def test_ordinal_encoder_infrequent_three_levels(kwargs): + """Test parameters for grouping 'a', and 'd' into the infrequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ordinal = OrdinalEncoder( + handle_unknown="use_encoded_value", unknown_value=-1, **kwargs + ).fit(X_train) + assert_array_equal(ordinal.categories_, [["a", "b", "c", "d"]]) + assert_array_equal(ordinal.infrequent_categories_, [["a", "d"]]) + + X_test = [["a"], ["b"], ["c"], ["d"], ["z"]] + expected_trans = [[2], [0], [1], [2], [-1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = [ + ["infrequent_sklearn"], + ["b"], + ["c"], + ["infrequent_sklearn"], + [None], + ] + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_three_levels_user_cats(): + """Test that the order of the categories provided by a user is respected. + + In this case 'c' is encoded as the first category and 'b' is encoded + as the second one. 
+ """ + + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ordinal = OrdinalEncoder( + categories=[["c", "d", "b", "a"]], + max_categories=3, + handle_unknown="use_encoded_value", + unknown_value=-1, + ).fit(X_train) + assert_array_equal(ordinal.categories_, [["c", "d", "b", "a"]]) + assert_array_equal(ordinal.infrequent_categories_, [["d", "a"]]) + + X_test = [["a"], ["b"], ["c"], ["d"], ["z"]] + expected_trans = [[2], [1], [0], [2], [-1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = [ + ["infrequent_sklearn"], + ["b"], + ["c"], + ["infrequent_sklearn"], + [None], + ] + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_mixed(): + """Test when feature 0 has infrequent categories and feature 1 does not.""" + + X = np.column_stack(([0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1])) + + ordinal = OrdinalEncoder(max_categories=3).fit(X) + + assert_array_equal(ordinal.infrequent_categories_[0], [1, 2]) + assert ordinal.infrequent_categories_[1] is None + + X_test = [[3, 0], [1, 1]] + expected_trans = [[1, 0], [2, 1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = np.array([[3, 0], ["infrequent_sklearn", 1]], dtype=object) + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_multiple_categories_dtypes(): + """Test infrequent categories with a pandas DataFrame with multiple dtypes.""" + + pd = pytest.importorskip("pandas") + categorical_dtype = pd.CategoricalDtype(["bird", "cat", "dog", "snake"]) + X = pd.DataFrame( + { + "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], + "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], + "categorical": pd.Series( + ["dog"] * 4 + ["cat"] * 3 + ["snake"] + ["bird"], + dtype=categorical_dtype, + ), + }, + columns=["str", "int", "categorical"], + ) + + ordinal = OrdinalEncoder(max_categories=3).fit(X) + # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be + # considered infrequent because they appear first when sorted + + # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. + # 0, 3, 12 will be considered infrequent because they appear first when + # sorted. 
+ + # X[:, 2] "snake" and "bird" or infrequent + + assert_array_equal(ordinal.infrequent_categories_[0], ["a", "b"]) + assert_array_equal(ordinal.infrequent_categories_[1], [0, 3, 12]) + assert_array_equal(ordinal.infrequent_categories_[2], ["bird", "snake"]) + + X_test = pd.DataFrame( + { + "str": ["a", "b", "f", "c"], + "int": [12, 0, 10, 5], + "categorical": pd.Series( + ["cat"] + ["snake"] + ["bird"] + ["dog"], + dtype=categorical_dtype, + ), + }, + columns=["str", "int", "categorical"], + ) + expected_trans = [[2, 2, 0], [2, 2, 2], [1, 1, 2], [0, 0, 1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + +def test_ordinal_encoder_infrequent_custom_mapping(): + """Check behavior of unknown_value and encoded_missing_value with infrequent.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]], dtype=object + ).T + + ordinal = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=2, + max_categories=2, + encoded_missing_value=3, + ).fit(X_train) + assert_array_equal(ordinal.infrequent_categories_, [["a", "c", "d"]]) + + X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object) + expected_trans = [[1], [0], [1], [1], [2], [3]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 6}, + {"min_frequency": 2}, + ], +) +def test_ordinal_encoder_all_frequent(kwargs): + """All categories are considered frequent have same encoding as default encoder.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + + adjusted_encoder = OrdinalEncoder( + **kwargs, handle_unknown="use_encoded_value", unknown_value=-1 + ).fit(X_train) + default_encoder = OrdinalEncoder( + handle_unknown="use_encoded_value", unknown_value=-1 + ).fit(X_train) + + X_test = [["a"], ["b"], ["c"], ["d"], ["e"]] + + assert_allclose( + adjusted_encoder.transform(X_test), default_encoder.transform(X_test) + ) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 1}, + {"min_frequency": 100}, + ], +) +def test_ordinal_encoder_all_infrequent(kwargs): + """When all categories are infrequent, they are all encoded as zero.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + encoder = OrdinalEncoder( + **kwargs, handle_unknown="use_encoded_value", unknown_value=-1 + ).fit(X_train) + + X_test = [["a"], ["b"], ["c"], ["d"], ["e"]] + assert_allclose(encoder.transform(X_test), [[0], [0], [0], [0], [-1]]) + + +def test_ordinal_encoder_missing_appears_frequent(): + """Check behavior when missing value appears frequently.""" + X = np.array( + [[np.nan] * 20 + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"]], + dtype=object, + ).T + ordinal = OrdinalEncoder(max_categories=3).fit(X) + + X_test = np.array([["snake", "cat", "dog", np.nan]], dtype=object).T + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, [[2], [0], [1], [np.nan]]) + + +def test_ordinal_encoder_missing_appears_infrequent(): + """Check behavior when missing value appears infrequently.""" + + # feature 0 has infrequent categories + # feature 1 has no infrequent categories + X = np.array( + [ + [np.nan] + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"], + ["red"] * 9 + ["green"] * 9, + ], + dtype=object, + ).T + ordinal = OrdinalEncoder(min_frequency=4).fit(X) + + X_test = np.array( + [ + ["snake", "red"], + ["deer", "green"], + [np.nan, "green"], + ["dog", "green"], 
+ ["cat", "red"], + ], + dtype=object, + ) + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, [[2, 1], [2, 0], [np.nan, 0], [1, 0], [0, 1]]) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_not_fitted(Encoder): + """Check that we raise a `NotFittedError` by calling transform before fit with + the encoders. + + One could expect that the passing the `categories` argument to the encoder + would make it stateless. However, `fit` is making a couple of check, such as the + position of `np.nan`. + """ + X = np.array([["A"], ["B"], ["C"]], dtype=object) + encoder = Encoder(categories=[["A", "B", "C"]]) + with pytest.raises(NotFittedError): + encoder.transform(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..b97500d43ef731b47fa5788a8bc9bd8ec47fd32a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py @@ -0,0 +1,1258 @@ +import sys + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal +from scipy import sparse +from scipy.interpolate import BSpline +from scipy.sparse import random as sparse_random + +from sklearn.linear_model import LinearRegression +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import ( + KBinsDiscretizer, + PolynomialFeatures, + SplineTransformer, +) +from sklearn.preprocessing._csr_polynomial_expansion import ( + _calc_expanded_nnz, + _calc_total_nnz, + _get_sizeof_LARGEST_INT_t, +) +from sklearn.utils._testing import assert_array_almost_equal +from sklearn.utils.fixes import ( + CSC_CONTAINERS, + CSR_CONTAINERS, + parse_version, + sp_version, +) + + +@pytest.mark.parametrize("est", (PolynomialFeatures, SplineTransformer)) +def test_polynomial_and_spline_array_order(est): + """Test that output array has the given order.""" + X = np.arange(10).reshape(5, 2) + + def is_c_contiguous(a): + return np.isfortran(a.T) + + assert is_c_contiguous(est().fit_transform(X)) + assert is_c_contiguous(est(order="C").fit_transform(X)) + assert np.isfortran(est(order="F").fit_transform(X)) + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ({"knots": [[1]]}, r"Number of knots, knots.shape\[0\], must be >= 2."), + ({"knots": [[1, 1], [2, 2]]}, r"knots.shape\[1\] == n_features is violated"), + ({"knots": [[1], [0]]}, "knots must be sorted without duplicates."), + ], +) +def test_spline_transformer_input_validation(params, err_msg): + """Test that we raise errors for invalid input in SplineTransformer.""" + X = [[1], [2]] + + with pytest.raises(ValueError, match=err_msg): + SplineTransformer(**params).fit(X) + + +@pytest.mark.parametrize("extrapolation", ["continue", "periodic"]) +def test_spline_transformer_integer_knots(extrapolation): + """Test that SplineTransformer accepts integer value knot positions.""" + X = np.arange(20).reshape(10, 2) + knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]] + _ = SplineTransformer( + degree=3, knots=knots, extrapolation=extrapolation + ).fit_transform(X) + + +def test_spline_transformer_feature_names(): + """Test that SplineTransformer generates correct features name.""" + X = np.arange(20).reshape(10, 2) + splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X) + feature_names = splt.get_feature_names_out() + assert_array_equal( + feature_names, + [ + "x0_sp_0", + 
"x0_sp_1", + "x0_sp_2", + "x0_sp_3", + "x0_sp_4", + "x1_sp_0", + "x1_sp_1", + "x1_sp_2", + "x1_sp_3", + "x1_sp_4", + ], + ) + + splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X) + feature_names = splt.get_feature_names_out(["a", "b"]) + assert_array_equal( + feature_names, + [ + "a_sp_0", + "a_sp_1", + "a_sp_2", + "a_sp_3", + "b_sp_0", + "b_sp_1", + "b_sp_2", + "b_sp_3", + ], + ) + + +@pytest.mark.parametrize( + "extrapolation", + ["constant", "linear", "continue", "periodic"], +) +@pytest.mark.parametrize("degree", [2, 3]) +def test_split_transform_feature_names_extrapolation_degree(extrapolation, degree): + """Test feature names are correct for different extrapolations and degree. + + Non-regression test for gh-25292. + """ + X = np.arange(20).reshape(10, 2) + splt = SplineTransformer(degree=degree, extrapolation=extrapolation).fit(X) + feature_names = splt.get_feature_names_out(["a", "b"]) + assert len(feature_names) == splt.n_features_out_ + + X_trans = splt.transform(X) + assert X_trans.shape[1] == len(feature_names) + + +@pytest.mark.parametrize("degree", range(1, 5)) +@pytest.mark.parametrize("n_knots", range(3, 5)) +@pytest.mark.parametrize("knots", ["uniform", "quantile"]) +@pytest.mark.parametrize("extrapolation", ["constant", "periodic"]) +def test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation): + """Test that B-splines are indeed a decomposition of unity. + + Splines basis functions must sum up to 1 per row, if we stay in between boundaries. + """ + X = np.linspace(0, 1, 100)[:, None] + # make the boundaries 0 and 1 part of X_train, for sure. + X_train = np.r_[[[0]], X[::2, :], [[1]]] + X_test = X[1::2, :] + + if extrapolation == "periodic": + n_knots = n_knots + degree # periodic splines require degree < n_knots + + splt = SplineTransformer( + n_knots=n_knots, + degree=degree, + knots=knots, + include_bias=True, + extrapolation=extrapolation, + ) + splt.fit(X_train) + for X in [X_train, X_test]: + assert_allclose(np.sum(splt.transform(X), axis=1), 1) + + +@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) +def test_spline_transformer_linear_regression(bias, intercept): + """Test that B-splines fit a sinusodial curve pretty well.""" + X = np.linspace(0, 10, 100)[:, None] + y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose + pipe = Pipeline( + steps=[ + ( + "spline", + SplineTransformer( + n_knots=15, + degree=3, + include_bias=bias, + extrapolation="constant", + ), + ), + ("ols", LinearRegression(fit_intercept=intercept)), + ] + ) + pipe.fit(X, y) + assert_allclose(pipe.predict(X), y, rtol=1e-3) + + +@pytest.mark.parametrize( + ["knots", "n_knots", "sample_weight", "expected_knots"], + [ + ("uniform", 3, None, np.array([[0, 2], [3, 8], [6, 14]])), + ( + "uniform", + 3, + np.array([0, 0, 1, 1, 0, 3, 1]), + np.array([[2, 2], [4, 8], [6, 14]]), + ), + ("uniform", 4, None, np.array([[0, 2], [2, 6], [4, 10], [6, 14]])), + ("quantile", 3, None, np.array([[0, 2], [3, 3], [6, 14]])), + ( + "quantile", + 3, + np.array([0, 0, 1, 1, 0, 3, 1]), + np.array([[2, 2], [5, 8], [6, 14]]), + ), + ], +) +def test_spline_transformer_get_base_knot_positions( + knots, n_knots, sample_weight, expected_knots +): + """Check the behaviour to find knot positions with and without sample_weight.""" + X = np.array([[0, 2], [0, 2], [2, 2], [3, 3], [4, 6], [5, 8], [6, 14]]) + base_knots = SplineTransformer._get_base_knot_positions( + X=X, knots=knots, n_knots=n_knots, sample_weight=sample_weight + ) + 
assert_allclose(base_knots, expected_knots) + + +@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) +def test_spline_transformer_periodic_linear_regression(bias, intercept): + """Test that B-splines fit a periodic curve pretty well.""" + + # "+ 3" to avoid the value 0 in assert_allclose + def f(x): + return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3 + + X = np.linspace(0, 1, 101)[:, None] + pipe = Pipeline( + steps=[ + ( + "spline", + SplineTransformer( + n_knots=20, + degree=3, + include_bias=bias, + extrapolation="periodic", + ), + ), + ("ols", LinearRegression(fit_intercept=intercept)), + ] + ) + pipe.fit(X, f(X[:, 0])) + + # Generate larger array to check periodic extrapolation + X_ = np.linspace(-1, 2, 301)[:, None] + predictions = pipe.predict(X_) + assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01) + assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3) + + +def test_spline_transformer_periodic_spline_backport(): + """Test that the backport of extrapolate="periodic" works correctly""" + X = np.linspace(-2, 3.5, 10)[:, None] + degree = 2 + + # Use periodic extrapolation backport in SplineTransformer + transformer = SplineTransformer( + degree=degree, extrapolation="periodic", knots=[[-1.0], [0.0], [1.0]] + ) + Xt = transformer.fit_transform(X) + + # Use periodic extrapolation in BSpline + coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) + spl = BSpline(np.arange(-3, 4), coef, degree, "periodic") + Xspl = spl(X[:, 0]) + assert_allclose(Xt, Xspl) + + +def test_spline_transformer_periodic_splines_periodicity(): + """Test if shifted knots result in the same transformation up to permutation.""" + X = np.linspace(0, 10, 101)[:, None] + + transformer_1 = SplineTransformer( + degree=3, + extrapolation="periodic", + knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]], + ) + + transformer_2 = SplineTransformer( + degree=3, + extrapolation="periodic", + knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]], + ) + + Xt_1 = transformer_1.fit_transform(X) + Xt_2 = transformer_2.fit_transform(X) + + assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]]) + + +@pytest.mark.parametrize("degree", [3, 5]) +def test_spline_transformer_periodic_splines_smoothness(degree): + """Test that spline transformation is smooth at first / last knot.""" + X = np.linspace(-2, 10, 10_000)[:, None] + + transformer = SplineTransformer( + degree=degree, + extrapolation="periodic", + knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]], + ) + Xt = transformer.fit_transform(X) + + delta = (X.max() - X.min()) / len(X) + tol = 10 * delta + + dXt = Xt + # We expect splines of degree `degree` to be (`degree`-1) times + # continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th + # derivative should be continuous. This is the case if the (d+1)-th + # numerical derivative is reasonably small (smaller than `tol` in absolute + # value). We thus compute d-th numeric derivatives for d = 1, ..., `degree` + # and compare them to `tol`. + # + # Note that the 0-th derivative is the function itself, such that we are + # also checking its continuity. + for d in range(1, degree + 1): + # Check continuity of the (d-1)-th derivative + diff = np.diff(dXt, axis=0) + assert np.abs(diff).max() < tol + # Compute d-th numeric derivative + dXt = diff / delta + + # As degree `degree` splines are not `degree` times continuously + # differentiable at the knots, the `degree + 1`-th numeric derivative + # should have spikes at the knots. 
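+    # At this point `dXt` holds the `degree`-th numerical derivative, so one
+    # more `np.diff` yields (a scaled version of) the (`degree` + 1)-th
+    # derivative, hence the lower bound below rather than an upper bound.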
+ diff = np.diff(dXt, axis=0) + assert np.abs(diff).max() > 1 + + +@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) +@pytest.mark.parametrize("degree", [1, 2, 3, 4, 5]) +def test_spline_transformer_extrapolation(bias, intercept, degree): + """Test that B-spline extrapolation works correctly.""" + # we use a straight line for that + X = np.linspace(-1, 1, 100)[:, None] + y = X.squeeze() + + # 'constant' + pipe = Pipeline( + [ + [ + "spline", + SplineTransformer( + n_knots=4, + degree=degree, + include_bias=bias, + extrapolation="constant", + ), + ], + ["ols", LinearRegression(fit_intercept=intercept)], + ] + ) + pipe.fit(X, y) + assert_allclose(pipe.predict([[-10], [5]]), [-1, 1]) + + # 'linear' + pipe = Pipeline( + [ + [ + "spline", + SplineTransformer( + n_knots=4, + degree=degree, + include_bias=bias, + extrapolation="linear", + ), + ], + ["ols", LinearRegression(fit_intercept=intercept)], + ] + ) + pipe.fit(X, y) + assert_allclose(pipe.predict([[-10], [5]]), [-10, 5]) + + # 'error' + splt = SplineTransformer( + n_knots=4, degree=degree, include_bias=bias, extrapolation="error" + ) + splt.fit(X) + msg = "X contains values beyond the limits of the knots" + with pytest.raises(ValueError, match=msg): + splt.transform([[-10]]) + with pytest.raises(ValueError, match=msg): + splt.transform([[5]]) + + +def test_spline_transformer_kbindiscretizer(): + """Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer.""" + rng = np.random.RandomState(97531) + X = rng.randn(200).reshape(200, 1) + n_bins = 5 + n_knots = n_bins + 1 + + splt = SplineTransformer( + n_knots=n_knots, degree=0, knots="quantile", include_bias=True + ) + splines = splt.fit_transform(X) + + kbd = KBinsDiscretizer(n_bins=n_bins, encode="onehot-dense", strategy="quantile") + kbins = kbd.fit_transform(X) + + # Though they should be exactly equal, we test approximately with high + # accuracy. 
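+    # rtol=1e-13 is essentially exact for float64 while still tolerating
+    # platform-dependent rounding differences between the two code paths.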
+ assert_allclose(splines, kbins, rtol=1e-13) + + +@pytest.mark.skipif( + sp_version < parse_version("1.8.0"), + reason="The option `sparse_output` is available as of scipy 1.8.0", +) +@pytest.mark.parametrize("degree", range(1, 3)) +@pytest.mark.parametrize("knots", ["uniform", "quantile"]) +@pytest.mark.parametrize( + "extrapolation", ["error", "constant", "linear", "continue", "periodic"] +) +@pytest.mark.parametrize("include_bias", [False, True]) +def test_spline_transformer_sparse_output( + degree, knots, extrapolation, include_bias, global_random_seed +): + rng = np.random.RandomState(global_random_seed) + X = rng.randn(200).reshape(40, 5) + + splt_dense = SplineTransformer( + degree=degree, + knots=knots, + extrapolation=extrapolation, + include_bias=include_bias, + sparse_output=False, + ) + splt_sparse = SplineTransformer( + degree=degree, + knots=knots, + extrapolation=extrapolation, + include_bias=include_bias, + sparse_output=True, + ) + + splt_dense.fit(X) + splt_sparse.fit(X) + + X_trans_sparse = splt_sparse.transform(X) + X_trans_dense = splt_dense.transform(X) + assert sparse.issparse(X_trans_sparse) and X_trans_sparse.format == "csr" + assert_allclose(X_trans_dense, X_trans_sparse.toarray()) + + # extrapolation regime + X_min = np.amin(X, axis=0) + X_max = np.amax(X, axis=0) + X_extra = np.r_[ + np.linspace(X_min - 5, X_min, 10), np.linspace(X_max, X_max + 5, 10) + ] + if extrapolation == "error": + msg = "X contains values beyond the limits of the knots" + with pytest.raises(ValueError, match=msg): + splt_dense.transform(X_extra) + msg = "Out of bounds" + with pytest.raises(ValueError, match=msg): + splt_sparse.transform(X_extra) + else: + assert_allclose( + splt_dense.transform(X_extra), splt_sparse.transform(X_extra).toarray() + ) + + +@pytest.mark.skipif( + sp_version >= parse_version("1.8.0"), + reason="The option `sparse_output` is available as of scipy 1.8.0", +) +def test_spline_transformer_sparse_output_raise_error_for_old_scipy(): + """Test that SplineTransformer with sparse=True raises for scipy<1.8.0.""" + X = [[1], [2]] + with pytest.raises(ValueError, match="scipy>=1.8.0"): + SplineTransformer(sparse_output=True).fit(X) + + +@pytest.mark.parametrize("n_knots", [5, 10]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("degree", [3, 4]) +@pytest.mark.parametrize( + "extrapolation", ["error", "constant", "linear", "continue", "periodic"] +) +@pytest.mark.parametrize("sparse_output", [False, True]) +def test_spline_transformer_n_features_out( + n_knots, include_bias, degree, extrapolation, sparse_output +): + """Test that transform results in n_features_out_ features.""" + if sparse_output and sp_version < parse_version("1.8.0"): + pytest.skip("The option `sparse_output` is available as of scipy 1.8.0") + + splt = SplineTransformer( + n_knots=n_knots, + degree=degree, + include_bias=include_bias, + extrapolation=extrapolation, + sparse_output=sparse_output, + ) + X = np.linspace(0, 1, 10)[:, None] + splt.fit(X) + + assert splt.transform(X).shape[1] == splt.n_features_out_ + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ({"degree": (-1, 2)}, r"degree=\(min_degree, max_degree\) must"), + ({"degree": (0, 1.5)}, r"degree=\(min_degree, max_degree\) must"), + ({"degree": (3, 2)}, r"degree=\(min_degree, max_degree\) must"), + ({"degree": (1, 2, 3)}, r"int or tuple \(min_degree, max_degree\)"), + ], +) +def test_polynomial_features_input_validation(params, err_msg): + """Test that we raise errors for invalid input in 
PolynomialFeatures.""" + X = [[1], [2]] + + with pytest.raises(ValueError, match=err_msg): + PolynomialFeatures(**params).fit(X) + + +@pytest.fixture() +def single_feature_degree3(): + X = np.arange(6)[:, np.newaxis] + P = np.hstack([np.ones_like(X), X, X**2, X**3]) + return X, P + + +@pytest.mark.parametrize( + "degree, include_bias, interaction_only, indices", + [ + (3, True, False, slice(None, None)), + (3, False, False, slice(1, None)), + (3, True, True, [0, 1]), + (3, False, True, [1]), + ((2, 3), True, False, [0, 2, 3]), + ((2, 3), False, False, [2, 3]), + ((2, 3), True, True, [0]), + ((2, 3), False, True, []), + ], +) +@pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS) +def test_polynomial_features_one_feature( + single_feature_degree3, + degree, + include_bias, + interaction_only, + indices, + X_container, +): + """Test PolynomialFeatures on single feature up to degree 3.""" + X, P = single_feature_degree3 + if X_container is not None: + X = X_container(X) + tf = PolynomialFeatures( + degree=degree, include_bias=include_bias, interaction_only=interaction_only + ).fit(X) + out = tf.transform(X) + if X_container is not None: + out = out.toarray() + assert_allclose(out, P[:, indices]) + if tf.n_output_features_ > 0: + assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_) + + +@pytest.fixture() +def two_features_degree3(): + X = np.arange(6).reshape((3, 2)) + x1 = X[:, :1] + x2 = X[:, 1:] + P = np.hstack( + [ + x1**0 * x2**0, # 0 + x1**1 * x2**0, # 1 + x1**0 * x2**1, # 2 + x1**2 * x2**0, # 3 + x1**1 * x2**1, # 4 + x1**0 * x2**2, # 5 + x1**3 * x2**0, # 6 + x1**2 * x2**1, # 7 + x1**1 * x2**2, # 8 + x1**0 * x2**3, # 9 + ] + ) + return X, P + + +@pytest.mark.parametrize( + "degree, include_bias, interaction_only, indices", + [ + (2, True, False, slice(0, 6)), + (2, False, False, slice(1, 6)), + (2, True, True, [0, 1, 2, 4]), + (2, False, True, [1, 2, 4]), + ((2, 2), True, False, [0, 3, 4, 5]), + ((2, 2), False, False, [3, 4, 5]), + ((2, 2), True, True, [0, 4]), + ((2, 2), False, True, [4]), + (3, True, False, slice(None, None)), + (3, False, False, slice(1, None)), + (3, True, True, [0, 1, 2, 4]), + (3, False, True, [1, 2, 4]), + ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]), + ((2, 3), False, False, slice(3, None)), + ((2, 3), True, True, [0, 4]), + ((2, 3), False, True, [4]), + ((3, 3), True, False, [0, 6, 7, 8, 9]), + ((3, 3), False, False, [6, 7, 8, 9]), + ((3, 3), True, True, [0]), + ((3, 3), False, True, []), # would need 3 input features + ], +) +@pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS) +def test_polynomial_features_two_features( + two_features_degree3, + degree, + include_bias, + interaction_only, + indices, + X_container, +): + """Test PolynomialFeatures on 2 features up to degree 3.""" + X, P = two_features_degree3 + if X_container is not None: + X = X_container(X) + tf = PolynomialFeatures( + degree=degree, include_bias=include_bias, interaction_only=interaction_only + ).fit(X) + out = tf.transform(X) + if X_container is not None: + out = out.toarray() + assert_allclose(out, P[:, indices]) + if tf.n_output_features_ > 0: + assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_) + + +def test_polynomial_feature_names(): + X = np.arange(30).reshape(10, 3) + poly = PolynomialFeatures(degree=2, include_bias=True).fit(X) + feature_names = poly.get_feature_names_out() + assert_array_equal( + ["1", "x0", "x1", "x2", "x0^2", "x0 x1", "x0 x2", "x1^2", "x1 x2", "x2^2"], + feature_names, 
+ ) + assert len(feature_names) == poly.transform(X).shape[1] + + poly = PolynomialFeatures(degree=3, include_bias=False).fit(X) + feature_names = poly.get_feature_names_out(["a", "b", "c"]) + assert_array_equal( + [ + "a", + "b", + "c", + "a^2", + "a b", + "a c", + "b^2", + "b c", + "c^2", + "a^3", + "a^2 b", + "a^2 c", + "a b^2", + "a b c", + "a c^2", + "b^3", + "b^2 c", + "b c^2", + "c^3", + ], + feature_names, + ) + assert len(feature_names) == poly.transform(X).shape[1] + + poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X) + feature_names = poly.get_feature_names_out(["a", "b", "c"]) + assert_array_equal( + [ + "a^2", + "a b", + "a c", + "b^2", + "b c", + "c^2", + "a^3", + "a^2 b", + "a^2 c", + "a b^2", + "a b c", + "a c^2", + "b^3", + "b^2 c", + "b c^2", + "c^3", + ], + feature_names, + ) + assert len(feature_names) == poly.transform(X).shape[1] + + poly = PolynomialFeatures( + degree=(3, 3), include_bias=True, interaction_only=True + ).fit(X) + feature_names = poly.get_feature_names_out(["a", "b", "c"]) + assert_array_equal(["1", "a b c"], feature_names) + assert len(feature_names) == poly.transform(X).shape[1] + + # test some unicode + poly = PolynomialFeatures(degree=1, include_bias=True).fit(X) + feature_names = poly.get_feature_names_out(["\u0001F40D", "\u262e", "\u05d0"]) + assert_array_equal(["1", "\u0001F40D", "\u262e", "\u05d0"], feature_names) + + +@pytest.mark.parametrize( + ["deg", "include_bias", "interaction_only", "dtype"], + [ + (1, True, False, int), + (2, True, False, int), + (2, True, False, np.float32), + (2, True, False, np.float64), + (3, False, False, np.float64), + (3, False, True, np.float64), + (4, False, False, np.float64), + (4, False, True, np.float64), + ], +) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_polynomial_features_csc_X( + deg, include_bias, interaction_only, dtype, csc_container +): + rng = np.random.RandomState(0) + X = rng.randint(0, 2, (100, 2)) + X_csc = csc_container(X) + + est = PolynomialFeatures( + deg, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csc = est.fit_transform(X_csc.astype(dtype)) + Xt_dense = est.fit_transform(X.astype(dtype)) + + assert sparse.issparse(Xt_csc) and Xt_csc.format == "csc" + assert Xt_csc.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csc.toarray(), Xt_dense) + + +@pytest.mark.parametrize( + ["deg", "include_bias", "interaction_only", "dtype"], + [ + (1, True, False, int), + (2, True, False, int), + (2, True, False, np.float32), + (2, True, False, np.float64), + (3, False, False, np.float64), + (3, False, True, np.float64), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X( + deg, include_bias, interaction_only, dtype, csr_container +): + rng = np.random.RandomState(0) + X = rng.randint(0, 2, (100, 2)) + X_csr = csr_container(X) + + est = PolynomialFeatures( + deg, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csr = est.fit_transform(X_csr.astype(dtype)) + Xt_dense = est.fit_transform(X.astype(dtype, copy=False)) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize("n_features", [1, 4, 5]) +@pytest.mark.parametrize( + "min_degree, max_degree", [(0, 1), (0, 2), (1, 3), (0, 4), (3, 4)] +) +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) 
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_num_combinations( + n_features, min_degree, max_degree, interaction_only, include_bias, csr_container +): + """ + Test that n_output_features_ is calculated correctly. + """ + x = csr_container(([1], ([0], [n_features - 1]))) + est = PolynomialFeatures( + degree=max_degree, + interaction_only=interaction_only, + include_bias=include_bias, + ) + est.fit(x) + num_combos = est.n_output_features_ + + combos = PolynomialFeatures._combinations( + n_features=n_features, + min_degree=0, + max_degree=max_degree, + interaction_only=interaction_only, + include_bias=include_bias, + ) + assert num_combos == sum([1 for _ in combos]) + + +@pytest.mark.parametrize( + ["deg", "include_bias", "interaction_only", "dtype"], + [ + (2, True, False, np.float32), + (2, True, False, np.float64), + (3, False, False, np.float64), + (3, False, True, np.float64), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_floats( + deg, include_bias, interaction_only, dtype, csr_container +): + X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=0)) + X = X_csr.toarray() + + est = PolynomialFeatures( + deg, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csr = est.fit_transform(X_csr.astype(dtype)) + Xt_dense = est.fit_transform(X.astype(dtype)) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize( + ["zero_row_index", "deg", "interaction_only"], + [ + (0, 2, True), + (1, 2, True), + (2, 2, True), + (0, 3, True), + (1, 3, True), + (2, 3, True), + (0, 2, False), + (1, 2, False), + (2, 2, False), + (0, 3, False), + (1, 3, False), + (2, 3, False), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_zero_row( + zero_row_index, deg, interaction_only, csr_container +): + X_csr = csr_container(sparse_random(3, 10, 1.0, random_state=0)) + X_csr[zero_row_index, :] = 0.0 + X = X_csr.toarray() + + est = PolynomialFeatures(deg, include_bias=False, interaction_only=interaction_only) + Xt_csr = est.fit_transform(X_csr) + Xt_dense = est.fit_transform(X) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +# This degree should always be one more than the highest degree supported by +# _csr_expansion. 
+@pytest.mark.parametrize( + ["include_bias", "interaction_only"], + [(True, True), (True, False), (False, True), (False, False)], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_degree_4( + include_bias, interaction_only, csr_container +): + X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=0)) + X = X_csr.toarray() + + est = PolynomialFeatures( + 4, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csr = est.fit_transform(X_csr) + Xt_dense = est.fit_transform(X) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize( + ["deg", "dim", "interaction_only"], + [ + (2, 1, True), + (2, 2, True), + (3, 1, True), + (3, 2, True), + (3, 3, True), + (2, 1, False), + (2, 2, False), + (3, 1, False), + (3, 2, False), + (3, 3, False), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only, csr_container): + X_csr = csr_container(sparse_random(1000, dim, 0.5, random_state=0)) + X = X_csr.toarray() + + est = PolynomialFeatures(deg, interaction_only=interaction_only) + Xt_csr = est.fit_transform(X_csr) + Xt_dense = est.fit_transform(X) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_index_overflow_non_regression( + interaction_only, include_bias, csr_container +): + """Check the automatic index dtype promotion to `np.int64` when needed. + + This ensures that sufficiently large input configurations get + properly promoted to use `np.int64` for index and indptr representation + while preserving data integrity. Non-regression test for gh-16803. + + Note that this is only possible for Python runtimes with a 64 bit address + space. On 32 bit platforms, a `ValueError` is raised instead. + """ + + def degree_2_calc(d, i, j): + if interaction_only: + return d * i - (i**2 + 3 * i) // 2 - 1 + j + else: + return d * i - (i**2 + i) // 2 + j + + n_samples = 13 + n_features = 120001 + data_dtype = np.float32 + data = np.arange(1, 5, dtype=np.int64) + row = np.array([n_samples - 2, n_samples - 2, n_samples - 1, n_samples - 1]) + # An int64 dtype is required to avoid overflow error on Windows within the + # `degree_2_calc` function. + col = np.array( + [n_features - 2, n_features - 1, n_features - 2, n_features - 1], dtype=np.int64 + ) + X = csr_container( + (data, (row, col)), + shape=(n_samples, n_features), + dtype=data_dtype, + ) + pf = PolynomialFeatures( + interaction_only=interaction_only, include_bias=include_bias, degree=2 + ) + + # Calculate the number of combinations a-priori, and if needed check for + # the correct ValueError and terminate the test early. 
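+    # For degree=2 that count is the n_features linear terms, the
+    # C(n_features, 2) pairwise interactions, the n_features squared terms
+    # (dropped when interaction_only) and an optional bias column; with
+    # n_features=120001 this is roughly 7.2e9 output columns, far beyond the
+    # int32 index range yet still indexable on 64 bit platforms.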
+ num_combinations = pf._num_combinations( + n_features=n_features, + min_degree=0, + max_degree=2, + interaction_only=pf.interaction_only, + include_bias=pf.include_bias, + ) + if num_combinations > np.iinfo(np.intp).max: + msg = ( + r"The output that would result from the current configuration would have" + r" \d* features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit(X) + return + X_trans = pf.fit_transform(X) + row_nonzero, col_nonzero = X_trans.nonzero() + n_degree_1_features_out = n_features + include_bias + max_degree_2_idx = ( + degree_2_calc(n_features, col[int(not interaction_only)], col[1]) + + n_degree_1_features_out + ) + + # Account for bias of all samples except last one which will be handled + # separately since there are distinct data values before it + data_target = [1] * (n_samples - 2) if include_bias else [] + col_nonzero_target = [0] * (n_samples - 2) if include_bias else [] + + for i in range(2): + x = data[2 * i] + y = data[2 * i + 1] + x_idx = col[2 * i] + y_idx = col[2 * i + 1] + if include_bias: + data_target.append(1) + col_nonzero_target.append(0) + data_target.extend([x, y]) + col_nonzero_target.extend( + [x_idx + int(include_bias), y_idx + int(include_bias)] + ) + if not interaction_only: + data_target.extend([x * x, x * y, y * y]) + col_nonzero_target.extend( + [ + degree_2_calc(n_features, x_idx, x_idx) + n_degree_1_features_out, + degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out, + degree_2_calc(n_features, y_idx, y_idx) + n_degree_1_features_out, + ] + ) + else: + data_target.extend([x * y]) + col_nonzero_target.append( + degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out + ) + + nnz_per_row = int(include_bias) + 3 + 2 * int(not interaction_only) + + assert pf.n_output_features_ == max_degree_2_idx + 1 + assert X_trans.dtype == data_dtype + assert X_trans.shape == (n_samples, max_degree_2_idx + 1) + assert X_trans.indptr.dtype == X_trans.indices.dtype == np.int64 + # Ensure that dtype promotion was actually required: + assert X_trans.indices.max() > np.iinfo(np.int32).max + + row_nonzero_target = list(range(n_samples - 2)) if include_bias else [] + row_nonzero_target.extend( + [n_samples - 2] * nnz_per_row + [n_samples - 1] * nnz_per_row + ) + + assert_allclose(X_trans.data, data_target) + assert_array_equal(row_nonzero, row_nonzero_target) + assert_array_equal(col_nonzero, col_nonzero_target) + + +@pytest.mark.parametrize( + "degree, n_features", + [ + # Needs promotion to int64 when interaction_only=False + (2, 65535), + (3, 2344), + # This guarantees that the intermediate operation when calculating + # output columns would overflow a C-long, hence checks that python- + # longs are being used. + (2, int(np.sqrt(np.iinfo(np.int64).max) + 1)), + (3, 65535), + # This case tests the second clause of the overflow check which + # takes into account the value of `n_features` itself. + (2, int(np.sqrt(np.iinfo(np.int64).max))), + ], +) +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_index_overflow( + degree, n_features, interaction_only, include_bias, csr_container +): + """Tests known edge-cases to the dtype promotion strategy and custom + Cython code, including a current bug in the upstream + `scipy.sparse.hstack`. 
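+
+    For `degree=2` without `interaction_only`, for instance, the expansion of
+    `n` features has `1 + n + n * (n + 1) / 2` output columns (including the
+    bias column), so the parametrized `n_features` values are chosen to
+    straddle both the int32 index limit and the range where intermediate
+    products overflow a C long.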
+ """ + data = [1.0] + row = [0] + col = [n_features - 1] + + # First degree index + expected_indices = [ + n_features - 1 + int(include_bias), + ] + # Second degree index + expected_indices.append(n_features * (n_features + 1) // 2 + expected_indices[0]) + # Third degree index + expected_indices.append( + n_features * (n_features + 1) * (n_features + 2) // 6 + expected_indices[1] + ) + + X = csr_container((data, (row, col))) + pf = PolynomialFeatures( + interaction_only=interaction_only, include_bias=include_bias, degree=degree + ) + + # Calculate the number of combinations a-priori, and if needed check for + # the correct ValueError and terminate the test early. + num_combinations = pf._num_combinations( + n_features=n_features, + min_degree=0, + max_degree=degree, + interaction_only=pf.interaction_only, + include_bias=pf.include_bias, + ) + if num_combinations > np.iinfo(np.intp).max: + msg = ( + r"The output that would result from the current configuration would have" + r" \d* features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit(X) + return + + # In SciPy < 1.8, a bug occurs when an intermediate matrix in + # `to_stack` in `hstack` fits within int32 however would require int64 when + # combined with all previous matrices in `to_stack`. + if sp_version < parse_version("1.8.0"): + has_bug = False + max_int32 = np.iinfo(np.int32).max + cumulative_size = n_features + include_bias + for deg in range(2, degree + 1): + max_indptr = _calc_total_nnz(X.indptr, interaction_only, deg) + max_indices = _calc_expanded_nnz(n_features, interaction_only, deg) - 1 + cumulative_size += max_indices + 1 + needs_int64 = max(max_indices, max_indptr) > max_int32 + has_bug |= not needs_int64 and cumulative_size > max_int32 + if has_bug: + msg = r"In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`" + with pytest.raises(ValueError, match=msg): + X_trans = pf.fit_transform(X) + return + + # When `n_features>=65535`, `scipy.sparse.hstack` may not use the right + # dtype for representing indices and indptr if `n_features` is still + # small enough so that each block matrix's indices and indptr arrays + # can be represented with `np.int32`. We test `n_features==65535` + # since it is guaranteed to run into this bug. 
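+    # Concretely, with `n_features == 65535`, `degree == 2` and squares kept
+    # (no `interaction_only`), the quadratic block alone has
+    # 65535 * 65536 // 2 == 2_147_450_880 columns, which still fits in int32
+    # (max 2_147_483_647); only once it is stacked with the linear block (and
+    # optional bias) does the total exceed the int32 limit and require int64
+    # indices.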
+ if ( + sp_version < parse_version("1.9.2") + and n_features == 65535 + and degree == 2 + and not interaction_only + ): # pragma: no cover + msg = r"In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" + with pytest.raises(ValueError, match=msg): + X_trans = pf.fit_transform(X) + return + X_trans = pf.fit_transform(X) + + expected_dtype = np.int64 if num_combinations > np.iinfo(np.int32).max else np.int32 + # Terms higher than first degree + non_bias_terms = 1 + (degree - 1) * int(not interaction_only) + expected_nnz = int(include_bias) + non_bias_terms + assert X_trans.dtype == X.dtype + assert X_trans.shape == (1, pf.n_output_features_) + assert X_trans.indptr.dtype == X_trans.indices.dtype == expected_dtype + assert X_trans.nnz == expected_nnz + + if include_bias: + assert X_trans[0, 0] == pytest.approx(1.0) + for idx in range(non_bias_terms): + assert X_trans[0, expected_indices[idx]] == pytest.approx(1.0) + + offset = interaction_only * n_features + if degree == 3: + offset *= 1 + n_features + assert pf.n_output_features_ == expected_indices[degree - 1] + 1 - offset + + +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_too_large_to_index( + interaction_only, include_bias, csr_container +): + n_features = np.iinfo(np.int64).max // 2 + data = [1.0] + row = [0] + col = [n_features - 1] + X = csr_container((data, (row, col))) + pf = PolynomialFeatures( + interaction_only=interaction_only, include_bias=include_bias, degree=(2, 2) + ) + msg = ( + r"The output that would result from the current configuration would have \d*" + r" features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit(X) + with pytest.raises(ValueError, match=msg): + pf.fit_transform(X) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_polynomial_features_behaviour_on_zero_degree(sparse_container): + """Check that PolynomialFeatures raises error when degree=0 and include_bias=False, + and output a single constant column when include_bias=True + """ + X = np.ones((10, 2)) + poly = PolynomialFeatures(degree=0, include_bias=False) + err_msg = ( + "Setting degree to zero and include_bias to False would result in" + " an empty output array." + ) + with pytest.raises(ValueError, match=err_msg): + poly.fit_transform(X) + + poly = PolynomialFeatures(degree=(0, 0), include_bias=False) + err_msg = ( + "Setting both min_degree and max_degree to zero and include_bias to" + " False would result in an empty output array." 
+ ) + with pytest.raises(ValueError, match=err_msg): + poly.fit_transform(X) + + for _X in [X, sparse_container(X)]: + poly = PolynomialFeatures(degree=0, include_bias=True) + output = poly.fit_transform(_X) + # convert to dense array if needed + if sparse.issparse(output): + output = output.toarray() + assert_array_equal(output, np.ones((X.shape[0], 1))) + + +def test_sizeof_LARGEST_INT_t(): + # On Windows, scikit-learn is typically compiled with MSVC that + # does not support int128 arithmetic (at the time of writing): + # https://stackoverflow.com/a/6761962/163740 + if sys.platform == "win32" or ( + sys.maxsize <= 2**32 and sys.platform != "emscripten" + ): + expected_size = 8 + else: + expected_size = 16 + + assert _get_sizeof_LARGEST_INT_t() == expected_size + + +@pytest.mark.xfail( + sys.platform == "win32", + reason=( + "On Windows, scikit-learn is typically compiled with MSVC that does not support" + " int128 arithmetic (at the time of writing)" + ), + run=True, +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_windows_fail(csr_container): + # Minimum needed to ensure integer overflow occurs while guaranteeing an + # int64-indexable output. + n_features = int(np.iinfo(np.int64).max ** (1 / 3) + 3) + data = [1.0] + row = [0] + col = [n_features - 1] + + # First degree index + expected_indices = [ + n_features - 1, + ] + # Second degree index + expected_indices.append( + int(n_features * (n_features + 1) // 2 + expected_indices[0]) + ) + # Third degree index + expected_indices.append( + int(n_features * (n_features + 1) * (n_features + 2) // 6 + expected_indices[1]) + ) + + X = csr_container((data, (row, col))) + pf = PolynomialFeatures(interaction_only=False, include_bias=False, degree=3) + if sys.maxsize <= 2**32: + msg = ( + r"The output that would result from the current configuration would" + r" have \d*" + r" features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit_transform(X) + else: + X_trans = pf.fit_transform(X) + for idx in range(3): + assert X_trans[0, expected_indices[idx]] == pytest.approx(1.0)
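+
+
+# A minimal sketch, not part of the upstream suite: it restates the
+# combinatorics the tests above rely on using only the public
+# PolynomialFeatures API plus `math.comb`, and assumes the module-level
+# `np` and `PolynomialFeatures` imports already used throughout this file.
+def test_polynomial_features_output_count_sketch():
+    from math import comb
+
+    X = np.arange(10, dtype=np.float64).reshape(5, 2)
+
+    # Full expansion: the number of monomials of degree <= d in n features is
+    # comb(n + d, d); for n == 2 and d == 3 that is 10 output columns.
+    full = PolynomialFeatures(degree=3).fit(X)
+    assert full.n_output_features_ == comb(2 + 3, 3) == 10
+
+    # A (min_degree, max_degree) range keeps only the degree-2 and degree-3
+    # monomials: 3 + 4 == 7 columns for two input features.
+    ranged = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X)
+    assert ranged.n_output_features_ == 7
+    assert ranged.transform(X).shape == (5, 7)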