diff --git a/.gitattributes b/.gitattributes index a86e23a94519017cc3bd1392cec9121c107fc130..65fd7e4a56aa794c117e9c0a6caae223d97bc2b5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -213,3 +213,4 @@ llmeval-env/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_6 llmeval-env/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..83ac698458c7adac8bcda219b26f50cb0b2a2100 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e6a38d79d8f9e53a2ce11b68b4153062d4e96ec0b368d02b2e64f1b33c51693 +size 551 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b3c915315eff5a266c715e3f99584b16ec06ea8f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190 +size 306 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b3c915315eff5a266c715e3f99584b16ec06ea8f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190 +size 306 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..9c2f6f263750517c2b3ca25942cc4a426bc72de0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ef6025425fdfc5f736555ea385252af5bcbf62383615db82489366d4f96a0a7 +size 327 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1.json.gz 
b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..9cd17f124ef74920b925490ecc8e415dcd59d225 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9da09e9a6031d060ec416f639a6bf34989e6c88ce641d10621eb906ba1d8c293 +size 99 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-s-act-.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..29b93d4214dac84a592173457e4eac04c15bb926 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35890d08165c804526b48aad462d7ccc09e808bd7975ba604bd612b9608797ac +size 319 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..7bdb62f1628f096b9f91eb2e94ffc413bab4696c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7ee24adabd4aaed6419b43fe9d3f86d55fcf4bee0f1698ae21d86c2701314e3 +size 2532 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..65982d59860e015a25a42d8bb57f72bf327c9e0b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:066a216679b197cc51946e17ee9a2e28215425991b0ceb7f10988c14f7f3f869 +size 2473 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..faf70da9cea25d998883721d679ca9f0030d9575 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec4f2d6bc4df3882b08bba01571e0792a56f79e0a922d984897773acd284b426 +size 535 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..24537ca9b1e5187b37136b19898ab370dec315d7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:09ef19cfad25c5de487ddbaef3c4d068ca3063777730a288dfd6f5096a0c6f46 +size 1407 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/data-v1-dl-3.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/data-v1-dl-3.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..32bdf94f0f4eac4f936d82476fc75917e92317fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/data-v1-dl-3.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c63fdf8861761f1ca70509f7d2d169a7cc053988c7b7c09c09a6db6124e208be +size 19485 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c3454ff8e5a399b14e2033d6122315c4e4b2dbfc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59d1aa6b02d2358c16fa9e4fbeff523a3bd10ebd38c7c371911fa8335e7bdcbf +size 598 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..49d394f0c458c02f7d9781445ef870cf8f747e0e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0f7973193eb35d19e99d1d8bca3c7f3a8b8d0410508af34ad571aee8ec5ab05 +size 913 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2b93281d0ded598bd03b1160a1b8a86df61b485c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36c63c3ac8c9db59910acbf4c772cd53040ccd0eac0b0452611dd7ad8da50474 +size 1660 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..138ffc718b067282922ebeb107b22b8c3af08477 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8adac8e2f8cbcbfa9677acdd4927a961430465d2c99401832160be455cfaced8 +size 3690 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..7e6491106a294f38733d8dfd6475c1afe42b8848 --- 
/dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0c203b4627175cebbf527d81917a499911af915f6f2f46ee7248428a948d603 +size 325 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..ecd8d1b12a547833c2d00ed29be640a12167d082 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:301396b4a42c814b1a15038ddfcbcf5c8590501231747d0dc2a500b84b2fd0df +size 328 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..1d119ce6ec907e4689015911b16bcbfe8552b4e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dee83987fffa8ec20e23b3cabc00d42beb7a469af6bd803909998c1687fa634 +size 934 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..c82d051bccb1b232214b31c73114f4f78749d810 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c5fd93ffec7deb63a940fd698534dd7ebb7db349fc183930041cbf17e60e2cc +size 6471 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f9cfe07dc0e88e6b692ec0c5450d44acfd5594a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__init__.py @@ -0,0 +1,52 @@ +""" +The :mod:`sklearn.decomposition` module includes matrix decomposition +algorithms, including among others PCA, NMF or ICA. Most of the algorithms of +this module can be regarded as dimensionality reduction techniques. 
+""" + + +from ..utils.extmath import randomized_svd +from ._dict_learning import ( + DictionaryLearning, + MiniBatchDictionaryLearning, + SparseCoder, + dict_learning, + dict_learning_online, + sparse_encode, +) +from ._factor_analysis import FactorAnalysis +from ._fastica import FastICA, fastica +from ._incremental_pca import IncrementalPCA +from ._kernel_pca import KernelPCA +from ._lda import LatentDirichletAllocation +from ._nmf import ( + NMF, + MiniBatchNMF, + non_negative_factorization, +) +from ._pca import PCA +from ._sparse_pca import MiniBatchSparsePCA, SparsePCA +from ._truncated_svd import TruncatedSVD + +__all__ = [ + "DictionaryLearning", + "FastICA", + "IncrementalPCA", + "KernelPCA", + "MiniBatchDictionaryLearning", + "MiniBatchNMF", + "MiniBatchSparsePCA", + "NMF", + "PCA", + "SparseCoder", + "SparsePCA", + "dict_learning", + "dict_learning_online", + "fastica", + "non_negative_factorization", + "randomized_svd", + "sparse_encode", + "FactorAnalysis", + "TruncatedSVD", + "LatentDirichletAllocation", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa720751774f794dc92263c66034966cca3307a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_base.py @@ -0,0 +1,193 @@ +"""Principal Component Analysis Base Classes""" + +# Author: Alexandre Gramfort +# Olivier Grisel +# Mathieu Blondel +# Denis A. Engemann +# Kyle Kastner +# +# License: BSD 3 clause + +from abc import ABCMeta, abstractmethod + +import numpy as np +from scipy import linalg +from scipy.sparse import issparse + +from ..base import BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin +from ..utils._array_api import _add_to_diagonal, device, get_namespace +from ..utils.sparsefuncs import _implicit_column_offset +from ..utils.validation import check_is_fitted + + +class _BasePCA( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta +): + """Base class for PCA methods. + + Warning: This class should not be used directly. + Use derived classes instead. + """ + + def get_covariance(self): + """Compute data covariance with the generative model. + + ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` + where S**2 contains the explained variances, and sigma2 contains the + noise variances. + + Returns + ------- + cov : array of shape=(n_features, n_features) + Estimated covariance of data. + """ + xp, _ = get_namespace(self.components_) + + components_ = self.components_ + exp_var = self.explained_variance_ + if self.whiten: + components_ = components_ * xp.sqrt(exp_var[:, np.newaxis]) + exp_var_diff = exp_var - self.noise_variance_ + exp_var_diff = xp.where( + exp_var > self.noise_variance_, + exp_var_diff, + xp.asarray(0.0, device=device(exp_var)), + ) + cov = (components_.T * exp_var_diff) @ components_ + _add_to_diagonal(cov, self.noise_variance_, xp) + return cov + + def get_precision(self): + """Compute data precision matrix with the generative model. + + Equals the inverse of the covariance but computed with + the matrix inversion lemma for efficiency. + + Returns + ------- + precision : array, shape=(n_features, n_features) + Estimated precision of data. 
+ """ + xp, is_array_api_compliant = get_namespace(self.components_) + + n_features = self.components_.shape[1] + + # handle corner cases first + if self.n_components_ == 0: + return xp.eye(n_features) / self.noise_variance_ + + if is_array_api_compliant: + linalg_inv = xp.linalg.inv + else: + linalg_inv = linalg.inv + + if self.noise_variance_ == 0.0: + return linalg_inv(self.get_covariance()) + + # Get precision using matrix inversion lemma + components_ = self.components_ + exp_var = self.explained_variance_ + if self.whiten: + components_ = components_ * xp.sqrt(exp_var[:, np.newaxis]) + exp_var_diff = exp_var - self.noise_variance_ + exp_var_diff = xp.where( + exp_var > self.noise_variance_, + exp_var_diff, + xp.asarray(0.0, device=device(exp_var)), + ) + precision = components_ @ components_.T / self.noise_variance_ + _add_to_diagonal(precision, 1.0 / exp_var_diff, xp) + precision = components_.T @ linalg_inv(precision) @ components_ + precision /= -(self.noise_variance_**2) + _add_to_diagonal(precision, 1.0 / self.noise_variance_, xp) + return precision + + @abstractmethod + def fit(self, X, y=None): + """Placeholder for fit. Subclasses should implement this method! + + Fit the model with X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + self : object + Returns the instance itself. + """ + + def transform(self, X): + """Apply dimensionality reduction to X. + + X is projected on the first principal components previously extracted + from a training set. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : array-like of shape (n_samples, n_components) + Projection of X in the first principal components, where `n_samples` + is the number of samples and `n_components` is the number of the components. + """ + xp, _ = get_namespace(X) + + check_is_fitted(self) + + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[xp.float64, xp.float32], reset=False + ) + if self.mean_ is not None: + if issparse(X): + X = _implicit_column_offset(X, self.mean_) + else: + X = X - self.mean_ + X_transformed = X @ self.components_.T + if self.whiten: + X_transformed /= xp.sqrt(self.explained_variance_) + return X_transformed + + def inverse_transform(self, X): + """Transform data back to its original space. + + In other words, return an input `X_original` whose transform would be X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + New data, where `n_samples` is the number of samples + and `n_components` is the number of components. + + Returns + ------- + X_original array-like of shape (n_samples, n_features) + Original data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Notes + ----- + If whitening is enabled, inverse_transform will compute the + exact inverse operation, which includes reversing whitening. 
+ """ + xp, _ = get_namespace(X) + + if self.whiten: + scaled_components = ( + xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_ + ) + return X @ scaled_components + self.mean_ + else: + return X @ self.components_ + self.mean_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4a2a56a99276d26b8a4ed781065b1e81071808c1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py new file mode 100644 index 0000000000000000000000000000000000000000..51350aa5e05bdbb0a9a8b691837d2476f3198981 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py @@ -0,0 +1,2301 @@ +""" Dictionary learning. +""" +# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort +# License: BSD 3 clause + +import itertools +import sys +import time +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from joblib import effective_n_jobs +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..linear_model import Lars, Lasso, LassoLars, orthogonal_mp_gram +from ..utils import check_array, check_random_state, gen_batches, gen_even_slices +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.extmath import randomized_svd, row_norms, svd_flip +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted + + +def _check_positive_coding(method, positive): + if positive and method in ["omp", "lars"]: + raise ValueError( + "Positive constraint not supported for '{}' coding method.".format(method) + ) + + +def _sparse_encode_precomputed( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + regularization=None, + copy_cov=True, + init=None, + max_iter=1000, + verbose=0, + positive=False, +): + """Generic sparse coding with precomputed Gram and/or covariance matrices. + + Each row of the result is the solution to a Lasso problem. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data matrix. + + dictionary : ndarray of shape (n_components, n_features) + The dictionary matrix against which to solve the sparse coding of + the data. Some of the algorithms assume normalized rows. + + gram : ndarray of shape (n_components, n_components), default=None + Precomputed Gram matrix, `dictionary * dictionary'` + gram can be `None` if method is 'threshold'. + + cov : ndarray of shape (n_components, n_samples), default=None + Precomputed covariance, `dictionary * X'`. 
+ + algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \ + default='lasso_lars' + The algorithm used: + + * `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + * `'lasso_lars'`: uses Lars to compute the Lasso solution; + * `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if + the estimated components are sparse; + * `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + * `'threshold'`: squashes to zero all coefficients less than + regularization from the projection `dictionary * data'`. + + regularization : int or float, default=None + The regularization parameter. It corresponds to alpha when + algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`. + Otherwise it corresponds to `n_nonzero_coefs`. + + init : ndarray of shape (n_samples, n_components), default=None + Initialization value of the sparse code. Only used if + `algorithm='lasso_cd'`. + + max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + copy_cov : bool, default=True + Whether to copy the precomputed covariance matrix; if `False`, it may + be overwritten. + + verbose : int, default=0 + Controls the verbosity; the higher, the more messages. + + positive: bool, default=False + Whether to enforce a positivity constraint on the sparse code. + + .. versionadded:: 0.20 + + Returns + ------- + code : ndarray of shape (n_components, n_features) + The sparse codes. + """ + n_samples, n_features = X.shape + n_components = dictionary.shape[0] + + if algorithm == "lasso_lars": + alpha = float(regularization) / n_features # account for scaling + try: + err_mgt = np.seterr(all="ignore") + + # Not passing in verbose=max(0, verbose-1) because Lars.fit already + # corrects the verbosity level. + lasso_lars = LassoLars( + alpha=alpha, + fit_intercept=False, + verbose=verbose, + precompute=gram, + fit_path=False, + positive=positive, + max_iter=max_iter, + ) + lasso_lars.fit(dictionary.T, X.T, Xy=cov) + new_code = lasso_lars.coef_ + finally: + np.seterr(**err_mgt) + + elif algorithm == "lasso_cd": + alpha = float(regularization) / n_features # account for scaling + + # TODO: Make verbosity argument for Lasso? + # sklearn.linear_model.coordinate_descent.enet_path has a verbosity + # argument that we could pass in from Lasso. + clf = Lasso( + alpha=alpha, + fit_intercept=False, + precompute=gram, + max_iter=max_iter, + warm_start=True, + positive=positive, + ) + + if init is not None: + # In some workflows using coordinate descent algorithms: + # - users might provide NumPy arrays with read-only buffers + # - `joblib` might memmap arrays making their buffer read-only + # TODO: move this handling (which is currently too broad) + # closer to the actual private function which need buffers to be writable. + if not init.flags["WRITEABLE"]: + init = np.array(init) + clf.coef_ = init + + clf.fit(dictionary.T, X.T, check_input=False) + new_code = clf.coef_ + + elif algorithm == "lars": + try: + err_mgt = np.seterr(all="ignore") + + # Not passing in verbose=max(0, verbose-1) because Lars.fit already + # corrects the verbosity level. 
+ lars = Lars( + fit_intercept=False, + verbose=verbose, + precompute=gram, + n_nonzero_coefs=int(regularization), + fit_path=False, + ) + lars.fit(dictionary.T, X.T, Xy=cov) + new_code = lars.coef_ + finally: + np.seterr(**err_mgt) + + elif algorithm == "threshold": + new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T + if positive: + np.clip(new_code, 0, None, out=new_code) + + elif algorithm == "omp": + new_code = orthogonal_mp_gram( + Gram=gram, + Xy=cov, + n_nonzero_coefs=int(regularization), + tol=None, + norms_squared=row_norms(X, squared=True), + copy_Xy=copy_cov, + ).T + + return new_code.reshape(n_samples, n_components) + + +@validate_params( + { + "X": ["array-like"], + "dictionary": ["array-like"], + "gram": ["array-like", None], + "cov": ["array-like", None], + "algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left"), None], + "copy_cov": ["boolean"], + "init": ["array-like", None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "n_jobs": [Integral, None], + "check_input": ["boolean"], + "verbose": ["verbose"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +# XXX : could be moved to the linear_model module +def sparse_encode( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + n_nonzero_coefs=None, + alpha=None, + copy_cov=True, + init=None, + max_iter=1000, + n_jobs=None, + check_input=True, + verbose=0, + positive=False, +): + """Sparse coding. + + Each row of the result is the solution to a sparse coding problem. + The goal is to find a sparse array `code` such that:: + + X ~= code * dictionary + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data matrix. + + dictionary : array-like of shape (n_components, n_features) + The dictionary matrix against which to solve the sparse coding of + the data. Some of the algorithms assume normalized rows for meaningful + output. + + gram : array-like of shape (n_components, n_components), default=None + Precomputed Gram matrix, `dictionary * dictionary'`. + + cov : array-like of shape (n_components, n_samples), default=None + Precomputed covariance, `dictionary' * X`. + + algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \ + default='lasso_lars' + The algorithm used: + + * `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + * `'lasso_lars'`: uses Lars to compute the Lasso solution; + * `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if + the estimated components are sparse; + * `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + * `'threshold'`: squashes to zero all coefficients less than + regularization from the projection `dictionary * data'`. + + n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and `algorithm='omp'` + and is overridden by `alpha` in the `omp` case. If `None`, then + `n_nonzero_coefs=int(n_features / 10)`. + + alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. 
+ If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of + the reconstruction error targeted. In this case, it overrides + `n_nonzero_coefs`. + If `None`, default to 1. + + copy_cov : bool, default=True + Whether to copy the precomputed covariance matrix; if `False`, it may + be overwritten. + + init : ndarray of shape (n_samples, n_components), default=None + Initialization value of the sparse codes. Only used if + `algorithm='lasso_cd'`. + + max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + check_input : bool, default=True + If `False`, the input arrays X and dictionary will not be checked. + + verbose : int, default=0 + Controls the verbosity; the higher, the more messages. + + positive : bool, default=False + Whether to enforce positivity when finding the encoding. + + .. versionadded:: 0.20 + + Returns + ------- + code : ndarray of shape (n_samples, n_components) + The sparse codes. + + See Also + -------- + sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems. + sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer. + SparseCoder : Find a sparse representation of data from a fixed precomputed + dictionary. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import sparse_encode + >>> X = np.array([[-1, -1, -1], [0, 0, 3]]) + >>> dictionary = np.array( + ... [[0, 1, 0], + ... [-1, -1, 2], + ... [1, 1, 1], + ... [0, 1, 1], + ... [0, 2, 1]], + ... dtype=np.float64 + ... 
) + >>> sparse_encode(X, dictionary, alpha=1e-10) + array([[ 0., 0., -1., 0., 0.], + [ 0., 1., 1., 0., 0.]]) + """ + if check_input: + if algorithm == "lasso_cd": + dictionary = check_array( + dictionary, order="C", dtype=[np.float64, np.float32] + ) + X = check_array(X, order="C", dtype=[np.float64, np.float32]) + else: + dictionary = check_array(dictionary) + X = check_array(X) + + if dictionary.shape[1] != X.shape[1]: + raise ValueError( + "Dictionary and X have different numbers of features:" + "dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape) + ) + + _check_positive_coding(algorithm, positive) + + return _sparse_encode( + X, + dictionary, + gram=gram, + cov=cov, + algorithm=algorithm, + n_nonzero_coefs=n_nonzero_coefs, + alpha=alpha, + copy_cov=copy_cov, + init=init, + max_iter=max_iter, + n_jobs=n_jobs, + verbose=verbose, + positive=positive, + ) + + +def _sparse_encode( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + n_nonzero_coefs=None, + alpha=None, + copy_cov=True, + init=None, + max_iter=1000, + n_jobs=None, + verbose=0, + positive=False, +): + """Sparse coding without input/parameter validation.""" + + n_samples, n_features = X.shape + n_components = dictionary.shape[0] + + if algorithm in ("lars", "omp"): + regularization = n_nonzero_coefs + if regularization is None: + regularization = min(max(n_features / 10, 1), n_components) + else: + regularization = alpha + if regularization is None: + regularization = 1.0 + + if gram is None and algorithm != "threshold": + gram = np.dot(dictionary, dictionary.T) + + if cov is None and algorithm != "lasso_cd": + copy_cov = False + cov = np.dot(dictionary, X.T) + + if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold": + code = _sparse_encode_precomputed( + X, + dictionary, + gram=gram, + cov=cov, + algorithm=algorithm, + regularization=regularization, + copy_cov=copy_cov, + init=init, + max_iter=max_iter, + verbose=verbose, + positive=positive, + ) + return code + + # Enter parallel code block + n_samples = X.shape[0] + n_components = dictionary.shape[0] + code = np.empty((n_samples, n_components)) + slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs))) + + code_views = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(_sparse_encode_precomputed)( + X[this_slice], + dictionary, + gram=gram, + cov=cov[:, this_slice] if cov is not None else None, + algorithm=algorithm, + regularization=regularization, + copy_cov=copy_cov, + init=init[this_slice] if init is not None else None, + max_iter=max_iter, + verbose=verbose, + positive=positive, + ) + for this_slice in slices + ) + for this_slice, this_view in zip(slices, code_views): + code[this_slice] = this_view + return code + + +def _update_dict( + dictionary, + Y, + code, + A=None, + B=None, + verbose=False, + random_state=None, + positive=False, +): + """Update the dense dictionary factor in place. + + Parameters + ---------- + dictionary : ndarray of shape (n_components, n_features) + Value of the dictionary at the previous iteration. + + Y : ndarray of shape (n_samples, n_features) + Data matrix. + + code : ndarray of shape (n_samples, n_components) + Sparse coding of the data against which to optimize the dictionary. + + A : ndarray of shape (n_components, n_components), default=None + Together with `B`, sufficient stats of the online model to update the + dictionary. + + B : ndarray of shape (n_features, n_components), default=None + Together with `A`, sufficient stats of the online model to update the + dictionary. 
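The `sparse_encode` doctest above relies on `alpha` with the default `'lasso_lars'` algorithm; a hedged companion sketch encodes the same toy data with `algorithm='omp'`, which instead targets a fixed number of nonzero coefficients per sample (assumes scikit-learn is installed):

import numpy as np
from sklearn.decomposition import sparse_encode

X = np.array([[-1.0, -1.0, -1.0], [0.0, 0.0, 3.0]])
dictionary = np.array(
    [[0, 1, 0], [-1, -1, 2], [1, 1, 1], [0, 1, 1], [0, 2, 1]],
    dtype=np.float64,
)

# 'omp' uses n_nonzero_coefs rather than alpha: at most one atom per sample here.
code = sparse_encode(X, dictionary, algorithm="omp", n_nonzero_coefs=1)
print(code.shape)               # (2, 5)
print((code != 0).sum(axis=1))  # at most 1 nonzero coefficient per row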
+ + verbose: bool, default=False + Degree of output the procedure will print. + + random_state : int, RandomState instance or None, default=None + Used for randomly initializing the dictionary. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + positive : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + """ + n_samples, n_components = code.shape + random_state = check_random_state(random_state) + + if A is None: + A = code.T @ code + if B is None: + B = Y.T @ code + + n_unused = 0 + + for k in range(n_components): + if A[k, k] > 1e-6: + # 1e-6 is arbitrary but consistent with the spams implementation + dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k] + else: + # kth atom is almost never used -> sample a new one from the data + newd = Y[random_state.choice(n_samples)] + + # add small noise to avoid making the sparse coding ill conditioned + noise_level = 0.01 * (newd.std() or 1) # avoid 0 std + noise = random_state.normal(0, noise_level, size=len(newd)) + + dictionary[k] = newd + noise + code[:, k] = 0 + n_unused += 1 + + if positive: + np.clip(dictionary[k], 0, None, out=dictionary[k]) + + # Projection on the constraint set ||V_k|| <= 1 + dictionary[k] /= max(linalg.norm(dictionary[k]), 1) + + if verbose and n_unused > 0: + print(f"{n_unused} unused atoms resampled.") + + +def _dict_learning( + X, + n_components, + *, + alpha, + max_iter, + tol, + method, + n_jobs, + dict_init, + code_init, + callback, + verbose, + random_state, + return_n_iter, + positive_dict, + positive_code, + method_max_iter, +): + """Main dictionary learning algorithm""" + t0 = time.time() + # Init the code and the dictionary with SVD of Y + if code_init is not None and dict_init is not None: + code = np.array(code_init, order="F") + # Don't copy V, it will happen below + dictionary = dict_init + else: + code, S, dictionary = linalg.svd(X, full_matrices=False) + # flip the initial code's sign to enforce deterministic output + code, dictionary = svd_flip(code, dictionary) + dictionary = S[:, np.newaxis] * dictionary + r = len(dictionary) + if n_components <= r: # True even if n_components=None + code = code[:, :n_components] + dictionary = dictionary[:n_components, :] + else: + code = np.c_[code, np.zeros((len(code), n_components - r))] + dictionary = np.r_[ + dictionary, np.zeros((n_components - r, dictionary.shape[1])) + ] + + # Fortran-order dict better suited for the sparse coding which is the + # bottleneck of this algorithm. 
+ dictionary = np.asfortranarray(dictionary) + + errors = [] + current_cost = np.nan + + if verbose == 1: + print("[dict_learning]", end=" ") + + # If max_iter is 0, number of iterations returned should be zero + ii = -1 + + for ii in range(max_iter): + dt = time.time() - t0 + if verbose == 1: + sys.stdout.write(".") + sys.stdout.flush() + elif verbose: + print( + "Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)" + % (ii, dt, dt / 60, current_cost) + ) + + # Update code + code = sparse_encode( + X, + dictionary, + algorithm=method, + alpha=alpha, + init=code, + n_jobs=n_jobs, + positive=positive_code, + max_iter=method_max_iter, + verbose=verbose, + ) + + # Update dictionary in place + _update_dict( + dictionary, + X, + code, + verbose=verbose, + random_state=random_state, + positive=positive_dict, + ) + + # Cost function + current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum( + np.abs(code) + ) + errors.append(current_cost) + + if ii > 0: + dE = errors[-2] - errors[-1] + # assert(dE >= -tol * errors[-1]) + if dE < tol * errors[-1]: + if verbose == 1: + # A line return + print("") + elif verbose: + print("--- Convergence reached after %d iterations" % ii) + break + if ii % 5 == 0 and callback is not None: + callback(locals()) + + if return_n_iter: + return code, dictionary, errors, ii + 1 + else: + return code, dictionary, errors + + +def dict_learning_online( + X, + n_components=2, + *, + alpha=1, + max_iter=100, + return_code=True, + dict_init=None, + callback=None, + batch_size=256, + verbose=False, + shuffle=True, + n_jobs=None, + method="lars", + random_state=None, + positive_dict=False, + positive_code=False, + method_max_iter=1000, + tol=1e-3, + max_no_improvement=10, +): + """Solve a dictionary learning matrix factorization problem online. + + Finds the best dictionary and the corresponding sparse code for + approximating the data matrix X by solving:: + + (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 = 1 for all 0 <= k < n_components + + where V is the dictionary and U is the sparse code. ||.||_Fro stands for + the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm + which is the sum of the absolute values of all the entries in the matrix. + This is accomplished by repeatedly iterating over mini-batches by slicing + the input data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data matrix. + + n_components : int or None, default=2 + Number of dictionary atoms to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1 + Sparsity controlling parameter. + + max_iter : int, default=100 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.1 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `100`) instead. + + return_code : bool, default=True + Whether to also return the code U or just the dictionary `V`. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial values for the dictionary for warm restart scenarios. + If `None`, the initial values for the dictionary are created + with an SVD decomposition of the data via + :func:`~sklearn.utils.extmath.randomized_svd`. + + callback : callable, default=None + A callable that gets invoked at the end of each iteration. 
+ + batch_size : int, default=256 + The number of samples to take in each batch. + + .. versionchanged:: 1.3 + The default value of `batch_size` changed from 3 to 256 in version 1.3. + + verbose : bool, default=False + To control the verbosity of the procedure. + + shuffle : bool, default=True + Whether to shuffle the data before splitting it in batches. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + method : {'lars', 'cd'}, default='lars' + * `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + method_max_iter : int, default=1000 + Maximum number of iterations to perform when solving the lasso problem. + + .. versionadded:: 0.22 + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + .. versionadded:: 1.1 + + Returns + ------- + code : ndarray of shape (n_samples, n_components), + The sparse code (only returned if `return_code=True`). + + dictionary : ndarray of shape (n_components, n_features), + The solutions to the dictionary learning problem. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to `True`. + + See Also + -------- + dict_learning : Solve a dictionary learning matrix factorization problem. + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary + learning algorithm. + SparsePCA : Sparse Principal Components Analysis. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import dict_learning_online + >>> X, _, _ = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> U, V = dict_learning_online( + ... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42 + ... ) + + We can check the level of sparsity of `U`: + + >>> np.mean(U == 0) + 0.53... 
+ + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = U @ V + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.05... + """ + # TODO(1.6): remove in 1.6 + if max_iter is None: + warn( + ( + "`max_iter=None` is deprecated in version 1.4 and will be removed in " + "version 1.6. Use the default value (i.e. `100`) instead." + ), + FutureWarning, + ) + max_iter = 100 + + transform_algorithm = "lasso_" + method + + est = MiniBatchDictionaryLearning( + n_components=n_components, + alpha=alpha, + max_iter=max_iter, + n_jobs=n_jobs, + fit_algorithm=method, + batch_size=batch_size, + shuffle=shuffle, + dict_init=dict_init, + random_state=random_state, + transform_algorithm=transform_algorithm, + transform_alpha=alpha, + positive_code=positive_code, + positive_dict=positive_dict, + transform_max_iter=method_max_iter, + verbose=verbose, + callback=callback, + tol=tol, + max_no_improvement=max_no_improvement, + ).fit(X) + + if not return_code: + return est.components_ + else: + code = est.transform(X) + return code, est.components_ + + +@validate_params( + { + "X": ["array-like"], + "method": [StrOptions({"lars", "cd"})], + "return_n_iter": ["boolean"], + "method_max_iter": [Interval(Integral, 0, None, closed="left")], + }, + prefer_skip_nested_validation=False, +) +def dict_learning( + X, + n_components, + *, + alpha, + max_iter=100, + tol=1e-8, + method="lars", + n_jobs=None, + dict_init=None, + code_init=None, + callback=None, + verbose=False, + random_state=None, + return_n_iter=False, + positive_dict=False, + positive_code=False, + method_max_iter=1000, +): + """Solve a dictionary learning matrix factorization problem. + + Finds the best dictionary and the corresponding sparse code for + approximating the data matrix X by solving:: + + (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 = 1 for all 0 <= k < n_components + + where V is the dictionary and U is the sparse code. ||.||_Fro stands for + the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm + which is the sum of the absolute values of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data matrix. + + n_components : int + Number of dictionary atoms to extract. + + alpha : int or float + Sparsity controlling parameter. + + max_iter : int, default=100 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for the stopping condition. + + method : {'lars', 'cd'}, default='lars' + The method used: + + * `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial value for the dictionary for warm restart scenarios. Only used + if `code_init` and `dict_init` are not None. 
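A hedged sketch of the warm-restart scenario described just above (both `code_init` and `dict_init` must be supplied for the initial values to be used), reusing the toy signal from the doctest:

from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import dict_learning

X, _, _ = make_sparse_coded_signal(
    n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
    random_state=42,
)

# First run: initialized from an SVD of X.
U, V, errors = dict_learning(X, n_components=15, alpha=0.1, max_iter=5,
                             random_state=42)

# Warm restart: resume from the previously learned code and dictionary.
U2, V2, errors2 = dict_learning(X, n_components=15, alpha=0.1, max_iter=5,
                                code_init=U, dict_init=V, random_state=42)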
+ + code_init : ndarray of shape (n_samples, n_components), default=None + Initial value for the sparse code for warm restart scenarios. Only used + if `code_init` and `dict_init` are not None. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + verbose : bool, default=False + To control the verbosity of the procedure. + + random_state : int, RandomState instance or None, default=None + Used for randomly initializing the dictionary. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + method_max_iter : int, default=1000 + Maximum number of iterations to perform. + + .. versionadded:: 0.22 + + Returns + ------- + code : ndarray of shape (n_samples, n_components) + The sparse code factor in the matrix factorization. + + dictionary : ndarray of shape (n_components, n_features), + The dictionary factor in the matrix factorization. + + errors : array + Vector of errors at each iteration. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to True. + + See Also + -------- + dict_learning_online : Solve a dictionary learning matrix factorization + problem online. + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate version + of the dictionary learning algorithm. + SparsePCA : Sparse Principal Components Analysis. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import dict_learning + >>> X, _, _ = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42) + + We can check the level of sparsity of `U`: + + >>> np.mean(U == 0) + 0.6... + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = U @ V + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.01... 
+ """ + estimator = DictionaryLearning( + n_components=n_components, + alpha=alpha, + max_iter=max_iter, + tol=tol, + fit_algorithm=method, + n_jobs=n_jobs, + dict_init=dict_init, + callback=callback, + code_init=code_init, + verbose=verbose, + random_state=random_state, + positive_code=positive_code, + positive_dict=positive_dict, + transform_max_iter=method_max_iter, + ).set_output(transform="default") + code = estimator.fit_transform(X) + if return_n_iter: + return ( + code, + estimator.components_, + estimator.error_, + estimator.n_iter_, + ) + return code, estimator.components_, estimator.error_ + + +class _BaseSparseCoding(ClassNamePrefixFeaturesOutMixin, TransformerMixin): + """Base class from SparseCoder and DictionaryLearning algorithms.""" + + def __init__( + self, + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ): + self.transform_algorithm = transform_algorithm + self.transform_n_nonzero_coefs = transform_n_nonzero_coefs + self.transform_alpha = transform_alpha + self.transform_max_iter = transform_max_iter + self.split_sign = split_sign + self.n_jobs = n_jobs + self.positive_code = positive_code + + def _transform(self, X, dictionary): + """Private method allowing to accommodate both DictionaryLearning and + SparseCoder.""" + X = self._validate_data(X, reset=False) + + if hasattr(self, "alpha") and self.transform_alpha is None: + transform_alpha = self.alpha + else: + transform_alpha = self.transform_alpha + + code = sparse_encode( + X, + dictionary, + algorithm=self.transform_algorithm, + n_nonzero_coefs=self.transform_n_nonzero_coefs, + alpha=transform_alpha, + max_iter=self.transform_max_iter, + n_jobs=self.n_jobs, + positive=self.positive_code, + ) + + if self.split_sign: + # feature vector is split into a positive and negative side + n_samples, n_features = code.shape + split_code = np.empty((n_samples, 2 * n_features)) + split_code[:, :n_features] = np.maximum(code, 0) + split_code[:, n_features:] = -np.minimum(code, 0) + code = split_code + + return code + + def transform(self, X): + """Encode the data as a sparse combination of the dictionary atoms. + + Coding method is determined by the object parameter + `transform_algorithm`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Test data to be transformed, must have the same number of + features as the data used to train the model. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + return self._transform(X, self.components_) + + +class SparseCoder(_BaseSparseCoding, BaseEstimator): + """Sparse coding. + + Finds a sparse representation of data against a fixed, precomputed + dictionary. + + Each row of the result is the solution to a sparse coding problem. + The goal is to find a sparse array `code` such that:: + + X ~= code * dictionary + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + dictionary : ndarray of shape (n_components, n_features) + The dictionary atoms used for sparse coding. Lines are assumed to be + normalized to unit norm. 
+ + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution; + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if + the estimated components are sparse; + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and `algorithm='omp'` + and is overridden by `alpha` in the `omp` case. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of + the reconstruction error targeted. In this case, it overrides + `n_nonzero_coefs`. + If `None`, default to 1. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `lasso_lars`. + + .. versionadded:: 0.22 + + Attributes + ---------- + n_components_ : int + Number of atoms. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate, version of the + dictionary learning algorithm. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparsePCA : Sparse Principal Components Analysis. + sparse_encode : Sparse coding where each row of the result is the solution + to a sparse coding problem. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import SparseCoder + >>> X = np.array([[-1, -1, -1], [0, 0, 3]]) + >>> dictionary = np.array( + ... [[0, 1, 0], + ... [-1, -1, 2], + ... [1, 1, 1], + ... [0, 1, 1], + ... [0, 2, 1]], + ... dtype=np.float64 + ... ) + >>> coder = SparseCoder( + ... dictionary=dictionary, transform_algorithm='lasso_lars', + ... transform_alpha=1e-10, + ... 
) + >>> coder.transform(X) + array([[ 0., 0., -1., 0., 0.], + [ 0., 1., 1., 0., 0.]]) + """ + + _required_parameters = ["dictionary"] + + def __init__( + self, + dictionary, + *, + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + split_sign=False, + n_jobs=None, + positive_code=False, + transform_max_iter=1000, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.dictionary = dictionary + + def fit(self, X, y=None): + """Do nothing and return the estimator unchanged. + + This method is just there to implement the usual API and hence + work in pipelines. + + Parameters + ---------- + X : Ignored + Not used, present for API consistency by convention. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + return self + + def transform(self, X, y=None): + """Encode the data as a sparse combination of the dictionary atoms. + + Coding method is determined by the object parameter + `transform_algorithm`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + return super()._transform(X, self.dictionary) + + def _more_tags(self): + return { + "requires_fit": False, + "preserves_dtype": [np.float64, np.float32], + } + + @property + def n_components_(self): + """Number of atoms.""" + return self.dictionary.shape[0] + + @property + def n_features_in_(self): + """Number of features seen during `fit`.""" + return self.dictionary.shape[1] + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.n_components_ + + +class DictionaryLearning(_BaseSparseCoding, BaseEstimator): + """Dictionary learning. + + Finds a dictionary (a set of atoms) that performs well at sparsely + encoding the fitted data. + + Solves the optimization problem:: + + (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 <= 1 for all 0 <= k < n_components + + ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for + the entry-wise matrix norm which is the sum of the absolute values + of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of dictionary elements to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1.0 + Sparsity controlling parameter. + + max_iter : int, default=1000 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for numerical error. + + fit_algorithm : {'lars', 'cd'}, default='lars' + * `'lars'`: uses the least angle regression method to solve the lasso + problem (:func:`~sklearn.linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be + faster if the estimated components are sparse. + + .. versionadded:: 0.17 + *cd* coordinate descent method to improve speed. 
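To complement the `lasso_lars` doctest above, here is a hedged usage sketch of `SparseCoder` with the `'threshold'` algorithm on the same toy data; the exact coefficient values depend on `transform_alpha`, so only the output shape is noted:

    import numpy as np
    from sklearn.decomposition import SparseCoder

    X = np.array([[-1.0, -1.0, -1.0], [0.0, 0.0, 3.0]])
    dictionary = np.array(
        [[0, 1, 0], [-1, -1, 2], [1, 1, 1], [0, 1, 1], [0, 2, 1]], dtype=np.float64
    )

    # 'threshold' projects X onto the dictionary atoms and zeroes every
    # coefficient whose magnitude falls below transform_alpha.
    coder = SparseCoder(
        dictionary=dictionary, transform_algorithm="threshold", transform_alpha=1.0
    )
    code = coder.transform(X)
    print(code.shape)  # (2, 5): one coefficient per sample and atom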
+ + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (:func:`~sklearn.linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution. + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'` + will be faster if the estimated components are sparse. + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution. + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + .. versionadded:: 0.17 + *lasso_cd* coordinate descent method to improve speed. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and + `algorithm='omp'`. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `None`, defaults to `alpha`. + + .. versionchanged:: 1.2 + When None, default value changed from 1.0 to `alpha`. + + n_jobs : int or None, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + code_init : ndarray of shape (n_samples, n_components), default=None + Initial value for the code, for warm restart. Only used if `code_init` + and `dict_init` are not None. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial values for the dictionary, for warm restart. Only used if + `code_init` and `dict_init` are not None. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + .. versionadded:: 1.3 + + verbose : bool, default=False + To control the verbosity of the procedure. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + .. versionadded:: 0.22 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + dictionary atoms extracted from the data + + error_ : array + vector of errors at each iteration + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + MiniBatchDictionaryLearning: A faster, less accurate, version of the + dictionary learning algorithm. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + + References + ---------- + + J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning + for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import DictionaryLearning + >>> X, dictionary, code = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> dict_learner = DictionaryLearning( + ... n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.1, + ... random_state=42, + ... ) + >>> X_transformed = dict_learner.fit(X).transform(X) + + We can check the level of sparsity of `X_transformed`: + + >>> np.mean(X_transformed == 0) + 0.52... + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = X_transformed @ dict_learner.components_ + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.05... + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "fit_algorithm": [StrOptions({"lars", "cd"})], + "transform_algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "transform_alpha": [Interval(Real, 0, None, closed="left"), None], + "n_jobs": [Integral, None], + "code_init": [np.ndarray, None], + "dict_init": [np.ndarray, None], + "callback": [callable, None], + "verbose": ["verbose"], + "split_sign": ["boolean"], + "random_state": ["random_state"], + "positive_code": ["boolean"], + "positive_dict": ["boolean"], + "transform_max_iter": [Interval(Integral, 0, None, closed="left")], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + max_iter=1000, + tol=1e-8, + fit_algorithm="lars", + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + n_jobs=None, + code_init=None, + dict_init=None, + callback=None, + verbose=False, + split_sign=False, + random_state=None, + positive_code=False, + positive_dict=False, + transform_max_iter=1000, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.n_components = n_components + self.alpha = alpha + self.max_iter = max_iter + self.tol = tol + self.fit_algorithm = fit_algorithm + self.code_init = code_init + self.dict_init = dict_init + self.callback = callback + self.verbose = verbose + self.random_state = random_state + self.positive_dict = positive_dict 
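The objective stated in the `DictionaryLearning` docstring above can be evaluated directly from a fitted estimator. A hedged sketch on random toy data (values are illustrative and not asserted); per the `fit_transform` shown below, the returned code is the one produced during fitting:

    import numpy as np
    from sklearn.decomposition import DictionaryLearning

    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)

    dl = DictionaryLearning(n_components=5, alpha=1.0, max_iter=100, random_state=0)
    U = dl.fit_transform(X)   # code, shape (20, 5)
    V = dl.components_        # dictionary, shape (5, 10), rows with norm <= 1

    # Objective from the docstring: 0.5 * ||X - U V||_Fro^2 + alpha * ||U||_1,1
    objective = 0.5 * np.sum((X - U @ V) ** 2) + dl.alpha * np.sum(np.abs(U))
    print(objective)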
+ + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + self.fit_transform(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model from data in X and return the transformed data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + V : ndarray of shape (n_samples, n_components) + Transformed data. + """ + _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code) + + method = "lasso_" + self.fit_algorithm + + random_state = check_random_state(self.random_state) + X = self._validate_data(X) + + if self.n_components is None: + n_components = X.shape[1] + else: + n_components = self.n_components + + V, U, E, self.n_iter_ = _dict_learning( + X, + n_components, + alpha=self.alpha, + tol=self.tol, + max_iter=self.max_iter, + method=method, + method_max_iter=self.transform_max_iter, + n_jobs=self.n_jobs, + code_init=self.code_init, + dict_init=self.dict_init, + callback=self.callback, + verbose=self.verbose, + random_state=random_state, + return_n_iter=True, + positive_dict=self.positive_dict, + positive_code=self.positive_code, + ) + self.components_ = U + self.error_ = E + + return V + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } + + +class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator): + """Mini-batch dictionary learning. + + Finds a dictionary (a set of atoms) that performs well at sparsely + encoding the fitted data. + + Solves the optimization problem:: + + (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 <= 1 for all 0 <= k < n_components + + ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for + the entry-wise matrix norm which is the sum of the absolute values + of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of dictionary elements to extract. + + alpha : float, default=1 + Sparsity controlling parameter. + + max_iter : int, default=1_000 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.1 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `1_000`) instead. + + fit_algorithm : {'lars', 'cd'}, default='lars' + The algorithm used: + + - `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`) + - `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. 
+ ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + batch_size : int, default=256 + Number of samples in each mini-batch. + + .. versionchanged:: 1.3 + The default value of `batch_size` changed from 3 to 256 in version 1.3. + + shuffle : bool, default=True + Whether to shuffle the samples before forming batches. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial value of the dictionary for warm restart scenarios. + + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution. + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster + if the estimated components are sparse. + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution. + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and + `algorithm='omp'`. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `None`, defaults to `alpha`. + + .. versionchanged:: 1.2 + When None, default value changed from 1.0 to `alpha`. + + verbose : bool or int, default=False + To control the verbosity of the procedure. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + .. versionadded:: 0.22 + + callback : callable, default=None + A callable that gets invoked at the end of each iteration. + + .. versionadded:: 1.1 + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + .. 
versionadded:: 1.1 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Components extracted from the data. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations over the full dataset. + + n_steps_ : int + Number of mini-batches processed. + + .. versionadded:: 1.1 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + + References + ---------- + + J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning + for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import MiniBatchDictionaryLearning + >>> X, dictionary, code = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42) + >>> dict_learner = MiniBatchDictionaryLearning( + ... n_components=15, batch_size=3, transform_algorithm='lasso_lars', + ... transform_alpha=0.1, max_iter=20, random_state=42) + >>> X_transformed = dict_learner.fit_transform(X) + + We can check the level of sparsity of `X_transformed`: + + >>> np.mean(X_transformed == 0) > 0.5 + True + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = X_transformed @ dict_learner.components_ + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.052... 
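Besides `fit`, the estimator supports incremental updates through `partial_fit` (defined further below). A hedged streaming sketch on random toy data, feeding one mini-batch per call as one might with data that does not fit in memory:

    import numpy as np
    from sklearn.decomposition import MiniBatchDictionaryLearning

    rng = np.random.RandomState(42)
    X = rng.randn(300, 20)

    mbdl = MiniBatchDictionaryLearning(n_components=15, batch_size=32, random_state=42)

    # Stream the data in chunks; each call performs one mini-batch update.
    for start in range(0, X.shape[0], 32):
        mbdl.partial_fit(X[start:start + 32])

    print(mbdl.components_.shape)  # (15, 20)
    print(mbdl.n_steps_)           # number of mini-batch updates performed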
+ """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)], + "fit_algorithm": [StrOptions({"cd", "lars"})], + "n_jobs": [None, Integral], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "dict_init": [None, np.ndarray], + "transform_algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "transform_alpha": [Interval(Real, 0, None, closed="left"), None], + "verbose": ["verbose"], + "split_sign": ["boolean"], + "random_state": ["random_state"], + "positive_code": ["boolean"], + "positive_dict": ["boolean"], + "transform_max_iter": [Interval(Integral, 0, None, closed="left")], + "callback": [None, callable], + "tol": [Interval(Real, 0, None, closed="left")], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + max_iter=1_000, + fit_algorithm="lars", + n_jobs=None, + batch_size=256, + shuffle=True, + dict_init=None, + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + verbose=False, + split_sign=False, + random_state=None, + positive_code=False, + positive_dict=False, + transform_max_iter=1000, + callback=None, + tol=1e-3, + max_no_improvement=10, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.n_components = n_components + self.alpha = alpha + self.max_iter = max_iter + self.fit_algorithm = fit_algorithm + self.dict_init = dict_init + self.verbose = verbose + self.shuffle = shuffle + self.batch_size = batch_size + self.split_sign = split_sign + self.random_state = random_state + self.positive_dict = positive_dict + self.callback = callback + self.max_no_improvement = max_no_improvement + self.tol = tol + + def _check_params(self, X): + # n_components + self._n_components = self.n_components + if self._n_components is None: + self._n_components = X.shape[1] + + # fit_algorithm + _check_positive_coding(self.fit_algorithm, self.positive_code) + self._fit_algorithm = "lasso_" + self.fit_algorithm + + # batch_size + self._batch_size = min(self.batch_size, X.shape[0]) + + def _initialize_dict(self, X, random_state): + """Initialization of the dictionary.""" + if self.dict_init is not None: + dictionary = self.dict_init + else: + # Init V with SVD of X + _, S, dictionary = randomized_svd( + X, self._n_components, random_state=random_state + ) + dictionary = S[:, np.newaxis] * dictionary + + if self._n_components <= len(dictionary): + dictionary = dictionary[: self._n_components, :] + else: + dictionary = np.concatenate( + ( + dictionary, + np.zeros( + (self._n_components - len(dictionary), dictionary.shape[1]), + dtype=dictionary.dtype, + ), + ) + ) + + dictionary = check_array(dictionary, order="F", dtype=X.dtype, copy=False) + dictionary = np.require(dictionary, requirements="W") + + return dictionary + + def _update_inner_stats(self, X, code, batch_size, step): + """Update the inner stats inplace.""" + if step < batch_size - 1: + theta = (step + 1) * batch_size + else: + theta = batch_size**2 + step + 1 - batch_size + beta = (theta + 1 - batch_size) / (theta + 1) + + self._A *= beta + self._A += code.T @ code / 
batch_size + self._B *= beta + self._B += X.T @ code / batch_size + + def _minibatch_step(self, X, dictionary, random_state, step): + """Perform the update on the dictionary for one minibatch.""" + batch_size = X.shape[0] + + # Compute code for this batch + code = _sparse_encode( + X, + dictionary, + algorithm=self._fit_algorithm, + alpha=self.alpha, + n_jobs=self.n_jobs, + positive=self.positive_code, + max_iter=self.transform_max_iter, + verbose=self.verbose, + ) + + batch_cost = ( + 0.5 * ((X - code @ dictionary) ** 2).sum() + + self.alpha * np.sum(np.abs(code)) + ) / batch_size + + # Update inner stats + self._update_inner_stats(X, code, batch_size, step) + + # Update dictionary + _update_dict( + dictionary, + X, + code, + self._A, + self._B, + verbose=self.verbose, + random_state=random_state, + positive=self.positive_dict, + ) + + return batch_cost + + def _check_convergence( + self, X, batch_cost, new_dict, old_dict, n_samples, step, n_steps + ): + """Helper function to encapsulate the early stopping logic. + + Early stopping is based on two factors: + - A small change of the dictionary between two minibatch updates. This is + controlled by the tol parameter. + - No more improvement on a smoothed estimate of the objective function for a + a certain number of consecutive minibatch updates. This is controlled by + the max_no_improvement parameter. + """ + batch_size = X.shape[0] + + # counts steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore 100 first steps or 1 epoch to avoid initializing the ewa_cost with a + # too bad value + if step <= min(100, n_samples / batch_size): + if self.verbose: + print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}") + return False + + # Compute an Exponentially Weighted Average of the cost function to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_cost is None: + self._ewa_cost = batch_cost + else: + alpha = batch_size / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha + + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch cost: " + f"{batch_cost}, ewa cost: {self._ewa_cost}" + ) + + # Early stopping based on change of dictionary + dict_diff = linalg.norm(new_dict - old_dict) / self._n_components + if self.tol > 0 and dict_diff <= self.tol: + if self.verbose: + print(f"Converged (small dictionary change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # cost function + if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min: + self._no_improvement = 0 + self._ewa_cost_min = self._ewa_cost + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in objective function) " + f"at step {step}/{n_steps}" + ) + return True + + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
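`_check_convergence` above smooths the per-batch cost with an exponentially weighted average before testing for lack of improvement. A standalone sketch of that smoothing rule, using made-up cost values and the same update as the code:

    # ewa <- (1 - alpha) * ewa + alpha * batch_cost,
    # with alpha = batch_size / (n_samples + 1), capped at 1.
    batch_size, n_samples = 256, 10_000
    alpha = min(batch_size / (n_samples + 1), 1.0)

    batch_costs = [5.2, 4.8, 4.9, 4.7, 4.71, 4.69]  # toy values
    ewa_cost = None
    for cost in batch_costs:
        ewa_cost = cost if ewa_cost is None else (1 - alpha) * ewa_cost + alpha * cost
    print(ewa_cost)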
+ """ + X = self._validate_data( + X, dtype=[np.float64, np.float32], order="C", copy=False + ) + + self._check_params(X) + self._random_state = check_random_state(self.random_state) + + dictionary = self._initialize_dict(X, self._random_state) + old_dict = dictionary.copy() + + if self.shuffle: + X_train = X.copy() + self._random_state.shuffle(X_train) + else: + X_train = X + + n_samples, n_features = X_train.shape + + if self.verbose: + print("[dict_learning]") + + # Inner stats + self._A = np.zeros( + (self._n_components, self._n_components), dtype=X_train.dtype + ) + self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype) + + # TODO(1.6): remove in 1.6 + if self.max_iter is None: + warn( + ( + "`max_iter=None` is deprecated in version 1.4 and will be removed" + " in version 1.6. Use the default value (i.e. `1_000`) instead." + ), + FutureWarning, + ) + max_iter = 1_000 + else: + max_iter = self.max_iter + + # Attributes to monitor the convergence + self._ewa_cost = None + self._ewa_cost_min = None + self._no_improvement = 0 + + batches = gen_batches(n_samples, self._batch_size) + batches = itertools.cycle(batches) + n_steps_per_iter = int(np.ceil(n_samples / self._batch_size)) + n_steps = max_iter * n_steps_per_iter + + i = -1 # to allow max_iter = 0 + + for i, batch in zip(range(n_steps), batches): + X_batch = X_train[batch] + + batch_cost = self._minibatch_step( + X_batch, dictionary, self._random_state, i + ) + + if self._check_convergence( + X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps + ): + break + + # XXX callback param added for backward compat in #18975 but a common + # unified callback API should be preferred + if self.callback is not None: + self.callback(locals()) + + old_dict[:] = dictionary + + self.n_steps_ = i + 1 + self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter) + self.components_ = dictionary + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Update the model using the data in X as a mini-batch. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Return the instance itself. 
+ """ + has_components = hasattr(self, "components_") + + X = self._validate_data( + X, dtype=[np.float64, np.float32], order="C", reset=not has_components + ) + + if not has_components: + # This instance has not been fitted yet (fit or partial_fit) + self._check_params(X) + self._random_state = check_random_state(self.random_state) + + dictionary = self._initialize_dict(X, self._random_state) + + self.n_steps_ = 0 + + self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype) + self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype) + else: + dictionary = self.components_ + + self._minibatch_step(X, dictionary, self._random_state, self.n_steps_) + + self.components_ = dictionary + self.n_steps_ += 1 + + return self + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..af3498d5344836330d016b5c6fb24d0a6f9fd723 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py @@ -0,0 +1,458 @@ +"""Factor Analysis. + +A latent linear variable model. + +FactorAnalysis is similar to probabilistic PCA implemented by PCA.score +While PCA assumes Gaussian noise with the same variance for each +feature, the FactorAnalysis model assumes different variances for +each of them. + +This implementation is based on David Barber's Book, +Bayesian Reasoning and Machine Learning, +http://www.cs.ucl.ac.uk/staff/d.barber/brml, +Algorithm 21.1 +""" + +# Author: Christian Osendorfer +# Alexandre Gramfort +# Denis A. Engemann + +# License: BSD3 + +import warnings +from math import log, sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import fast_logdet, randomized_svd, squared_norm +from ..utils.validation import check_is_fitted + + +class FactorAnalysis(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Factor Analysis (FA). + + A simple linear generative model with Gaussian latent variables. + + The observations are assumed to be caused by a linear transformation of + lower dimensional latent factors and added Gaussian noise. + Without loss of generality the factors are distributed according to a + Gaussian with zero mean and unit covariance. The noise is also zero mean + and has an arbitrary diagonal covariance matrix. + + If we would restrict the model further, by assuming that the Gaussian + noise is even isotropic (all diagonal entries are the same) we would obtain + :class:`PCA`. + + FactorAnalysis performs a maximum likelihood estimate of the so-called + `loading` matrix, the transformation of the latent variables to the + observed ones, using SVD based approach. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_components : int, default=None + Dimensionality of latent space, the number of components + of ``X`` that are obtained after ``transform``. 
+ If None, n_components is set to the number of features. + + tol : float, default=1e-2 + Stopping tolerance for log-likelihood increase. + + copy : bool, default=True + Whether to make a copy of X. If ``False``, the input X gets overwritten + during fitting. + + max_iter : int, default=1000 + Maximum number of iterations. + + noise_variance_init : array-like of shape (n_features,), default=None + The initial guess of the noise variance for each feature. + If None, it defaults to np.ones(n_features). + + svd_method : {'lapack', 'randomized'}, default='randomized' + Which SVD method to use. If 'lapack' use standard SVD from + scipy.linalg, if 'randomized' use fast ``randomized_svd`` function. + Defaults to 'randomized'. For most applications 'randomized' will + be sufficiently precise while providing significant speed gains. + Accuracy can also be improved by setting higher values for + `iterated_power`. If this is not sufficient, for maximum precision + you should choose 'lapack'. + + iterated_power : int, default=3 + Number of iterations for the power method. 3 by default. Only used + if ``svd_method`` equals 'randomized'. + + rotation : {'varimax', 'quartimax'}, default=None + If not None, apply the indicated rotation. Currently, varimax and + quartimax are implemented. See + `"The varimax criterion for analytic rotation in factor analysis" + `_ + H. F. Kaiser, 1958. + + .. versionadded:: 0.24 + + random_state : int or RandomState instance, default=0 + Only used when ``svd_method`` equals 'randomized'. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Components with maximum variance. + + loglike_ : list of shape (n_iterations,) + The log likelihood at each iteration. + + noise_variance_ : ndarray of shape (n_features,) + The estimated noise variance for each feature. + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA: Principal component analysis is also a latent linear variable model + which however assumes equal noise variance for each feature. + This extra assumption makes probabilistic PCA faster as it can be + computed in closed form. + FastICA: Independent component analysis, a latent variable model with + non-Gaussian latent variables. + + References + ---------- + - David Barber, Bayesian Reasoning and Machine Learning, + Algorithm 21.1. + + - Christopher M. Bishop: Pattern Recognition and Machine Learning, + Chapter 12.2.4. 
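The generative model described above (low-rank loadings plus per-feature Gaussian noise) can be simulated to see what `FactorAnalysis` recovers. A hedged sketch on synthetic data; the recovered noise variances are only expected to track the true ones roughly, so nothing is asserted:

    import numpy as np
    from sklearn.decomposition import FactorAnalysis

    rng = np.random.RandomState(0)
    n_samples, n_features, n_factors = 1000, 10, 3

    W = rng.randn(n_factors, n_features)          # loadings
    Z = rng.randn(n_samples, n_factors)           # latent factors
    psi = rng.uniform(0.1, 2.0, size=n_features)  # per-feature noise variances
    X = Z @ W + rng.randn(n_samples, n_features) * np.sqrt(psi)

    fa = FactorAnalysis(n_components=n_factors, random_state=0).fit(X)
    print(fa.components_.shape)         # (3, 10)
    print(fa.noise_variance_.round(2))  # roughly tracks psi, up to estimation error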
+ + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import FactorAnalysis + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = FactorAnalysis(n_components=7, random_state=0) + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 0, None, closed="left"), None], + "tol": [Interval(Real, 0.0, None, closed="left")], + "copy": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "noise_variance_init": ["array-like", None], + "svd_method": [StrOptions({"randomized", "lapack"})], + "iterated_power": [Interval(Integral, 0, None, closed="left")], + "rotation": [StrOptions({"varimax", "quartimax"}), None], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + tol=1e-2, + copy=True, + max_iter=1000, + noise_variance_init=None, + svd_method="randomized", + iterated_power=3, + rotation=None, + random_state=0, + ): + self.n_components = n_components + self.copy = copy + self.tol = tol + self.max_iter = max_iter + self.svd_method = svd_method + + self.noise_variance_init = noise_variance_init + self.iterated_power = iterated_power + self.random_state = random_state + self.rotation = rotation + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the FactorAnalysis model to X using SVD based approach. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Ignored parameter. + + Returns + ------- + self : object + FactorAnalysis class instance. + """ + X = self._validate_data(X, copy=self.copy, dtype=np.float64) + + n_samples, n_features = X.shape + n_components = self.n_components + if n_components is None: + n_components = n_features + + self.mean_ = np.mean(X, axis=0) + X -= self.mean_ + + # some constant terms + nsqrt = sqrt(n_samples) + llconst = n_features * log(2.0 * np.pi) + n_components + var = np.var(X, axis=0) + + if self.noise_variance_init is None: + psi = np.ones(n_features, dtype=X.dtype) + else: + if len(self.noise_variance_init) != n_features: + raise ValueError( + "noise_variance_init dimension does not " + "with number of features : %d != %d" + % (len(self.noise_variance_init), n_features) + ) + psi = np.array(self.noise_variance_init) + + loglike = [] + old_ll = -np.inf + SMALL = 1e-12 + + # we'll modify svd outputs to return unexplained variance + # to allow for unified computation of loglikelihood + if self.svd_method == "lapack": + + def my_svd(X): + _, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False) + return ( + s[:n_components], + Vt[:n_components], + squared_norm(s[n_components:]), + ) + + else: # svd_method == "randomized" + random_state = check_random_state(self.random_state) + + def my_svd(X): + _, s, Vt = randomized_svd( + X, + n_components, + random_state=random_state, + n_iter=self.iterated_power, + ) + return s, Vt, squared_norm(X) - squared_norm(s) + + for i in range(self.max_iter): + # SMALL helps numerics + sqrt_psi = np.sqrt(psi) + SMALL + s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt)) + s **= 2 + # Use 'maximum' here to avoid sqrt problems. 
+ W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt + del Vt + W *= sqrt_psi + + # loglikelihood + ll = llconst + np.sum(np.log(s)) + ll += unexp_var + np.sum(np.log(psi)) + ll *= -n_samples / 2.0 + loglike.append(ll) + if (ll - old_ll) < self.tol: + break + old_ll = ll + + psi = np.maximum(var - np.sum(W**2, axis=0), SMALL) + else: + warnings.warn( + "FactorAnalysis did not converge." + + " You might want" + + " to increase the number of iterations.", + ConvergenceWarning, + ) + + self.components_ = W + if self.rotation is not None: + self.components_ = self._rotate(W) + self.noise_variance_ = psi + self.loglike_ = loglike + self.n_iter_ = i + 1 + return self + + def transform(self, X): + """Apply dimensionality reduction to X using the model. + + Compute the expected mean of the latent variables. + See Barber, 21.2.33 (or Bishop, 12.66). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + The latent variables of X. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + Ih = np.eye(len(self.components_)) + + X_transformed = X - self.mean_ + + Wpsi = self.components_ / self.noise_variance_ + cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T)) + tmp = np.dot(X_transformed, Wpsi.T) + X_transformed = np.dot(tmp, cov_z) + + return X_transformed + + def get_covariance(self): + """Compute data covariance with the FactorAnalysis model. + + ``cov = components_.T * components_ + diag(noise_variance)`` + + Returns + ------- + cov : ndarray of shape (n_features, n_features) + Estimated covariance of data. + """ + check_is_fitted(self) + + cov = np.dot(self.components_.T, self.components_) + cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace + return cov + + def get_precision(self): + """Compute data precision matrix with the FactorAnalysis model. + + Returns + ------- + precision : ndarray of shape (n_features, n_features) + Estimated precision of data. + """ + check_is_fitted(self) + + n_features = self.components_.shape[1] + + # handle corner cases first + if self.n_components == 0: + return np.diag(1.0 / self.noise_variance_) + if self.n_components == n_features: + return linalg.inv(self.get_covariance()) + + # Get precision using matrix inversion lemma + components_ = self.components_ + precision = np.dot(components_ / self.noise_variance_, components_.T) + precision.flat[:: len(precision) + 1] += 1.0 + precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) + precision /= self.noise_variance_[:, np.newaxis] + precision /= -self.noise_variance_[np.newaxis, :] + precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_ + return precision + + def score_samples(self, X): + """Compute the log-likelihood of each sample. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data. + + Returns + ------- + ll : ndarray of shape (n_samples,) + Log-likelihood of each sample under the current model. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + Xr = X - self.mean_ + precision = self.get_precision() + n_features = X.shape[1] + log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision)) + return log_like + + def score(self, X, y=None): + """Compute the average log-likelihood of the samples. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data. 
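`get_precision` above uses the matrix inversion lemma instead of inverting the full covariance. A quick sketch checking that, for a fitted model in the generic case (`0 < n_components < n_features`), it agrees numerically with directly inverting `get_covariance()`; random toy data, result expected but not guaranteed for ill-conditioned fits:

    import numpy as np
    from sklearn.decomposition import FactorAnalysis

    rng = np.random.RandomState(0)
    X = rng.randn(200, 8)

    fa = FactorAnalysis(n_components=3, random_state=0).fit(X)
    cov = fa.get_covariance()
    prec = fa.get_precision()

    # The lemma-based precision should match the direct inverse of the covariance.
    print(np.allclose(prec, np.linalg.inv(cov)))  # expected: True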
+ + y : Ignored + Ignored parameter. + + Returns + ------- + ll : float + Average log-likelihood of the samples under the current model. + """ + return np.mean(self.score_samples(X)) + + def _rotate(self, components, n_components=None, tol=1e-6): + "Rotate the factor analysis solution." + # note that tol is not exposed + return _ortho_rotation(components.T, method=self.rotation, tol=tol)[ + : self.n_components + ] + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + +def _ortho_rotation(components, method="varimax", tol=1e-6, max_iter=100): + """Return rotated components.""" + nrow, ncol = components.shape + rotation_matrix = np.eye(ncol) + var = 0 + + for _ in range(max_iter): + comp_rot = np.dot(components, rotation_matrix) + if method == "varimax": + tmp = comp_rot * np.transpose((comp_rot**2).sum(axis=0) / nrow) + elif method == "quartimax": + tmp = 0 + u, s, v = np.linalg.svd(np.dot(components.T, comp_rot**3 - tmp)) + rotation_matrix = np.dot(u, v) + var_new = np.sum(s) + if var != 0 and var_new < var * (1 + tol): + break + var = var_new + + return np.dot(components, rotation_matrix).T diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f36e5ba87dbc0ed737c1f07ec51d35c1f2d18e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py @@ -0,0 +1,795 @@ +""" +Python implementation of the fast ICA algorithms. + +Reference: Tables 8.3 and 8.4 page 196 in the book: +Independent Component Analysis, by Hyvarinen et al. +""" + +# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux, +# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import as_float_array, check_array, check_random_state +from ..utils._param_validation import Interval, Options, StrOptions, validate_params +from ..utils.validation import check_is_fitted + +__all__ = ["fastica", "FastICA"] + + +def _gs_decorrelation(w, W, j): + """ + Orthonormalize w wrt the first j rows of W. + + Parameters + ---------- + w : ndarray of shape (n,) + Array to be orthogonalized + + W : ndarray of shape (p, n) + Null space definition + + j : int < p + The no of (from the first) rows of Null space W wrt which w is + orthogonalized. + + Notes + ----- + Assumes that W is orthogonal + w changed in place + """ + w -= np.linalg.multi_dot([w, W[:j].T, W[:j]]) + return w + + +def _sym_decorrelation(W): + """Symmetric decorrelation + i.e. W <- (W * W.T) ^{-1/2} * W + """ + s, u = linalg.eigh(np.dot(W, W.T)) + # Avoid sqrt of negative values because of rounding errors. Note that + # np.sqrt(tiny) is larger than tiny and therefore this clipping also + # prevents division by zero in the next step. + s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None) + + # u (resp. s) contains the eigenvectors (resp. 
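`_ortho_rotation` above implements the varimax/quartimax rotations that `FactorAnalysis` applies when `rotation` is set. A hedged usage sketch on the digits data (as in the docstring example): the rotation only re-expresses the loadings within the same subspace, so the shape of `components_` is unchanged:

    from sklearn.datasets import load_digits
    from sklearn.decomposition import FactorAnalysis

    X, _ = load_digits(return_X_y=True)

    fa_plain = FactorAnalysis(n_components=7, random_state=0).fit(X)
    fa_varimax = FactorAnalysis(n_components=7, rotation="varimax", random_state=0).fit(X)

    print(fa_plain.components_.shape)    # (7, 64)
    print(fa_varimax.components_.shape)  # (7, 64): same subspace, rotated loadings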
square roots of + # the eigenvalues) of W * W.T + return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W]) + + +def _ica_def(X, tol, g, fun_args, max_iter, w_init): + """Deflationary FastICA using fun approx to neg-entropy function + + Used internally by FastICA. + """ + + n_components = w_init.shape[0] + W = np.zeros((n_components, n_components), dtype=X.dtype) + n_iter = [] + + # j is the index of the extracted component + for j in range(n_components): + w = w_init[j, :].copy() + w /= np.sqrt((w**2).sum()) + + for i in range(max_iter): + gwtx, g_wtx = g(np.dot(w.T, X), fun_args) + + w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w + + _gs_decorrelation(w1, W, j) + + w1 /= np.sqrt((w1**2).sum()) + + lim = np.abs(np.abs((w1 * w).sum()) - 1) + w = w1 + if lim < tol: + break + + n_iter.append(i + 1) + W[j, :] = w + + return W, max(n_iter) + + +def _ica_par(X, tol, g, fun_args, max_iter, w_init): + """Parallel FastICA. + + Used internally by FastICA --main loop + + """ + W = _sym_decorrelation(w_init) + del w_init + p_ = float(X.shape[1]) + for ii in range(max_iter): + gwtx, g_wtx = g(np.dot(W, X), fun_args) + W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W) + del gwtx, g_wtx + # builtin max, abs are faster than numpy counter parts. + # np.einsum allows having the lowest memory footprint. + # It is faster than np.diag(np.dot(W1, W.T)). + lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1)) + W = W1 + if lim < tol: + break + else: + warnings.warn( + ( + "FastICA did not converge. Consider increasing " + "tolerance or the maximum number of iterations." + ), + ConvergenceWarning, + ) + + return W, ii + 1 + + +# Some standard non-linear functions. +# XXX: these should be optimized, as they can be a bottleneck. +def _logcosh(x, fun_args=None): + alpha = fun_args.get("alpha", 1.0) # comment it out? + + x *= alpha + gx = np.tanh(x, x) # apply the tanh inplace + g_x = np.empty(x.shape[0], dtype=x.dtype) + # XXX compute in chunks to avoid extra allocation + for i, gx_i in enumerate(gx): # please don't vectorize. + g_x[i] = (alpha * (1 - gx_i**2)).mean() + return gx, g_x + + +def _exp(x, fun_args): + exp = np.exp(-(x**2) / 2) + gx = x * exp + g_x = (1 - x**2) * exp + return gx, g_x.mean(axis=-1) + + +def _cube(x, fun_args): + return x**3, (3 * x**2).mean(axis=-1) + + +@validate_params( + { + "X": ["array-like"], + "return_X_mean": ["boolean"], + "compute_sources": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def fastica( + X, + n_components=None, + *, + algorithm="parallel", + whiten="unit-variance", + fun="logcosh", + fun_args=None, + max_iter=200, + tol=1e-04, + w_init=None, + whiten_solver="svd", + random_state=None, + return_X_mean=False, + compute_sources=True, + return_n_iter=False, +): + """Perform Fast Independent Component Analysis. + + The implementation is based on [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + n_components : int, default=None + Number of components to use. If None is passed, all are used. + + algorithm : {'parallel', 'deflation'}, default='parallel' + Specify which algorithm to use for FastICA. + + whiten : str or bool, default='unit-variance' + Specify the whitening strategy to use. + + - If 'arbitrary-variance', a whitening with variance + arbitrary is used. 
+ - If 'unit-variance', the whitening matrix is rescaled to ensure that + each recovered source has unit variance. + - If False, the data is already considered to be whitened, and no + whitening is performed. + + .. versionchanged:: 1.3 + The default value of `whiten` changed to 'unit-variance' in 1.3. + + fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' + The functional form of the G function used in the + approximation to neg-entropy. Could be either 'logcosh', 'exp', + or 'cube'. + You can also provide your own function. It should return a tuple + containing the value of the function, and of its derivative, in the + point. The derivative should be averaged along its last dimension. + Example:: + + def my_g(x): + return x ** 3, (3 * x ** 2).mean(axis=-1) + + fun_args : dict, default=None + Arguments to send to the functional form. + If empty or None and if fun='logcosh', fun_args will take value + {'alpha' : 1.0}. + + max_iter : int, default=200 + Maximum number of iterations to perform. + + tol : float, default=1e-4 + A positive scalar giving the tolerance at which the + un-mixing matrix is considered to have converged. + + w_init : ndarray of shape (n_components, n_components), default=None + Initial un-mixing array. If `w_init=None`, then an array of values + drawn from a normal distribution is used. + + whiten_solver : {"eigh", "svd"}, default="svd" + The solver to use for whitening. + + - "svd" is more stable numerically if the problem is degenerate, and + often faster when `n_samples <= n_features`. + + - "eigh" is generally more memory efficient when + `n_samples >= n_features`, and can be faster when + `n_samples >= 50 * n_features`. + + .. versionadded:: 1.2 + + random_state : int, RandomState instance or None, default=None + Used to initialize ``w_init`` when not specified, with a + normal distribution. Pass an int, for reproducible results + across multiple function calls. + See :term:`Glossary `. + + return_X_mean : bool, default=False + If True, X_mean is returned too. + + compute_sources : bool, default=True + If False, sources are not computed, but only the rotation matrix. + This can save memory when working with big data. Defaults to True. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + K : ndarray of shape (n_components, n_features) or None + If whiten is 'True', K is the pre-whitening matrix that projects data + onto the first n_components principal components. If whiten is 'False', + K is 'None'. + + W : ndarray of shape (n_components, n_components) + The square matrix that unmixes the data after whitening. + The mixing matrix is the pseudo-inverse of matrix ``W K`` + if K is not None, else it is the inverse of W. + + S : ndarray of shape (n_samples, n_components) or None + Estimated source matrix. + + X_mean : ndarray of shape (n_features,) + The mean over features. Returned only if return_X_mean is True. + + n_iter : int + If the algorithm is "deflation", n_iter is the + maximum number of iterations run across all components. Else + they are just the number of iterations taken to converge. This is + returned only when return_n_iter is set to `True`. + + Notes + ----- + The data matrix X is considered to be a linear combination of + non-Gaussian (independent) components i.e. X = AS where columns of S + contain the independent components and A is a linear mixing + matrix. 
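The Returns section above states that the sources are obtained by applying ``W K`` to the (centered) data. A hedged numerical check of that relationship using the digits data, as in the docstring example; the tolerance check is expected to pass but is not asserted by the library itself:

    import numpy as np
    from sklearn.datasets import load_digits
    from sklearn.decomposition import fastica

    X, _ = load_digits(return_X_y=True)
    K, W, S, X_mean = fastica(
        X, n_components=7, whiten="unit-variance", random_state=0, return_X_mean=True
    )

    # Sources should equal the centered data projected through the unmixing
    # operator W K (up to floating-point error).
    print(np.allclose(S, (X - X_mean) @ (W @ K).T))  # expected: True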
In short ICA attempts to `un-mix' the data by estimating an + un-mixing matrix W where ``S = W K X.`` + While FastICA was proposed to estimate as many sources + as features, it is possible to estimate less by setting + n_components < n_features. It this case K is not a square matrix + and the estimated A is the pseudo-inverse of ``W K``. + + This implementation was originally made for data of shape + [n_features, n_samples]. Now the input is transposed + before the algorithm is applied. This makes it slightly + faster for Fortran-ordered input. + + References + ---------- + .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis", + Algorithms and Applications, Neural Networks, 13(4-5), 2000, + pp. 411-430. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import fastica + >>> X, _ = load_digits(return_X_y=True) + >>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance') + >>> K.shape + (7, 64) + >>> W.shape + (7, 7) + >>> S.shape + (1797, 7) + """ + est = FastICA( + n_components=n_components, + algorithm=algorithm, + whiten=whiten, + fun=fun, + fun_args=fun_args, + max_iter=max_iter, + tol=tol, + w_init=w_init, + whiten_solver=whiten_solver, + random_state=random_state, + ) + est._validate_params() + S = est._fit_transform(X, compute_sources=compute_sources) + + if est.whiten in ["unit-variance", "arbitrary-variance"]: + K = est.whitening_ + X_mean = est.mean_ + else: + K = None + X_mean = None + + returned_values = [K, est._unmixing, S] + if return_X_mean: + returned_values.append(X_mean) + if return_n_iter: + returned_values.append(est.n_iter_) + + return returned_values + + +class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """FastICA: a fast algorithm for Independent Component Analysis. + + The implementation is based on [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of components to use. If None is passed, all are used. + + algorithm : {'parallel', 'deflation'}, default='parallel' + Specify which algorithm to use for FastICA. + + whiten : str or bool, default='unit-variance' + Specify the whitening strategy to use. + + - If 'arbitrary-variance', a whitening with variance + arbitrary is used. + - If 'unit-variance', the whitening matrix is rescaled to ensure that + each recovered source has unit variance. + - If False, the data is already considered to be whitened, and no + whitening is performed. + + .. versionchanged:: 1.3 + The default value of `whiten` changed to 'unit-variance' in 1.3. + + fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' + The functional form of the G function used in the + approximation to neg-entropy. Could be either 'logcosh', 'exp', + or 'cube'. + You can also provide your own function. It should return a tuple + containing the value of the function, and of its derivative, in the + point. The derivative should be averaged along its last dimension. + Example:: + + def my_g(x): + return x ** 3, (3 * x ** 2).mean(axis=-1) + + fun_args : dict, default=None + Arguments to send to the functional form. + If empty or None and if fun='logcosh', fun_args will take value + {'alpha' : 1.0}. + + max_iter : int, default=200 + Maximum number of iterations during fit. + + tol : float, default=1e-4 + A positive scalar giving the tolerance at which the + un-mixing matrix is considered to have converged. 
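As the docstring sketches with `my_g`, `fun` may be a callable returning the nonlinearity value and its derivative averaged along the last axis. A hedged sketch passing such a callable (equivalent to the built-in `'cube'`) to the `FastICA` estimator on the digits data:

    from sklearn.datasets import load_digits
    from sklearn.decomposition import FastICA

    def my_g(x):
        # Return the function value and its derivative averaged over the last axis,
        # as the `fun` contract requires.
        return x ** 3, (3 * x ** 2).mean(axis=-1)

    X, _ = load_digits(return_X_y=True)
    ica = FastICA(n_components=7, fun=my_g, whiten="unit-variance", random_state=0)
    S = ica.fit_transform(X)
    print(S.shape)  # (1797, 7)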
+ + w_init : array-like of shape (n_components, n_components), default=None + Initial un-mixing array. If `w_init=None`, then an array of values + drawn from a normal distribution is used. + + whiten_solver : {"eigh", "svd"}, default="svd" + The solver to use for whitening. + + - "svd" is more stable numerically if the problem is degenerate, and + often faster when `n_samples <= n_features`. + + - "eigh" is generally more memory efficient when + `n_samples >= n_features`, and can be faster when + `n_samples >= 50 * n_features`. + + .. versionadded:: 1.2 + + random_state : int, RandomState instance or None, default=None + Used to initialize ``w_init`` when not specified, with a + normal distribution. Pass an int, for reproducible results + across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The linear operator to apply to the data to get the independent + sources. This is equal to the unmixing matrix when ``whiten`` is + False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when + ``whiten`` is True. + + mixing_ : ndarray of shape (n_features, n_components) + The pseudo-inverse of ``components_``. It is the linear operator + that maps independent sources to the data. + + mean_ : ndarray of shape(n_features,) + The mean over features. Only set if `self.whiten` is True. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + If the algorithm is "deflation", n_iter is the + maximum number of iterations run across all components. Else + they are just the number of iterations taken to converge. + + whitening_ : ndarray of shape (n_components, n_features) + Only set if whiten is 'True'. This is the pre-whitening matrix + that projects data onto the first `n_components` principal components. + + See Also + -------- + PCA : Principal component analysis (PCA). + IncrementalPCA : Incremental principal components analysis (IPCA). + KernelPCA : Kernel Principal component analysis (KPCA). + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparsePCA : Sparse Principal Components Analysis (SparsePCA). + + References + ---------- + .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis: + Algorithms and Applications, Neural Networks, 13(4-5), 2000, + pp. 411-430. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import FastICA + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = FastICA(n_components=7, + ... random_state=0, + ... 
whiten='unit-variance') + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "algorithm": [StrOptions({"parallel", "deflation"})], + "whiten": [ + StrOptions({"arbitrary-variance", "unit-variance"}), + Options(bool, {False}), + ], + "fun": [StrOptions({"logcosh", "exp", "cube"}), callable], + "fun_args": [dict, None], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "w_init": ["array-like", None], + "whiten_solver": [StrOptions({"eigh", "svd"})], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + algorithm="parallel", + whiten="unit-variance", + fun="logcosh", + fun_args=None, + max_iter=200, + tol=1e-4, + w_init=None, + whiten_solver="svd", + random_state=None, + ): + super().__init__() + self.n_components = n_components + self.algorithm = algorithm + self.whiten = whiten + self.fun = fun + self.fun_args = fun_args + self.max_iter = max_iter + self.tol = tol + self.w_init = w_init + self.whiten_solver = whiten_solver + self.random_state = random_state + + def _fit_transform(self, X, compute_sources=False): + """Fit the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + compute_sources : bool, default=False + If False, sources are not computes but only the rotation matrix. + This can save memory when working with big data. Defaults to False. + + Returns + ------- + S : ndarray of shape (n_samples, n_components) or None + Sources matrix. `None` if `compute_sources` is `False`. + """ + XT = self._validate_data( + X, copy=self.whiten, dtype=[np.float64, np.float32], ensure_min_samples=2 + ).T + fun_args = {} if self.fun_args is None else self.fun_args + random_state = check_random_state(self.random_state) + + alpha = fun_args.get("alpha", 1.0) + if not 1 <= alpha <= 2: + raise ValueError("alpha must be in [1,2]") + + if self.fun == "logcosh": + g = _logcosh + elif self.fun == "exp": + g = _exp + elif self.fun == "cube": + g = _cube + elif callable(self.fun): + + def g(x, fun_args): + return self.fun(x, **fun_args) + + n_features, n_samples = XT.shape + n_components = self.n_components + if not self.whiten and n_components is not None: + n_components = None + warnings.warn("Ignoring n_components with whiten=False.") + + if n_components is None: + n_components = min(n_samples, n_features) + if n_components > min(n_samples, n_features): + n_components = min(n_samples, n_features) + warnings.warn( + "n_components is too large: it will be set to %s" % n_components + ) + + if self.whiten: + # Centering the features of X + X_mean = XT.mean(axis=-1) + XT -= X_mean[:, np.newaxis] + + # Whitening and preprocessing by PCA + if self.whiten_solver == "eigh": + # Faster when num_samples >> n_features + d, u = linalg.eigh(XT.dot(X)) + sort_indices = np.argsort(d)[::-1] + eps = np.finfo(d.dtype).eps + degenerate_idx = d < eps + if np.any(degenerate_idx): + warnings.warn( + "There are some small singular values, using " + "whiten_solver = 'svd' might lead to more " + "accurate results." 
+ ) + d[degenerate_idx] = eps # For numerical issues + np.sqrt(d, out=d) + d, u = d[sort_indices], u[:, sort_indices] + elif self.whiten_solver == "svd": + u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2] + + # Give consistent eigenvectors for both svd solvers + u *= np.sign(u[0]) + + K = (u / d).T[:n_components] # see (6.33) p.140 + del u, d + X1 = np.dot(K, XT) + # see (13.6) p.267 Here X1 is white and data + # in X has been projected onto a subspace by PCA + X1 *= np.sqrt(n_samples) + else: + # X must be casted to floats to avoid typing issues with numpy + # 2.0 and the line below + X1 = as_float_array(XT, copy=False) # copy has been taken care of + + w_init = self.w_init + if w_init is None: + w_init = np.asarray( + random_state.normal(size=(n_components, n_components)), dtype=X1.dtype + ) + + else: + w_init = np.asarray(w_init) + if w_init.shape != (n_components, n_components): + raise ValueError( + "w_init has invalid shape -- should be %(shape)s" + % {"shape": (n_components, n_components)} + ) + + kwargs = { + "tol": self.tol, + "g": g, + "fun_args": fun_args, + "max_iter": self.max_iter, + "w_init": w_init, + } + + if self.algorithm == "parallel": + W, n_iter = _ica_par(X1, **kwargs) + elif self.algorithm == "deflation": + W, n_iter = _ica_def(X1, **kwargs) + del X1 + + self.n_iter_ = n_iter + + if compute_sources: + if self.whiten: + S = np.linalg.multi_dot([W, K, XT]).T + else: + S = np.dot(W, XT).T + else: + S = None + + if self.whiten: + if self.whiten == "unit-variance": + if not compute_sources: + S = np.linalg.multi_dot([W, K, XT]).T + S_std = np.std(S, axis=0, keepdims=True) + S /= S_std + W /= S_std.T + + self.components_ = np.dot(W, K) + self.mean_ = X_mean + self.whitening_ = K + else: + self.components_ = W + + self.mixing_ = linalg.pinv(self.components_, check_finite=False) + self._unmixing = W + + return S + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model and recover the sources from X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Estimated sources obtained by transforming the data with the + estimated unmixing matrix. + """ + return self._fit_transform(X, compute_sources=True) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + self._fit_transform(X, compute_sources=False) + return self + + def transform(self, X, copy=True): + """Recover the sources from X (apply the unmixing matrix). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to transform, where `n_samples` is the number of samples + and `n_features` is the number of features. + + copy : bool, default=True + If False, data passed to fit can be overwritten. Defaults to True. 
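As a quick illustration of the attribute relationships used above (``components_ = W K`` when whitening, ``transform`` applying ``components_`` to centered data, and ``mixing_`` being the pseudo-inverse of ``components_``), here is a hedged sketch on invented toy data; the Laplace-distributed signals and the mixing matrix are arbitrary choices, not from the patch.

```python
# Minimal sketch (invented toy data): check the documented attribute relations
# of a fitted FastICA model.
import numpy as np
from scipy import linalg
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
X = rng.laplace(size=(500, 4)) @ rng.normal(size=(4, 4))   # arbitrary mixtures

ica = FastICA(n_components=3, whiten="unit-variance", random_state=0).fit(X)
S = ica.transform(X)

# transform() centers the data and applies components_ (= unmixing @ whitening_).
assert np.allclose(S, (X - ica.mean_) @ ica.components_.T)
# mixing_ is the pseudo-inverse of components_.
assert np.allclose(ica.mixing_, linalg.pinv(ica.components_))
```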
+ + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Estimated sources obtained by transforming the data with the + estimated unmixing matrix. + """ + check_is_fitted(self) + + X = self._validate_data( + X, copy=(copy and self.whiten), dtype=[np.float64, np.float32], reset=False + ) + if self.whiten: + X -= self.mean_ + + return np.dot(X, self.components_.T) + + def inverse_transform(self, X, copy=True): + """Transform the sources back to the mixed data (apply mixing matrix). + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + Sources, where `n_samples` is the number of samples + and `n_components` is the number of components. + copy : bool, default=True + If False, data passed to fit are overwritten. Defaults to True. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Reconstructed data obtained with the mixing matrix. + """ + check_is_fitted(self) + + X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32]) + X = np.dot(X, self.mixing_.T) + if self.whiten: + X += self.mean_ + + return X + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return {"preserves_dtype": [np.float32, np.float64]} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..1089b2c54e086a79314d59b63532ff4842d9ccc8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py @@ -0,0 +1,409 @@ +"""Incremental Principal Components Analysis.""" + +# Author: Kyle Kastner +# Giorgio Patrini +# License: BSD 3 clause + +from numbers import Integral + +import numpy as np +from scipy import linalg, sparse + +from ..base import _fit_context +from ..utils import gen_batches +from ..utils._param_validation import Interval +from ..utils.extmath import _incremental_mean_and_var, svd_flip +from ._base import _BasePCA + + +class IncrementalPCA(_BasePCA): + """Incremental principal components analysis (IPCA). + + Linear dimensionality reduction using Singular Value Decomposition of + the data, keeping only the most significant singular vectors to + project the data to a lower dimensional space. The input data is centered + but not scaled for each feature before applying the SVD. + + Depending on the size of the input data, this algorithm can be much more + memory efficient than a PCA, and allows sparse input. + + This algorithm has constant memory complexity, on the order + of ``batch_size * n_features``, enabling use of np.memmap files without + loading the entire file into memory. For sparse matrices, the input + is converted to dense in batches (in order to be able to subtract the + mean) which avoids storing the entire dense matrix at any one time. + + The computational overhead of each SVD is + ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples + remain in memory at a time. There will be ``n_samples / batch_size`` SVD + computations to get the principal components, versus 1 large SVD of + complexity ``O(n_samples * n_features ** 2)`` for PCA. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_incremental_pca.py`. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.16 + + Parameters + ---------- + n_components : int, default=None + Number of components to keep. If ``n_components`` is ``None``, + then ``n_components`` is set to ``min(n_samples, n_features)``. + + whiten : bool, default=False + When True (False by default) the ``components_`` vectors are divided + by ``n_samples`` times ``components_`` to ensure uncorrelated outputs + with unit component-wise variances. + + Whitening will remove some information from the transformed signal + (the relative variance scales of the components) but can sometimes + improve the predictive accuracy of the downstream estimators by + making data respect some hard-wired assumptions. + + copy : bool, default=True + If False, X will be overwritten. ``copy=False`` can be used to + save memory but is unsafe for general use. + + batch_size : int, default=None + The number of samples to use for each batch. Only used when calling + ``fit``. If ``batch_size`` is ``None``, then ``batch_size`` + is inferred from the data and set to ``5 * n_features``, to provide a + balance between approximation accuracy and memory consumption. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Principal axes in feature space, representing the directions of + maximum variance in the data. Equivalently, the right singular + vectors of the centered input data, parallel to its eigenvectors. + The components are sorted by decreasing ``explained_variance_``. + + explained_variance_ : ndarray of shape (n_components,) + Variance explained by each of the selected components. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + If all components are stored, the sum of explained variances is equal + to 1.0. + + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, aggregate over calls to ``partial_fit``. + + var_ : ndarray of shape (n_features,) + Per-feature empirical variance, aggregate over calls to + ``partial_fit``. + + noise_variance_ : float + The estimated noise covariance following the Probabilistic PCA model + from Tipping and Bishop 1999. See "Pattern Recognition and + Machine Learning" by C. Bishop, 12.2.1 p. 574 or + http://www.miketipping.com/papers/met-mppca.pdf. + + n_components_ : int + The estimated number of components. Relevant when + ``n_components=None``. + + n_samples_seen_ : int + The number of samples processed by the estimator. Will be reset on + new calls to fit, but increments across ``partial_fit`` calls. + + batch_size_ : int + Inferred batch size from ``batch_size``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA : Principal component analysis (PCA). + KernelPCA : Kernel Principal component analysis (KPCA). + SparsePCA : Sparse Principal Components Analysis (SparsePCA). + TruncatedSVD : Dimensionality reduction using truncated SVD. + + Notes + ----- + Implements the incremental PCA model from: + *D. Ross, J. Lim, R. Lin, M. 
Yang, Incremental Learning for Robust Visual + Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3, + pp. 125-141, May 2008.* + See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf + + This model is an extension of the Sequential Karhunen-Loeve Transform from: + :doi:`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and + its Application to Images, IEEE Transactions on Image Processing, Volume 9, + Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>` + + We have specifically abstained from an optimization used by authors of both + papers, a QR decomposition used in specific situations to reduce the + algorithmic complexity of the SVD. The source for this technique is + *Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5, + section 5.4.4, pp 252-253.*. This technique has been omitted because it is + advantageous only when decomposing a matrix with ``n_samples`` (rows) + >= 5/3 * ``n_features`` (columns), and hurts the readability of the + implemented algorithm. This would be a good opportunity for future + optimization, if it is deemed necessary. + + References + ---------- + D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual + Tracking, International Journal of Computer Vision, Volume 77, + Issue 1-3, pp. 125-141, May 2008. + + G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5, + Section 5.4.4, pp. 252-253. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import IncrementalPCA + >>> from scipy import sparse + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = IncrementalPCA(n_components=7, batch_size=200) + >>> # either partially fit on smaller batches of data + >>> transformer.partial_fit(X[:100, :]) + IncrementalPCA(batch_size=200, n_components=7) + >>> # or let the fit function itself divide the data into batches + >>> X_sparse = sparse.csr_matrix(X) + >>> X_transformed = transformer.fit_transform(X_sparse) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "whiten": ["boolean"], + "copy": ["boolean"], + "batch_size": [Interval(Integral, 1, None, closed="left"), None], + } + + def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None): + self.n_components = n_components + self.whiten = whiten + self.copy = copy + self.batch_size = batch_size + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X, using minibatches of size batch_size. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
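To make the batch-wise fitting concrete, the sketch below streams invented toy data through ``partial_fit`` in fixed-size chunks, so only one chunk is held in memory at a time; the array shapes and the chunk size of 100 are arbitrary illustrative choices.

```python
# Minimal sketch (toy data, arbitrary chunk size): feed IncrementalPCA one
# mini-batch at a time via partial_fit.
import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 20))

ipca = IncrementalPCA(n_components=5)
for start in range(0, X.shape[0], 100):        # 100-row mini-batches
    ipca.partial_fit(X[start:start + 100])

print(ipca.n_samples_seen_)          # 1000, accumulated across partial_fit calls
print(ipca.transform(X).shape)       # (1000, 5)
```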
+ """ + self.components_ = None + self.n_samples_seen_ = 0 + self.mean_ = 0.0 + self.var_ = 0.0 + self.singular_values_ = None + self.explained_variance_ = None + self.explained_variance_ratio_ = None + self.noise_variance_ = None + + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "lil"], + copy=self.copy, + dtype=[np.float64, np.float32], + ) + n_samples, n_features = X.shape + + if self.batch_size is None: + self.batch_size_ = 5 * n_features + else: + self.batch_size_ = self.batch_size + + for batch in gen_batches( + n_samples, self.batch_size_, min_batch_size=self.n_components or 0 + ): + X_batch = X[batch] + if sparse.issparse(X_batch): + X_batch = X_batch.toarray() + self.partial_fit(X_batch, check_input=False) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, check_input=True): + """Incremental fit with X. All of X is processed as a single batch. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + check_input : bool, default=True + Run check_array on X. + + Returns + ------- + self : object + Returns the instance itself. + """ + first_pass = not hasattr(self, "components_") + + if check_input: + if sparse.issparse(X): + raise TypeError( + "IncrementalPCA.partial_fit does not support " + "sparse input. Either convert data to dense " + "or use IncrementalPCA.fit to do so in batches." + ) + X = self._validate_data( + X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass + ) + n_samples, n_features = X.shape + if first_pass: + self.components_ = None + + if self.n_components is None: + if self.components_ is None: + self.n_components_ = min(n_samples, n_features) + else: + self.n_components_ = self.components_.shape[0] + elif not self.n_components <= n_features: + raise ValueError( + "n_components=%r invalid for n_features=%d, need " + "more rows than columns for IncrementalPCA " + "processing" % (self.n_components, n_features) + ) + elif not self.n_components <= n_samples: + raise ValueError( + "n_components=%r must be less or equal to " + "the batch number of samples " + "%d." % (self.n_components, n_samples) + ) + else: + self.n_components_ = self.n_components + + if (self.components_ is not None) and ( + self.components_.shape[0] != self.n_components_ + ): + raise ValueError( + "Number of input features has changed from %i " + "to %i between calls to partial_fit! Try " + "setting n_components to a fixed value." 
+ % (self.components_.shape[0], self.n_components_) + ) + + # This is the first partial_fit + if not hasattr(self, "n_samples_seen_"): + self.n_samples_seen_ = 0 + self.mean_ = 0.0 + self.var_ = 0.0 + + # Update stats - they are 0 if this is the first step + col_mean, col_var, n_total_samples = _incremental_mean_and_var( + X, + last_mean=self.mean_, + last_variance=self.var_, + last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]), + ) + n_total_samples = n_total_samples[0] + + # Whitening + if self.n_samples_seen_ == 0: + # If it is the first step, simply whiten X + X -= col_mean + else: + col_batch_mean = np.mean(X, axis=0) + X -= col_batch_mean + # Build matrix of combined previous basis and new data + mean_correction = np.sqrt( + (self.n_samples_seen_ / n_total_samples) * n_samples + ) * (self.mean_ - col_batch_mean) + X = np.vstack( + ( + self.singular_values_.reshape((-1, 1)) * self.components_, + X, + mean_correction, + ) + ) + + U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False) + U, Vt = svd_flip(U, Vt, u_based_decision=False) + explained_variance = S**2 / (n_total_samples - 1) + explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples) + + self.n_samples_seen_ = n_total_samples + self.components_ = Vt[: self.n_components_] + self.singular_values_ = S[: self.n_components_] + self.mean_ = col_mean + self.var_ = col_var + self.explained_variance_ = explained_variance[: self.n_components_] + self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_] + # we already checked `self.n_components <= n_samples` above + if self.n_components_ not in (n_samples, n_features): + self.noise_variance_ = explained_variance[self.n_components_ :].mean() + else: + self.noise_variance_ = 0.0 + return self + + def transform(self, X): + """Apply dimensionality reduction to X. + + X is projected on the first principal components previously extracted + from a training set, using minibatches of size batch_size if X is + sparse. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Projection of X in the first principal components. + + Examples + -------- + + >>> import numpy as np + >>> from sklearn.decomposition import IncrementalPCA + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], + ... 
[1, 1], [2, 1], [3, 2]]) + >>> ipca = IncrementalPCA(n_components=2, batch_size=3) + >>> ipca.fit(X) + IncrementalPCA(batch_size=3, n_components=2) + >>> ipca.transform(X) # doctest: +SKIP + """ + if sparse.issparse(X): + n_samples = X.shape[0] + output = [] + for batch in gen_batches( + n_samples, self.batch_size_, min_batch_size=self.n_components or 0 + ): + output.append(super().transform(X[batch].toarray())) + return np.vstack(output) + else: + return super().transform(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..8fc4aa26a6dfb87428ce59c58d18632cffdc2ad6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py @@ -0,0 +1,572 @@ +"""Kernel Principal Components Analysis.""" + +# Author: Mathieu Blondel +# Sylvain Marie +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.linalg import eigh +from scipy.sparse.linalg import eigsh + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import NotFittedError +from ..metrics.pairwise import pairwise_kernels +from ..preprocessing import KernelCenterer +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import _randomized_eigsh, svd_flip +from ..utils.validation import ( + _check_psd_eigenvalues, + check_is_fitted, +) + + +class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Kernel Principal component analysis (KPCA) [1]_. + + Non-linear dimensionality reduction through the use of kernels (see + :ref:`metrics`). + + It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD + or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the + truncated SVD, depending on the shape of the input data and the number of + components to extract. It can also use a randomized truncated SVD by the + method proposed in [3]_, see `eigen_solver`. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_kernel_pca.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of components. If None, all non-zero components are kept. + + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \ + or callable, default='linear' + Kernel used for PCA. + + gamma : float, default=None + Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other + kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``. + + degree : float, default=3 + Degree for poly kernels. Ignored by other kernels. + + coef0 : float, default=1 + Independent term in poly and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict, default=None + Parameters (keyword arguments) and + values for kernel passed as callable object. + Ignored by other kernels. + + alpha : float, default=1.0 + Hyperparameter of the ridge regression that learns the + inverse transform (when fit_inverse_transform=True). + + fit_inverse_transform : bool, default=False + Learn the inverse transform for non-precomputed kernels + (i.e. learn to find the pre-image of a point). This method is based + on [2]_. 
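The pre-image learning enabled by ``fit_inverse_transform`` can be sketched on toy data as follows; the ``make_circles`` dataset and the ``gamma``/``alpha`` values are illustrative choices, not values prescribed by the patch.

```python
# Minimal sketch (toy data; gamma and alpha chosen for illustration): with
# fit_inverse_transform=True, KernelPCA learns a kernel-ridge pre-image map,
# so projected points can be approximately mapped back to input space.
import numpy as np
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA

X, _ = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)

kpca = KernelPCA(kernel="rbf", gamma=10, fit_inverse_transform=True, alpha=0.1)
X_kpca = kpca.fit_transform(X)           # project onto kernel principal components
X_back = kpca.inverse_transform(X_kpca)  # approximate pre-images in input space

print(X_kpca.shape, X_back.shape)        # (400, n_components_kept) and (400, 2)
print(np.mean((X - X_back) ** 2))        # approximate reconstruction error
```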
+ + eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \ + default='auto' + Select eigensolver to use. If `n_components` is much + less than the number of training samples, randomized (or arpack to a + smaller extent) may be more efficient than the dense eigensolver. + Randomized SVD is performed according to the method of Halko et al + [3]_. + + auto : + the solver is selected by a default policy based on n_samples + (the number of training samples) and `n_components`: + if the number of components to extract is less than 10 (strict) and + the number of samples is more than 200 (strict), the 'arpack' + method is enabled. Otherwise the exact full eigenvalue + decomposition is computed and optionally truncated afterwards + ('dense' method). + dense : + run exact full eigenvalue decomposition calling the standard + LAPACK solver via `scipy.linalg.eigh`, and select the components + by postprocessing + arpack : + run SVD truncated to n_components calling ARPACK solver using + `scipy.sparse.linalg.eigsh`. It requires strictly + 0 < n_components < n_samples + randomized : + run randomized SVD by the method of Halko et al. [3]_. The current + implementation selects eigenvalues based on their module; therefore + using this method can lead to unexpected results if the kernel is + not positive semi-definite. See also [4]_. + + .. versionchanged:: 1.0 + `'randomized'` was added. + + tol : float, default=0 + Convergence tolerance for arpack. + If 0, optimal value will be chosen by arpack. + + max_iter : int, default=None + Maximum number of iterations for arpack. + If None, optimal value will be chosen by arpack. + + iterated_power : int >= 0, or 'auto', default='auto' + Number of iterations for the power method computed by + svd_solver == 'randomized'. When 'auto', it is set to 7 when + `n_components < 0.1 * min(X.shape)`, other it is set to 4. + + .. versionadded:: 1.0 + + remove_zero_eig : bool, default=False + If True, then all components with zero eigenvalues are removed, so + that the number of components in the output may be < n_components + (and sometimes even zero due to numerical instability). + When n_components is None, this parameter is ignored and components + with zero eigenvalues are removed regardless. + + random_state : int, RandomState instance or None, default=None + Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + .. versionadded:: 0.18 + + copy_X : bool, default=True + If True, input X is copied and stored by the model in the `X_fit_` + attribute. If no further changes will be done to X, setting + `copy_X=False` saves memory by storing a reference. + + .. versionadded:: 0.18 + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.18 + + Attributes + ---------- + eigenvalues_ : ndarray of shape (n_components,) + Eigenvalues of the centered kernel matrix in decreasing order. + If `n_components` and `remove_zero_eig` are not set, + then all values are stored. + + eigenvectors_ : ndarray of shape (n_samples, n_components) + Eigenvectors of the centered kernel matrix. If `n_components` and + `remove_zero_eig` are not set, then all components are stored. + + dual_coef_ : ndarray of shape (n_samples, n_features) + Inverse transform matrix. 
Only available when + ``fit_inverse_transform`` is True. + + X_transformed_fit_ : ndarray of shape (n_samples, n_components) + Projection of the fitted data on the kernel principal components. + Only available when ``fit_inverse_transform`` is True. + + X_fit_ : ndarray of shape (n_samples, n_features) + The data used to fit the model. If `copy_X=False`, then `X_fit_` is + a reference. This attribute is used for the calls to transform. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + gamma_ : float + Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma` + is explicitly provided, this is just the same as `gamma`. When `gamma` + is `None`, this is the actual value of kernel coefficient. + + .. versionadded:: 1.3 + + See Also + -------- + FastICA : A fast algorithm for Independent Component Analysis. + IncrementalPCA : Incremental Principal Component Analysis. + NMF : Non-Negative Matrix Factorization. + PCA : Principal Component Analysis. + SparsePCA : Sparse Principal Component Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + + References + ---------- + .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller. + "Kernel principal component analysis." + International conference on artificial neural networks. + Springer, Berlin, Heidelberg, 1997. + `_ + + .. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. + "Learning to find pre-images." + Advances in neural information processing systems 16 (2004): 449-456. + `_ + + .. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp. + "Finding structure with randomness: Probabilistic algorithms for + constructing approximate matrix decompositions." + SIAM review 53.2 (2011): 217-288. <0909.4061>` + + .. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert. + "A randomized algorithm for the decomposition of matrices." + Applied and Computational Harmonic Analysis 30.1 (2011): 47-68. 
+ `_ + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import KernelPCA + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = KernelPCA(n_components=7, kernel='linear') + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "kernel": [ + StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}), + callable, + ], + "gamma": [ + Interval(Real, 0, None, closed="left"), + None, + ], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + "alpha": [Interval(Real, 0, None, closed="left")], + "fit_inverse_transform": ["boolean"], + "eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "iterated_power": [ + Interval(Integral, 0, None, closed="left"), + StrOptions({"auto"}), + ], + "remove_zero_eig": ["boolean"], + "random_state": ["random_state"], + "copy_X": ["boolean"], + "n_jobs": [None, Integral], + } + + def __init__( + self, + n_components=None, + *, + kernel="linear", + gamma=None, + degree=3, + coef0=1, + kernel_params=None, + alpha=1.0, + fit_inverse_transform=False, + eigen_solver="auto", + tol=0, + max_iter=None, + iterated_power="auto", + remove_zero_eig=False, + random_state=None, + copy_X=True, + n_jobs=None, + ): + self.n_components = n_components + self.kernel = kernel + self.kernel_params = kernel_params + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.alpha = alpha + self.fit_inverse_transform = fit_inverse_transform + self.eigen_solver = eigen_solver + self.tol = tol + self.max_iter = max_iter + self.iterated_power = iterated_power + self.remove_zero_eig = remove_zero_eig + self.random_state = random_state + self.n_jobs = n_jobs + self.copy_X = copy_X + + def _get_kernel(self, X, Y=None): + if callable(self.kernel): + params = self.kernel_params or {} + else: + params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0} + return pairwise_kernels( + X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params + ) + + def _fit_transform(self, K): + """Fit's using kernel K""" + # center kernel + K = self._centerer.fit_transform(K) + + # adjust n_components according to user inputs + if self.n_components is None: + n_components = K.shape[0] # use all dimensions + else: + n_components = min(K.shape[0], self.n_components) + + # compute eigenvectors + if self.eigen_solver == "auto": + if K.shape[0] > 200 and n_components < 10: + eigen_solver = "arpack" + else: + eigen_solver = "dense" + else: + eigen_solver = self.eigen_solver + + if eigen_solver == "dense": + # Note: subset_by_index specifies the indices of smallest/largest to return + self.eigenvalues_, self.eigenvectors_ = eigh( + K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1) + ) + elif eigen_solver == "arpack": + v0 = _init_arpack_v0(K.shape[0], self.random_state) + self.eigenvalues_, self.eigenvectors_ = eigsh( + K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0 + ) + elif eigen_solver == "randomized": + self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh( + K, + n_components=n_components, + n_iter=self.iterated_power, + random_state=self.random_state, + 
selection="module", + ) + + # make sure that the eigenvalues are ok and fix numerical issues + self.eigenvalues_ = _check_psd_eigenvalues( + self.eigenvalues_, enable_warnings=False + ) + + # flip eigenvectors' sign to enforce deterministic output + self.eigenvectors_, _ = svd_flip( + self.eigenvectors_, np.zeros_like(self.eigenvectors_).T + ) + + # sort eigenvectors in descending order + indices = self.eigenvalues_.argsort()[::-1] + self.eigenvalues_ = self.eigenvalues_[indices] + self.eigenvectors_ = self.eigenvectors_[:, indices] + + # remove eigenvectors with a zero eigenvalue (null space) if required + if self.remove_zero_eig or self.n_components is None: + self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0] + self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0] + + # Maintenance note on Eigenvectors normalization + # ---------------------------------------------- + # there is a link between + # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)' + # if v is an eigenvector of K + # then Phi(X)v is an eigenvector of Phi(X)Phi(X)' + # if u is an eigenvector of Phi(X)Phi(X)' + # then Phi(X)'u is an eigenvector of Phi(X)'Phi(X) + # + # At this stage our self.eigenvectors_ (the v) have norm 1, we need to scale + # them so that eigenvectors in kernel feature space (the u) have norm=1 + # instead + # + # We COULD scale them here: + # self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_) + # + # But choose to perform that LATER when needed, in `fit()` and in + # `transform()`. + + return K + + def _fit_inverse_transform(self, X_transformed, X): + if hasattr(X, "tocsr"): + raise NotImplementedError( + "Inverse transform not implemented for sparse matrices!" + ) + + n_samples = X_transformed.shape[0] + K = self._get_kernel(X_transformed) + K.flat[:: n_samples + 1] += self.alpha + self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True) + self.X_transformed_fit_ = X_transformed + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + if self.fit_inverse_transform and self.kernel == "precomputed": + raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.") + X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X) + self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma + self._centerer = KernelCenterer().set_output(transform="default") + K = self._get_kernel(X) + self._fit_transform(K) + + if self.fit_inverse_transform: + # no need to use the kernel to transform X, use shortcut expression + X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_) + + self._fit_inverse_transform(X_transformed, X) + + self.X_fit_ = X + return self + + def fit_transform(self, X, y=None, **params): + """Fit the model from data in X and transform X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. 
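The "shortcut expression" used above in ``fit`` (and again in ``fit_transform``) says that, on the training data itself, the projection equals ``eigenvectors_ * sqrt(eigenvalues_)``. A hedged sketch verifying this; the digits data and the ``n_components``/``kernel`` choices are only for illustration.

```python
# Minimal sketch: on the training data, transform() agrees with the shortcut
# eigenvectors_ * sqrt(eigenvalues_) used in fit()/fit_transform().
import numpy as np
from sklearn.datasets import load_digits
from sklearn.decomposition import KernelPCA

X, _ = load_digits(return_X_y=True)

kpca = KernelPCA(n_components=7, kernel="linear").fit(X)
shortcut = kpca.eigenvectors_ * np.sqrt(kpca.eigenvalues_)

assert np.allclose(kpca.transform(X), shortcut)
```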
+ + **params : kwargs + Parameters (keyword arguments) and values passed to + the fit_transform instance. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + """ + self.fit(X, **params) + + # no need to use the kernel to transform X, use shortcut expression + X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_) + + if self.fit_inverse_transform: + self._fit_inverse_transform(X_transformed, X) + + return X_transformed + + def transform(self, X): + """Transform X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + + # Compute centered gram matrix between X and training data X_fit_ + K = self._centerer.transform(self._get_kernel(X, self.X_fit_)) + + # scale eigenvectors (properly account for null-space for dot product) + non_zeros = np.flatnonzero(self.eigenvalues_) + scaled_alphas = np.zeros_like(self.eigenvectors_) + scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt( + self.eigenvalues_[non_zeros] + ) + + # Project with a scalar product between K and the scaled eigenvectors + return np.dot(K, scaled_alphas) + + def inverse_transform(self, X): + """Transform X back to original space. + + ``inverse_transform`` approximates the inverse transformation using + a learned pre-image. The pre-image is learned by kernel ridge + regression of the original data on their low-dimensional representation + vectors. + + .. note: + :meth:`~sklearn.decomposition.fit` internally uses a centered + kernel. As the centered kernel no longer contains the information + of the mean of kernel features, such information is not taken into + account in reconstruction. + + .. note:: + When users want to compute inverse transformation for 'linear' + kernel, it is recommended that they use + :class:`~sklearn.decomposition.PCA` instead. Unlike + :class:`~sklearn.decomposition.PCA`, + :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform`` + does not reconstruct the mean of data when 'linear' kernel is used + due to the use of centered kernel. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_components) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Returns the instance itself. + + References + ---------- + `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. + "Learning to find pre-images." + Advances in neural information processing systems 16 (2004): 449-456. + `_ + """ + if not self.fit_inverse_transform: + raise NotFittedError( + "The fit_inverse_transform parameter was not" + " set to True when instantiating and hence " + "the inverse transform is not available." 
+ ) + + K = self._get_kernel(X, self.X_transformed_fit_) + return np.dot(K, self.dual_coef_) + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + "pairwise": self.kernel == "precomputed", + } + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.eigenvalues_.shape[0] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_lda.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_lda.py new file mode 100644 index 0000000000000000000000000000000000000000..9e161c178b9e327e4a5e6f6f0c0b3ed9c1cbd57f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_lda.py @@ -0,0 +1,929 @@ +""" + +============================================================= +Online Latent Dirichlet Allocation with variational inference +============================================================= + +This implementation is modified from Matthew D. Hoffman's onlineldavb code +Link: https://github.com/blei-lab/onlineldavb +""" + +# Author: Chyi-Kwei Yau +# Author: Matthew D. Hoffman (original onlineldavb implementation) +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from joblib import effective_n_jobs +from scipy.special import gammaln, logsumexp + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..utils import check_random_state, gen_batches, gen_even_slices +from ..utils._param_validation import Interval, StrOptions +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted, check_non_negative +from ._online_lda_fast import ( + _dirichlet_expectation_1d as cy_dirichlet_expectation_1d, +) +from ._online_lda_fast import ( + _dirichlet_expectation_2d, +) +from ._online_lda_fast import ( + mean_change as cy_mean_change, +) + +EPS = np.finfo(float).eps + + +def _update_doc_distribution( + X, + exp_topic_word_distr, + doc_topic_prior, + max_doc_update_iter, + mean_change_tol, + cal_sstats, + random_state, +): + """E-step: update document-topic distribution. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + exp_topic_word_distr : ndarray of shape (n_topics, n_features) + Exponential value of expectation of log topic word distribution. + In the literature, this is `exp(E[log(beta)])`. + + doc_topic_prior : float + Prior of document topic distribution `theta`. + + max_doc_update_iter : int + Max number of iterations for updating document topic distribution in + the E-step. + + mean_change_tol : float + Stopping tolerance for updating document topic distribution in E-step. + + cal_sstats : bool + Parameter that indicate to calculate sufficient statistics or not. + Set `cal_sstats` to `True` when we need to run M-step. + + random_state : RandomState instance or None + Parameter that indicate how to initialize document topic distribution. + Set `random_state` to None will initialize document topic distribution + to a constant number. + + Returns + ------- + (doc_topic_distr, suff_stats) : + `doc_topic_distr` is unnormalized topic distribution for each document. + In the literature, this is `gamma`. we can calculate `E[log(theta)]` + from it. + `suff_stats` is expected sufficient statistics for the M-step. + When `cal_sstats == False`, this will be None. 
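The quantity written above as `exp(E[log(theta)])` (and likewise `exp(E[log(beta)])`) is the standard Dirichlet expectation: for `theta ~ Dirichlet(gamma)`, `E[log(theta_k)] = psi(gamma_k) - psi(sum(gamma))`, where `psi` is the digamma function. A tiny sketch with an invented `gamma` vector:

```python
# Minimal sketch (invented gamma): the Dirichlet expectation used above,
# exp(E[log(theta)]) = exp(psi(gamma) - psi(gamma.sum())) for theta ~ Dir(gamma).
import numpy as np
from scipy.special import psi   # digamma

gamma = np.array([100.7, 0.9, 2.4])   # variational parameters for one document
exp_E_log_theta = np.exp(psi(gamma) - psi(gamma.sum()))
print(exp_E_log_theta)
```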
+ + """ + is_sparse_x = sp.issparse(X) + n_samples, n_features = X.shape + n_topics = exp_topic_word_distr.shape[0] + + if random_state: + doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype( + X.dtype, copy=False + ) + else: + doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype) + + # In the literature, this is `exp(E[log(theta)])` + exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr)) + + # diff on `component_` (only calculate it when `cal_diff` is True) + suff_stats = ( + np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None + ) + + if is_sparse_x: + X_data = X.data + X_indices = X.indices + X_indptr = X.indptr + + # These cython functions are called in a nested loop on usually very small arrays + # (length=n_topics). In that case, finding the appropriate signature of the + # fused-typed function can be more costly than its execution, hence the dispatch + # is done outside of the loop. + ctype = "float" if X.dtype == np.float32 else "double" + mean_change = cy_mean_change[ctype] + dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype] + eps = np.finfo(X.dtype).eps + + for idx_d in range(n_samples): + if is_sparse_x: + ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]] + cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]] + else: + ids = np.nonzero(X[idx_d, :])[0] + cnts = X[idx_d, ids] + + doc_topic_d = doc_topic_distr[idx_d, :] + # The next one is a copy, since the inner loop overwrites it. + exp_doc_topic_d = exp_doc_topic[idx_d, :].copy() + exp_topic_word_d = exp_topic_word_distr[:, ids] + + # Iterate between `doc_topic_d` and `norm_phi` until convergence + for _ in range(0, max_doc_update_iter): + last_d = doc_topic_d + + # The optimal phi_{dwk} is proportional to + # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]). + norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps + + doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T) + # Note: adds doc_topic_prior to doc_topic_d, in-place. + dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d) + + if mean_change(last_d, doc_topic_d) < mean_change_tol: + break + doc_topic_distr[idx_d, :] = doc_topic_d + + # Contribution of document d to the expected sufficient + # statistics for the M step. + if cal_sstats: + norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps + suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi) + + return (doc_topic_distr, suff_stats) + + +class LatentDirichletAllocation( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Latent Dirichlet Allocation with online variational Bayes algorithm. + + The implementation is based on [1]_ and [2]_. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=10 + Number of topics. + + .. versionchanged:: 0.19 + ``n_topics`` was renamed to ``n_components`` + + doc_topic_prior : float, default=None + Prior of document topic distribution `theta`. If the value is None, + defaults to `1 / n_components`. + In [1]_, this is called `alpha`. + + topic_word_prior : float, default=None + Prior of topic word distribution `beta`. If the value is None, defaults + to `1 / n_components`. + In [1]_, this is called `eta`. + + learning_method : {'batch', 'online'}, default='batch' + Method used to update `_component`. Only used in :meth:`fit` method. + In general, if the data size is large, the online update will be much + faster than the batch update. 
+ + Valid options:: + + 'batch': Batch variational Bayes method. Use all training data in + each EM update. + Old `components_` will be overwritten in each iteration. + 'online': Online variational Bayes method. In each EM update, use + mini-batch of training data to update the ``components_`` + variable incrementally. The learning rate is controlled by the + ``learning_decay`` and the ``learning_offset`` parameters. + + .. versionchanged:: 0.20 + The default learning method is now ``"batch"``. + + learning_decay : float, default=0.7 + It is a parameter that control learning rate in the online learning + method. The value should be set between (0.5, 1.0] to guarantee + asymptotic convergence. When the value is 0.0 and batch_size is + ``n_samples``, the update method is same as batch learning. In the + literature, this is called kappa. + + learning_offset : float, default=10.0 + A (positive) parameter that downweights early iterations in online + learning. It should be greater than 1.0. In the literature, this is + called tau_0. + + max_iter : int, default=10 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the :meth:`fit` method, and not the + :meth:`partial_fit` method. + + batch_size : int, default=128 + Number of documents to use in each EM iteration. Only used in online + learning. + + evaluate_every : int, default=-1 + How often to evaluate perplexity. Only used in `fit` method. + set it to 0 or negative number to not evaluate perplexity in + training at all. Evaluating perplexity can help you check convergence + in training process, but it will also increase total training time. + Evaluating perplexity in every iteration might increase training time + up to two-fold. + + total_samples : int, default=1e6 + Total number of documents. Only used in the :meth:`partial_fit` method. + + perp_tol : float, default=1e-1 + Perplexity tolerance in batch learning. Only used when + ``evaluate_every`` is greater than 0. + + mean_change_tol : float, default=1e-3 + Stopping tolerance for updating document topic distribution in E-step. + + max_doc_update_iter : int, default=100 + Max number of iterations for updating document topic distribution in + the E-step. + + n_jobs : int, default=None + The number of jobs to use in the E-step. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + Verbosity level. + + random_state : int, RandomState instance or None, default=None + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Variational parameters for topic word distribution. Since the complete + conditional for topic word distribution is a Dirichlet, + ``components_[i, j]`` can be viewed as pseudocount that represents the + number of times word `j` was assigned to topic `i`. + It can also be viewed as distribution over the words for each topic + after normalization: + ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``. + + exp_dirichlet_component_ : ndarray of shape (n_components, n_features) + Exponential value of expectation of log topic word distribution. + In the literature, this is `exp(E[log(beta)])`. + + n_batch_iter_ : int + Number of iterations of the EM step. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of passes over the dataset. + + bound_ : float + Final perplexity score on training set. + + doc_topic_prior_ : float + Prior of document topic distribution `theta`. If the value is None, + it is `1 / n_components`. + + random_state_ : RandomState instance + RandomState instance that is generated either from a seed, the random + number generator or by `np.random`. + + topic_word_prior_ : float + Prior of topic word distribution `beta`. If the value is None, it is + `1 / n_components`. + + See Also + -------- + sklearn.discriminant_analysis.LinearDiscriminantAnalysis: + A classifier with a linear decision boundary, generated by fitting + class conditional densities to the data and using Bayes' rule. + + References + ---------- + .. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D. + Hoffman, David M. Blei, Francis Bach, 2010 + https://github.com/blei-lab/onlineldavb + + .. [2] "Stochastic Variational Inference", Matthew D. Hoffman, + David M. Blei, Chong Wang, John Paisley, 2013 + + Examples + -------- + >>> from sklearn.decomposition import LatentDirichletAllocation + >>> from sklearn.datasets import make_multilabel_classification + >>> # This produces a feature matrix of token counts, similar to what + >>> # CountVectorizer would produce on text. + >>> X, _ = make_multilabel_classification(random_state=0) + >>> lda = LatentDirichletAllocation(n_components=5, + ... random_state=0) + >>> lda.fit(X) + LatentDirichletAllocation(...) + >>> # get topics for some given samples: + >>> lda.transform(X[-2:]) + array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846], + [0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]]) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 0, None, closed="neither")], + "doc_topic_prior": [None, Interval(Real, 0, 1, closed="both")], + "topic_word_prior": [None, Interval(Real, 0, 1, closed="both")], + "learning_method": [StrOptions({"batch", "online"})], + "learning_decay": [Interval(Real, 0, 1, closed="both")], + "learning_offset": [Interval(Real, 1.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "batch_size": [Interval(Integral, 0, None, closed="neither")], + "evaluate_every": [Interval(Integral, None, None, closed="neither")], + "total_samples": [Interval(Real, 0, None, closed="neither")], + "perp_tol": [Interval(Real, 0, None, closed="left")], + "mean_change_tol": [Interval(Real, 0, None, closed="left")], + "max_doc_update_iter": [Interval(Integral, 0, None, closed="left")], + "n_jobs": [None, Integral], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=10, + *, + doc_topic_prior=None, + topic_word_prior=None, + learning_method="batch", + learning_decay=0.7, + learning_offset=10.0, + max_iter=10, + batch_size=128, + evaluate_every=-1, + total_samples=1e6, + perp_tol=1e-1, + mean_change_tol=1e-3, + max_doc_update_iter=100, + n_jobs=None, + verbose=0, + random_state=None, + ): + self.n_components = n_components + self.doc_topic_prior = doc_topic_prior + self.topic_word_prior = topic_word_prior + self.learning_method = learning_method + self.learning_decay = learning_decay + self.learning_offset = learning_offset + self.max_iter = max_iter + self.batch_size = 
batch_size + self.evaluate_every = evaluate_every + self.total_samples = total_samples + self.perp_tol = perp_tol + self.mean_change_tol = mean_change_tol + self.max_doc_update_iter = max_doc_update_iter + self.n_jobs = n_jobs + self.verbose = verbose + self.random_state = random_state + + def _init_latent_vars(self, n_features, dtype=np.float64): + """Initialize latent variables.""" + + self.random_state_ = check_random_state(self.random_state) + self.n_batch_iter_ = 1 + self.n_iter_ = 0 + + if self.doc_topic_prior is None: + self.doc_topic_prior_ = 1.0 / self.n_components + else: + self.doc_topic_prior_ = self.doc_topic_prior + + if self.topic_word_prior is None: + self.topic_word_prior_ = 1.0 / self.n_components + else: + self.topic_word_prior_ = self.topic_word_prior + + init_gamma = 100.0 + init_var = 1.0 / init_gamma + # In the literature, this is called `lambda` + self.components_ = self.random_state_.gamma( + init_gamma, init_var, (self.n_components, n_features) + ).astype(dtype, copy=False) + + # In the literature, this is `exp(E[log(beta)])` + self.exp_dirichlet_component_ = np.exp( + _dirichlet_expectation_2d(self.components_) + ) + + def _e_step(self, X, cal_sstats, random_init, parallel=None): + """E-step in EM update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + cal_sstats : bool + Parameter that indicate whether to calculate sufficient statistics + or not. Set ``cal_sstats`` to True when we need to run M-step. + + random_init : bool + Parameter that indicate whether to initialize document topic + distribution randomly in the E-step. Set it to True in training + steps. + + parallel : joblib.Parallel, default=None + Pre-initialized instance of joblib.Parallel. + + Returns + ------- + (doc_topic_distr, suff_stats) : + `doc_topic_distr` is unnormalized topic distribution for each + document. In the literature, this is called `gamma`. + `suff_stats` is expected sufficient statistics for the M-step. + When `cal_sstats == False`, it will be None. + + """ + + # Run e-step in parallel + random_state = self.random_state_ if random_init else None + + # TODO: make Parallel._effective_n_jobs public instead? + n_jobs = effective_n_jobs(self.n_jobs) + if parallel is None: + parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) + results = parallel( + delayed(_update_doc_distribution)( + X[idx_slice, :], + self.exp_dirichlet_component_, + self.doc_topic_prior_, + self.max_doc_update_iter, + self.mean_change_tol, + cal_sstats, + random_state, + ) + for idx_slice in gen_even_slices(X.shape[0], n_jobs) + ) + + # merge result + doc_topics, sstats_list = zip(*results) + doc_topic_distr = np.vstack(doc_topics) + + if cal_sstats: + # This step finishes computing the sufficient statistics for the + # M-step. + suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype) + for sstats in sstats_list: + suff_stats += sstats + suff_stats *= self.exp_dirichlet_component_ + else: + suff_stats = None + + return (doc_topic_distr, suff_stats) + + def _em_step(self, X, total_samples, batch_update, parallel=None): + """EM update for 1 iteration. + + update `_component` by batch VB or online VB. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + total_samples : int + Total number of documents. It is only used when + batch_update is `False`. + + batch_update : bool + Parameter that controls updating method. 
+ `True` for batch learning, `False` for online learning. + + parallel : joblib.Parallel, default=None + Pre-initialized instance of joblib.Parallel + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Unnormalized document topic distribution. + """ + + # E-step + _, suff_stats = self._e_step( + X, cal_sstats=True, random_init=True, parallel=parallel + ) + + # M-step + if batch_update: + self.components_ = self.topic_word_prior_ + suff_stats + else: + # online update + # In the literature, the weight is `rho` + weight = np.power( + self.learning_offset + self.n_batch_iter_, -self.learning_decay + ) + doc_ratio = float(total_samples) / X.shape[0] + self.components_ *= 1 - weight + self.components_ += weight * ( + self.topic_word_prior_ + doc_ratio * suff_stats + ) + + # update `component_` related variables + self.exp_dirichlet_component_ = np.exp( + _dirichlet_expectation_2d(self.components_) + ) + self.n_batch_iter_ += 1 + return + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + "requires_positive_X": True, + } + + def _check_non_neg_array(self, X, reset_n_features, whom): + """check X format + + check X format and make sure no negative value in X. + + Parameters + ---------- + X : array-like or sparse matrix + + """ + dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype + + X = self._validate_data( + X, + reset=reset_n_features, + accept_sparse="csr", + dtype=dtype, + ) + check_non_negative(X, whom) + + return X + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Online VB with Mini-Batch update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Partially fitted estimator. + """ + first_time = not hasattr(self, "components_") + + X = self._check_non_neg_array( + X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit" + ) + n_samples, n_features = X.shape + batch_size = self.batch_size + + # initialize parameters or check + if first_time: + self._init_latent_vars(n_features, dtype=X.dtype) + + if n_features != self.components_.shape[1]: + raise ValueError( + "The provided data has %d dimensions while " + "the model was trained with feature size %d." + % (n_features, self.components_.shape[1]) + ) + + n_jobs = effective_n_jobs(self.n_jobs) + with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: + for idx_slice in gen_batches(n_samples, batch_size): + self._em_step( + X[idx_slice, :], + total_samples=self.total_samples, + batch_update=False, + parallel=parallel, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn model for the data X with variational Bayes method. + + When `learning_method` is 'online', use mini-batch update. + Otherwise, use batch update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Fitted estimator. 
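+
+        Notes
+        -----
+        A minimal illustrative example (an editorial addition, not part of
+        the upstream docstring); the toy count matrix ``X_counts`` and the
+        ``rng`` helper are made up for illustration:
+
+        >>> import numpy as np
+        >>> rng = np.random.RandomState(0)
+        >>> X_counts = rng.randint(0, 5, size=(20, 30))
+        >>> LatentDirichletAllocation(n_components=3, random_state=0).fit(X_counts)
+        LatentDirichletAllocation(...)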
+ """ + X = self._check_non_neg_array( + X, reset_n_features=True, whom="LatentDirichletAllocation.fit" + ) + n_samples, n_features = X.shape + max_iter = self.max_iter + evaluate_every = self.evaluate_every + learning_method = self.learning_method + + batch_size = self.batch_size + + # initialize parameters + self._init_latent_vars(n_features, dtype=X.dtype) + # change to perplexity later + last_bound = None + n_jobs = effective_n_jobs(self.n_jobs) + with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: + for i in range(max_iter): + if learning_method == "online": + for idx_slice in gen_batches(n_samples, batch_size): + self._em_step( + X[idx_slice, :], + total_samples=n_samples, + batch_update=False, + parallel=parallel, + ) + else: + # batch update + self._em_step( + X, total_samples=n_samples, batch_update=True, parallel=parallel + ) + + # check perplexity + if evaluate_every > 0 and (i + 1) % evaluate_every == 0: + doc_topics_distr, _ = self._e_step( + X, cal_sstats=False, random_init=False, parallel=parallel + ) + bound = self._perplexity_precomp_distr( + X, doc_topics_distr, sub_sampling=False + ) + if self.verbose: + print( + "iteration: %d of max_iter: %d, perplexity: %.4f" + % (i + 1, max_iter, bound) + ) + + if last_bound and abs(last_bound - bound) < self.perp_tol: + break + last_bound = bound + + elif self.verbose: + print("iteration: %d of max_iter: %d" % (i + 1, max_iter)) + self.n_iter_ += 1 + + # calculate final perplexity value on train set + doc_topics_distr, _ = self._e_step( + X, cal_sstats=False, random_init=False, parallel=parallel + ) + self.bound_ = self._perplexity_precomp_distr( + X, doc_topics_distr, sub_sampling=False + ) + + return self + + def _unnormalized_transform(self, X): + """Transform data X according to fitted model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution for X. + """ + doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False) + + return doc_topic_distr + + def transform(self, X): + """Transform data X according to the fitted model. + + .. versionchanged:: 0.18 + *doc_topic_distr* is now normalized + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution for X. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=False, whom="LatentDirichletAllocation.transform" + ) + doc_topic_distr = self._unnormalized_transform(X) + doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis] + return doc_topic_distr + + def _approx_bound(self, X, doc_topic_distr, sub_sampling): + """Estimate the variational bound. + + Estimate the variational bound over "all documents" using only the + documents passed in as X. Since log-likelihood of each word cannot + be computed directly, we use this bound to estimate it. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution. In the literature, this is called + gamma. + + sub_sampling : bool, default=False + Compensate for subsampling of documents. + It is used in calculate bound in online learning. 
+ + Returns + ------- + score : float + + """ + + def _loglikelihood(prior, distr, dirichlet_distr, size): + # calculate log-likelihood + score = np.sum((prior - distr) * dirichlet_distr) + score += np.sum(gammaln(distr) - gammaln(prior)) + score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1))) + return score + + is_sparse_x = sp.issparse(X) + n_samples, n_components = doc_topic_distr.shape + n_features = self.components_.shape[1] + score = 0 + + dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr) + dirichlet_component_ = _dirichlet_expectation_2d(self.components_) + doc_topic_prior = self.doc_topic_prior_ + topic_word_prior = self.topic_word_prior_ + + if is_sparse_x: + X_data = X.data + X_indices = X.indices + X_indptr = X.indptr + + # E[log p(docs | theta, beta)] + for idx_d in range(0, n_samples): + if is_sparse_x: + ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]] + cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]] + else: + ids = np.nonzero(X[idx_d, :])[0] + cnts = X[idx_d, ids] + temp = ( + dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids] + ) + norm_phi = logsumexp(temp, axis=0) + score += np.dot(cnts, norm_phi) + + # compute E[log p(theta | alpha) - log q(theta | gamma)] + score += _loglikelihood( + doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components + ) + + # Compensate for the subsampling of the population of documents + if sub_sampling: + doc_ratio = float(self.total_samples) / n_samples + score *= doc_ratio + + # E[log p(beta | eta) - log q (beta | lambda)] + score += _loglikelihood( + topic_word_prior, self.components_, dirichlet_component_, n_features + ) + + return score + + def score(self, X, y=None): + """Calculate approximate log-likelihood as score. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + score : float + Use approximate bound as score. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=False, whom="LatentDirichletAllocation.score" + ) + + doc_topic_distr = self._unnormalized_transform(X) + score = self._approx_bound(X, doc_topic_distr, sub_sampling=False) + return score + + def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False): + """Calculate approximate perplexity for data X with ability to accept + precomputed doc_topic_distr + + Perplexity is defined as exp(-1. * log-likelihood per word) + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + doc_topic_distr : ndarray of shape (n_samples, n_components), \ + default=None + Document topic distribution. + If it is None, it will be generated by applying transform on X. + + Returns + ------- + score : float + Perplexity score. + """ + if doc_topic_distr is None: + doc_topic_distr = self._unnormalized_transform(X) + else: + n_samples, n_components = doc_topic_distr.shape + if n_samples != X.shape[0]: + raise ValueError( + "Number of samples in X and doc_topic_distr do not match." 
+ ) + + if n_components != self.n_components: + raise ValueError("Number of topics does not match.") + + current_samples = X.shape[0] + bound = self._approx_bound(X, doc_topic_distr, sub_sampling) + + if sub_sampling: + word_cnt = X.sum() * (float(self.total_samples) / current_samples) + else: + word_cnt = X.sum() + perword_bound = bound / word_cnt + + return np.exp(-1.0 * perword_bound) + + def perplexity(self, X, sub_sampling=False): + """Calculate approximate perplexity for data X. + + Perplexity is defined as exp(-1. * log-likelihood per word) + + .. versionchanged:: 0.19 + *doc_topic_distr* argument has been deprecated and is ignored + because user no longer has access to unnormalized distribution + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + sub_sampling : bool + Do sub-sampling or not. + + Returns + ------- + score : float + Perplexity score. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity" + ) + return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling) + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py new file mode 100644 index 0000000000000000000000000000000000000000..db46540e26708b897ed8389efa669c5312e729f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py @@ -0,0 +1,2443 @@ +""" Non-negative matrix factorization. +""" +# Author: Vlad Niculae +# Lars Buitinck +# Mathieu Blondel +# Tom Dupre la Tour +# License: BSD 3 clause + +import itertools +import time +import warnings +from abc import ABC +from math import sqrt +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy import linalg + +from .._config import config_context +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_array, check_random_state, gen_batches, metadata_routing +from ..utils._param_validation import ( + Hidden, + Interval, + StrOptions, + validate_params, +) +from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm +from ..utils.validation import ( + check_is_fitted, + check_non_negative, +) +from ._cdnmf_fast import _update_cdnmf_fast + +EPSILON = np.finfo(np.float32).eps + + +def norm(x): + """Dot product-based Euclidean norm implementation. + + See: http://fa.bianp.net/blog/2011/computing-the-vector-norm/ + + Parameters + ---------- + x : array-like + Vector for which to compute the norm. + """ + return sqrt(squared_norm(x)) + + +def trace_dot(X, Y): + """Trace of np.dot(X, Y.T). + + Parameters + ---------- + X : array-like + First matrix. + Y : array-like + Second matrix. + """ + return np.dot(X.ravel(), Y.ravel()) + + +def _check_init(A, shape, whom): + A = check_array(A) + if shape[0] != "auto" and A.shape[0] != shape[0]: + raise ValueError( + f"Array with wrong first dimension passed to {whom}. Expected {shape[0]}, " + f"but got {A.shape[0]}." + ) + if shape[1] != "auto" and A.shape[1] != shape[1]: + raise ValueError( + f"Array with wrong second dimension passed to {whom}. Expected {shape[1]}, " + f"but got {A.shape[1]}." 
+ ) + check_non_negative(A, whom) + if np.max(A) == 0: + raise ValueError(f"Array passed to {whom} is full of zeros.") + + +def _beta_divergence(X, W, H, beta, square_root=False): + """Compute the beta-divergence of X and dot(W, H). + + Parameters + ---------- + X : float or array-like of shape (n_samples, n_features) + + W : float or array-like of shape (n_samples, n_components) + + H : float or array-like of shape (n_components, n_features) + + beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'} + Parameter of the beta-divergence. + If beta == 2, this is half the Frobenius *squared* norm. + If beta == 1, this is the generalized Kullback-Leibler divergence. + If beta == 0, this is the Itakura-Saito divergence. + Else, this is the general beta-divergence. + + square_root : bool, default=False + If True, return np.sqrt(2 * res) + For beta == 2, it corresponds to the Frobenius norm. + + Returns + ------- + res : float + Beta divergence of X and np.dot(X, H). + """ + beta = _beta_loss_to_float(beta) + + # The method can be called with scalars + if not sp.issparse(X): + X = np.atleast_2d(X) + W = np.atleast_2d(W) + H = np.atleast_2d(H) + + # Frobenius norm + if beta == 2: + # Avoid the creation of the dense np.dot(W, H) if X is sparse. + if sp.issparse(X): + norm_X = np.dot(X.data, X.data) + norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H) + cross_prod = trace_dot((X @ H.T), W) + res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0 + else: + res = squared_norm(X - np.dot(W, H)) / 2.0 + + if square_root: + return np.sqrt(res * 2) + else: + return res + + if sp.issparse(X): + # compute np.dot(W, H) only where X is nonzero + WH_data = _special_sparse_dot(W, H, X).data + X_data = X.data + else: + WH = np.dot(W, H) + WH_data = WH.ravel() + X_data = X.ravel() + + # do not affect the zeros: here 0 ** (-1) = 0 and not infinity + indices = X_data > EPSILON + WH_data = WH_data[indices] + X_data = X_data[indices] + + # used to avoid division by zero + WH_data[WH_data < EPSILON] = EPSILON + + # generalized Kullback-Leibler divergence + if beta == 1: + # fast and memory efficient computation of np.sum(np.dot(W, H)) + sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) + # computes np.sum(X * log(X / WH)) only where X is nonzero + div = X_data / WH_data + res = np.dot(X_data, np.log(div)) + # add full np.sum(np.dot(W, H)) - np.sum(X) + res += sum_WH - X_data.sum() + + # Itakura-Saito divergence + elif beta == 0: + div = X_data / WH_data + res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div)) + + # beta-divergence, beta not in (0, 1, 2) + else: + if sp.issparse(X): + # slow loop, but memory efficient computation of : + # np.sum(np.dot(W, H) ** beta) + sum_WH_beta = 0 + for i in range(X.shape[1]): + sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) + + else: + sum_WH_beta = np.sum(WH**beta) + + sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) + res = (X_data**beta).sum() - beta * sum_X_WH + res += sum_WH_beta * (beta - 1) + res /= beta * (beta - 1) + + if square_root: + res = max(res, 0) # avoid negative number due to rounding errors + return np.sqrt(2 * res) + else: + return res + + +def _special_sparse_dot(W, H, X): + """Computes np.dot(W, H), only where X is non zero.""" + if sp.issparse(X): + ii, jj = X.nonzero() + n_vals = ii.shape[0] + dot_vals = np.empty(n_vals) + n_components = W.shape[1] + + batch_size = max(n_components, n_vals // n_components) + for start in range(0, n_vals, batch_size): + batch = slice(start, start + batch_size) + dot_vals[batch] = 
np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum( + axis=1 + ) + + WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape) + return WH.tocsr() + else: + return np.dot(W, H) + + +def _beta_loss_to_float(beta_loss): + """Convert string beta_loss to float.""" + beta_loss_map = {"frobenius": 2, "kullback-leibler": 1, "itakura-saito": 0} + if isinstance(beta_loss, str): + beta_loss = beta_loss_map[beta_loss] + return beta_loss + + +def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None): + """Algorithms for NMF initialization. + + Computes an initial guess for the non-negative + rank k matrix approximation for X: X = WH. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix to be decomposed. + + n_components : int + The number of components desired in the approximation. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None + Method used to initialize the procedure. + Valid options: + + - None: 'nndsvda' if n_components <= min(n_samples, n_features), + otherwise 'random'. + + - 'random': non-negative random matrices, scaled with: + sqrt(X.mean() / n_components) + + - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + + - 'nndsvda': NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + + - 'nndsvdar': NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + + - 'custom': use custom matrices W and H + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + eps : float, default=1e-6 + Truncate all values less then this in output to zero. + + random_state : int, RandomState instance or None, default=None + Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + W : array-like of shape (n_samples, n_components) + Initial guesses for solving X ~= WH. + + H : array-like of shape (n_components, n_features) + Initial guesses for solving X ~= WH. + + References + ---------- + C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for + nonnegative matrix factorization - Pattern Recognition, 2008 + http://tinyurl.com/nndsvd + """ + check_non_negative(X, "NMF initialization") + n_samples, n_features = X.shape + + if ( + init is not None + and init != "random" + and n_components > min(n_samples, n_features) + ): + raise ValueError( + "init = '{}' can only be used when " + "n_components <= min(n_samples, n_features)".format(init) + ) + + if init is None: + if n_components <= min(n_samples, n_features): + init = "nndsvda" + else: + init = "random" + + # Random initialization + if init == "random": + avg = np.sqrt(X.mean() / n_components) + rng = check_random_state(random_state) + H = avg * rng.standard_normal(size=(n_components, n_features)).astype( + X.dtype, copy=False + ) + W = avg * rng.standard_normal(size=(n_samples, n_components)).astype( + X.dtype, copy=False + ) + np.abs(H, out=H) + np.abs(W, out=W) + return W, H + + # NNDSVD initialization + U, S, V = randomized_svd(X, n_components, random_state=random_state) + W = np.zeros_like(U) + H = np.zeros_like(V) + + # The leading singular triplet is non-negative + # so it can be used as is for initialization. 
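+    # Added note (not an upstream comment): after the leading triplet, the loop
+    # below splits each remaining singular vector pair into positive and
+    # negative parts, keeps whichever pair has the larger norm product, and
+    # rescales it by sqrt(S[j] * sigma), so W and H stay elementwise
+    # non-negative.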
+ W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0]) + H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :]) + + for j in range(1, n_components): + x, y = U[:, j], V[j, :] + + # extract positive and negative parts of column vectors + x_p, y_p = np.maximum(x, 0), np.maximum(y, 0) + x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0)) + + # and their norms + x_p_nrm, y_p_nrm = norm(x_p), norm(y_p) + x_n_nrm, y_n_nrm = norm(x_n), norm(y_n) + + m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm + + # choose update + if m_p > m_n: + u = x_p / x_p_nrm + v = y_p / y_p_nrm + sigma = m_p + else: + u = x_n / x_n_nrm + v = y_n / y_n_nrm + sigma = m_n + + lbd = np.sqrt(S[j] * sigma) + W[:, j] = lbd * u + H[j, :] = lbd * v + + W[W < eps] = 0 + H[H < eps] = 0 + + if init == "nndsvd": + pass + elif init == "nndsvda": + avg = X.mean() + W[W == 0] = avg + H[H == 0] = avg + elif init == "nndsvdar": + rng = check_random_state(random_state) + avg = X.mean() + W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100) + H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100) + else: + raise ValueError( + "Invalid init parameter: got %r instead of one of %r" + % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar")) + ) + + return W, H + + +def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state): + """Helper function for _fit_coordinate_descent. + + Update W to minimize the objective function, iterating once over all + coordinates. By symmetry, to update H, one can call + _update_coordinate_descent(X.T, Ht, W, ...). + + """ + n_components = Ht.shape[1] + + HHt = np.dot(Ht.T, Ht) + XHt = safe_sparse_dot(X, Ht) + + # L2 regularization corresponds to increase of the diagonal of HHt + if l2_reg != 0.0: + # adds l2_reg only on the diagonal + HHt.flat[:: n_components + 1] += l2_reg + # L1 regularization corresponds to decrease of each element of XHt + if l1_reg != 0.0: + XHt -= l1_reg + + if shuffle: + permutation = random_state.permutation(n_components) + else: + permutation = np.arange(n_components) + # The following seems to be required on 64-bit Windows w/ Python 3.5. + permutation = np.asarray(permutation, dtype=np.intp) + return _update_cdnmf_fast(W, HHt, XHt, permutation) + + +def _fit_coordinate_descent( + X, + W, + H, + tol=1e-4, + max_iter=200, + l1_reg_W=0, + l1_reg_H=0, + l2_reg_W=0, + l2_reg_H=0, + update_H=True, + verbose=0, + shuffle=False, + random_state=None, +): + """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent + + The objective function is minimized with an alternating minimization of W + and H. Each minimization is done with a cyclic (up to a permutation of the + features) Coordinate Descent. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Constant matrix. + + W : array-like of shape (n_samples, n_components) + Initial guess for the solution. + + H : array-like of shape (n_components, n_features) + Initial guess for the solution. + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + l1_reg_W : float, default=0. + L1 regularization parameter for W. + + l1_reg_H : float, default=0. + L1 regularization parameter for H. + + l2_reg_W : float, default=0. + L2 regularization parameter for W. + + l2_reg_H : float, default=0. + L2 regularization parameter for H. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. 
+ + verbose : int, default=0 + The verbosity level. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + random_state : int, RandomState instance or None, default=None + Used to randomize the coordinates in the CD solver, when + ``shuffle`` is set to ``True``. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + The number of iterations done by the algorithm. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + """ + # so W and Ht are both in C order in memory + Ht = check_array(H.T, order="C") + X = check_array(X, accept_sparse="csr") + + rng = check_random_state(random_state) + + for n_iter in range(1, max_iter + 1): + violation = 0.0 + + # Update W + violation += _update_coordinate_descent( + X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng + ) + # Update H + if update_H: + violation += _update_coordinate_descent( + X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng + ) + + if n_iter == 1: + violation_init = violation + + if violation_init == 0: + break + + if verbose: + print("violation:", violation / violation_init) + + if violation / violation_init <= tol: + if verbose: + print("Converged at iteration", n_iter + 1) + break + + return W, Ht.T, n_iter + + +def _multiplicative_update_w( + X, + W, + H, + beta_loss, + l1_reg_W, + l2_reg_W, + gamma, + H_sum=None, + HHt=None, + XHt=None, + update_H=True, +): + """Update W in Multiplicative Update NMF.""" + if beta_loss == 2: + # Numerator + if XHt is None: + XHt = safe_sparse_dot(X, H.T) + if update_H: + # avoid a copy of XHt, which will be re-computed (update_H=True) + numerator = XHt + else: + # preserve the XHt, which is not re-computed (update_H=False) + numerator = XHt.copy() + + # Denominator + if HHt is None: + HHt = np.dot(H, H.T) + denominator = np.dot(W, HHt) + + else: + # Numerator + # if X is sparse, compute WH only where X is non zero + WH_safe_X = _special_sparse_dot(W, H, X) + if sp.issparse(X): + WH_safe_X_data = WH_safe_X.data + X_data = X.data + else: + WH_safe_X_data = WH_safe_X + X_data = X + # copy used in the Denominator + WH = WH_safe_X.copy() + if beta_loss - 1.0 < 0: + WH[WH < EPSILON] = EPSILON + + # to avoid taking a negative power of zero + if beta_loss - 2.0 < 0: + WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON + + if beta_loss == 1: + np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) + elif beta_loss == 0: + # speeds up computation time + # refer to /numpy/numpy/issues/9363 + WH_safe_X_data **= -1 + WH_safe_X_data **= 2 + # element-wise multiplication + WH_safe_X_data *= X_data + else: + WH_safe_X_data **= beta_loss - 2 + # element-wise multiplication + WH_safe_X_data *= X_data + + # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T) + numerator = safe_sparse_dot(WH_safe_X, H.T) + + # Denominator + if beta_loss == 1: + if H_sum is None: + H_sum = np.sum(H, axis=1) # shape(n_components, ) + denominator = H_sum[np.newaxis, :] + + else: + # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T) + if 
sp.issparse(X): + # memory efficient computation + # (compute row by row, avoiding the dense matrix WH) + WHHt = np.empty(W.shape) + for i in range(X.shape[0]): + WHi = np.dot(W[i, :], H) + if beta_loss - 1 < 0: + WHi[WHi < EPSILON] = EPSILON + WHi **= beta_loss - 1 + WHHt[i, :] = np.dot(WHi, H.T) + else: + WH **= beta_loss - 1 + WHHt = np.dot(WH, H.T) + denominator = WHHt + + # Add L1 and L2 regularization + if l1_reg_W > 0: + denominator += l1_reg_W + if l2_reg_W > 0: + denominator = denominator + l2_reg_W * W + denominator[denominator == 0] = EPSILON + + numerator /= denominator + delta_W = numerator + + # gamma is in ]0, 1] + if gamma != 1: + delta_W **= gamma + + W *= delta_W + + return W, H_sum, HHt, XHt + + +def _multiplicative_update_h( + X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None +): + """update H in Multiplicative Update NMF.""" + if beta_loss == 2: + numerator = safe_sparse_dot(W.T, X) + denominator = np.linalg.multi_dot([W.T, W, H]) + + else: + # Numerator + WH_safe_X = _special_sparse_dot(W, H, X) + if sp.issparse(X): + WH_safe_X_data = WH_safe_X.data + X_data = X.data + else: + WH_safe_X_data = WH_safe_X + X_data = X + # copy used in the Denominator + WH = WH_safe_X.copy() + if beta_loss - 1.0 < 0: + WH[WH < EPSILON] = EPSILON + + # to avoid division by zero + if beta_loss - 2.0 < 0: + WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON + + if beta_loss == 1: + np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) + elif beta_loss == 0: + # speeds up computation time + # refer to /numpy/numpy/issues/9363 + WH_safe_X_data **= -1 + WH_safe_X_data **= 2 + # element-wise multiplication + WH_safe_X_data *= X_data + else: + WH_safe_X_data **= beta_loss - 2 + # element-wise multiplication + WH_safe_X_data *= X_data + + # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X) + numerator = safe_sparse_dot(W.T, WH_safe_X) + + # Denominator + if beta_loss == 1: + W_sum = np.sum(W, axis=0) # shape(n_components, ) + W_sum[W_sum == 0] = 1.0 + denominator = W_sum[:, np.newaxis] + + # beta_loss not in (1, 2) + else: + # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1) + if sp.issparse(X): + # memory efficient computation + # (compute column by column, avoiding the dense matrix WH) + WtWH = np.empty(H.shape) + for i in range(X.shape[1]): + WHi = np.dot(W, H[:, i]) + if beta_loss - 1 < 0: + WHi[WHi < EPSILON] = EPSILON + WHi **= beta_loss - 1 + WtWH[:, i] = np.dot(W.T, WHi) + else: + WH **= beta_loss - 1 + WtWH = np.dot(W.T, WH) + denominator = WtWH + + # Add L1 and L2 regularization + if l1_reg_H > 0: + denominator += l1_reg_H + if l2_reg_H > 0: + denominator = denominator + l2_reg_H * H + denominator[denominator == 0] = EPSILON + + if A is not None and B is not None: + # Updates for the online nmf + if gamma != 1: + H **= 1 / gamma + numerator *= H + A *= rho + B *= rho + A += numerator + B += denominator + H = A / B + + if gamma != 1: + H **= gamma + else: + delta_H = numerator + delta_H /= denominator + if gamma != 1: + delta_H **= gamma + H *= delta_H + + return H + + +def _fit_multiplicative_update( + X, + W, + H, + beta_loss="frobenius", + max_iter=200, + tol=1e-4, + l1_reg_W=0, + l1_reg_H=0, + l2_reg_W=0, + l2_reg_H=0, + update_H=True, + verbose=0, +): + """Compute Non-negative Matrix Factorization with Multiplicative Update. + + The objective function is _beta_divergence(X, WH) and is minimized with an + alternating minimization of W and H. Each minimization is done with a + Multiplicative Update. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Constant input matrix. + + W : array-like of shape (n_samples, n_components) + Initial guess for the solution. + + H : array-like of shape (n_components, n_features) + Initial guess for the solution. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. + + max_iter : int, default=200 + Number of iterations. + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + l1_reg_W : float, default=0. + L1 regularization parameter for W. + + l1_reg_H : float, default=0. + L1 regularization parameter for H. + + l2_reg_W : float, default=0. + L2 regularization parameter for W. + + l2_reg_H : float, default=0. + L2 regularization parameter for H. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. + + verbose : int, default=0 + The verbosity level. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + The number of iterations done by the algorithm. + + References + ---------- + Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix + Factorization. Adv. Neural Inform. Process. Syst.. 13. + Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix + factorization with the beta-divergence. Neural Computation, 23(9). 
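+
+    Notes
+    -----
+    (Clarifying note added here, not upstream documentation.) The updates use
+    a Maximization-Minimization exponent ``gamma`` that depends on
+    ``beta_loss``: ``1 / (2 - beta_loss)`` for ``beta_loss < 1``,
+    ``1 / (beta_loss - 1)`` for ``beta_loss > 2``, and ``1`` otherwise,
+    following Fevotte & Idier (2011).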
+ """ + start_time = time.time() + + beta_loss = _beta_loss_to_float(beta_loss) + + # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] + if beta_loss < 1: + gamma = 1.0 / (2.0 - beta_loss) + elif beta_loss > 2: + gamma = 1.0 / (beta_loss - 1.0) + else: + gamma = 1.0 + + # used for the convergence criterion + error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True) + previous_error = error_at_init + + H_sum, HHt, XHt = None, None, None + for n_iter in range(1, max_iter + 1): + # update W + # H_sum, HHt and XHt are saved and reused if not update_H + W, H_sum, HHt, XHt = _multiplicative_update_w( + X, + W, + H, + beta_loss=beta_loss, + l1_reg_W=l1_reg_W, + l2_reg_W=l2_reg_W, + gamma=gamma, + H_sum=H_sum, + HHt=HHt, + XHt=XHt, + update_H=update_H, + ) + + # necessary for stability with beta_loss < 1 + if beta_loss < 1: + W[W < np.finfo(np.float64).eps] = 0.0 + + # update H (only at fit or fit_transform) + if update_H: + H = _multiplicative_update_h( + X, + W, + H, + beta_loss=beta_loss, + l1_reg_H=l1_reg_H, + l2_reg_H=l2_reg_H, + gamma=gamma, + ) + + # These values will be recomputed since H changed + H_sum, HHt, XHt = None, None, None + + # necessary for stability with beta_loss < 1 + if beta_loss <= 1: + H[H < np.finfo(np.float64).eps] = 0.0 + + # test convergence criterion every 10 iterations + if tol > 0 and n_iter % 10 == 0: + error = _beta_divergence(X, W, H, beta_loss, square_root=True) + + if verbose: + iter_time = time.time() + print( + "Epoch %02d reached after %.3f seconds, error: %f" + % (n_iter, iter_time - start_time, error) + ) + + if (previous_error - error) / error_at_init < tol: + break + previous_error = error + + # do not print if we have already printed in the convergence test + if verbose and (tol == 0 or n_iter % 10 != 0): + end_time = time.time() + print( + "Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time) + ) + + return W, H, n_iter + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "W": ["array-like", None], + "H": ["array-like", None], + "update_H": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def non_negative_factorization( + X, + W=None, + H=None, + n_components="warn", + *, + init=None, + update_H=True, + solver="cd", + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + random_state=None, + verbose=0, + shuffle=False, +): + """Compute Non-negative Matrix Factorization (NMF). + + Find two non-negative matrices (W, H) whose product approximates the non- + negative matrix X. This factorization can be used for example for + dimensionality reduction, source separation or topic extraction. + + The objective function is: + + .. math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}^2` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. 
+ + The regularization terms are scaled by `n_features` for `W` and by `n_samples` for + `H` to keep their impact balanced with respect to one another and to the data fit + term as independent as possible of the size `n_samples` of the training set. + + The objective function is minimized with an alternating minimization of W + and H. If H is given and update_H=False, it solves for W only. + + Note that the transformed data is named W and the components matrix is named H. In + the NMF literature, the naming convention is usually the opposite since the data + matrix X is transposed. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Constant matrix. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + n_components : int or {'auto'} or None, default=None + Number of components, if n_components is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from `W` or `H` shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + + Valid options: + + - None: 'nndsvda' if n_components < n_features, otherwise 'random'. + - 'random': non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + - 'nndsvda': NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + - 'nndsvdar': NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + - 'custom': If `update_H=True`, use custom matrices W and H which must both + be provided. If `update_H=False`, then only custom matrix H is used. + + .. versionchanged:: 0.23 + The default value of `init` changed from 'random' to None in 0.23. + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. + + solver : {'cd', 'mu'}, default='cd' + Numerical solver to use: + + - 'cd' is a Coordinate Descent solver that uses Fast Hierarchical + Alternating Least Squares (Fast HALS). + - 'mu' is a Multiplicative Update solver. + + .. versionadded:: 0.17 + Coordinate Descent solver. + + .. versionadded:: 0.19 + Multiplicative Update solver. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. 
Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. Used only in 'mu' solver. + + .. versionadded:: 0.19 + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + .. versionadded:: 1.0 + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + .. versionadded:: 1.0 + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. + For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + random_state : int, RandomState instance or None, default=None + Used for NMF initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + verbose : int, default=0 + The verbosity level. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + Actual number of iterations. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). + + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import non_negative_factorization + >>> W, H, n_iter = non_negative_factorization( + ... X, n_components=2, init='random', random_state=0) + """ + est = NMF( + n_components=n_components, + init=init, + solver=solver, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + shuffle=shuffle, + ) + est._validate_params() + + X = check_array(X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]) + + with config_context(assume_finite=True): + W, H, n_iter = est._fit_transform(X, W=W, H=H, update_H=update_H) + + return W, H, n_iter + + +class _BaseNMF(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, ABC): + """Base class for NMF and MiniBatchNMF.""" + + # This prevents ``set_split_inverse_transform`` to be generated for the + # non-standard ``W`` arg on ``inverse_transform``. 
+ # TODO: remove when W is removed in v1.5 for inverse_transform + __metadata_request__inverse_transform = {"W": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + StrOptions({"auto"}), + Hidden(StrOptions({"warn"})), + ], + "init": [ + StrOptions({"random", "nndsvd", "nndsvda", "nndsvdar", "custom"}), + None, + ], + "beta_loss": [ + StrOptions({"frobenius", "kullback-leibler", "itakura-saito"}), + Real, + ], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "alpha_W": [Interval(Real, 0, None, closed="left")], + "alpha_H": [Interval(Real, 0, None, closed="left"), StrOptions({"same"})], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "verbose": ["verbose"], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + random_state=None, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + verbose=0, + ): + self.n_components = n_components + self.init = init + self.beta_loss = beta_loss + self.tol = tol + self.max_iter = max_iter + self.random_state = random_state + self.alpha_W = alpha_W + self.alpha_H = alpha_H + self.l1_ratio = l1_ratio + self.verbose = verbose + + def _check_params(self, X): + # n_components + self._n_components = self.n_components + if self.n_components == "warn": + warnings.warn( + ( + "The default value of `n_components` will change from `None` to" + " `'auto'` in 1.6. Set the value of `n_components` to `None`" + " explicitly to suppress the warning." + ), + FutureWarning, + ) + self._n_components = None # Keeping the old default value + if self._n_components is None: + self._n_components = X.shape[1] + + # beta_loss + self._beta_loss = _beta_loss_to_float(self.beta_loss) + + def _check_w_h(self, X, W, H, update_H): + """Check W and H, or initialize them.""" + n_samples, n_features = X.shape + + if self.init == "custom" and update_H: + _check_init(H, (self._n_components, n_features), "NMF (input H)") + _check_init(W, (n_samples, self._n_components), "NMF (input W)") + if self._n_components == "auto": + self._n_components = H.shape[0] + + if H.dtype != X.dtype or W.dtype != X.dtype: + raise TypeError( + "H and W should have the same dtype as X. Got " + "H.dtype = {} and W.dtype = {}.".format(H.dtype, W.dtype) + ) + + elif not update_H: + if W is not None: + warnings.warn( + "When update_H=False, the provided initial W is not used.", + RuntimeWarning, + ) + + _check_init(H, (self._n_components, n_features), "NMF (input H)") + if self._n_components == "auto": + self._n_components = H.shape[0] + + if H.dtype != X.dtype: + raise TypeError( + "H should have the same dtype as X. Got H.dtype = {}.".format( + H.dtype + ) + ) + + # 'mu' solver should not be initialized by zeros + if self.solver == "mu": + avg = np.sqrt(X.mean() / self._n_components) + W = np.full((n_samples, self._n_components), avg, dtype=X.dtype) + else: + W = np.zeros((n_samples, self._n_components), dtype=X.dtype) + + else: + if W is not None or H is not None: + warnings.warn( + ( + "When init!='custom', provided W or H are ignored. Set " + " init='custom' to use them as initialization." 
+ ), + RuntimeWarning, + ) + + if self._n_components == "auto": + self._n_components = X.shape[1] + + W, H = _initialize_nmf( + X, self._n_components, init=self.init, random_state=self.random_state + ) + + return W, H + + def _compute_regularization(self, X): + """Compute scaled regularization terms.""" + n_samples, n_features = X.shape + alpha_W = self.alpha_W + alpha_H = self.alpha_W if self.alpha_H == "same" else self.alpha_H + + l1_reg_W = n_features * alpha_W * self.l1_ratio + l1_reg_H = n_samples * alpha_H * self.l1_ratio + l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio) + l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio) + + return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H + + def fit(self, X, y=None, **params): + """Learn a NMF model for the data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + **params : kwargs + Parameters (keyword arguments) and values passed to + the fit_transform instance. + + Returns + ------- + self : object + Returns the instance itself. + """ + # param validation is done in fit_transform + + self.fit_transform(X, **params) + return self + + def inverse_transform(self, Xt=None, W=None): + """Transform data back to its original space. + + .. versionadded:: 0.18 + + Parameters + ---------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_components) + Transformed data matrix. + + W : deprecated + Use `Xt` instead. + + .. deprecated:: 1.3 + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + Returns a data matrix of the original shape. + """ + if Xt is None and W is None: + raise TypeError("Missing required positional argument: Xt") + + if W is not None and Xt is not None: + raise ValueError("Please provide only `Xt`, and not `W`.") + + if W is not None: + warnings.warn( + ( + "Input argument `W` was renamed to `Xt` in v1.3 and will be removed" + " in v1.5." + ), + FutureWarning, + ) + Xt = W + + check_is_fitted(self) + return Xt @ self.components_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "requires_positive_X": True, + "preserves_dtype": [np.float64, np.float32], + } + + +class NMF(_BaseNMF): + """Non-Negative Matrix Factorization (NMF). + + Find two non-negative matrices, i.e. matrices with all non-negative elements, (W, H) + whose product approximates the non-negative matrix X. This factorization can be used + for example for dimensionality reduction, source separation or topic extraction. + + The objective function is: + + .. math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. 
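+
+    Concretely (a clarifying addition, see ``_beta_divergence`` in this
+    module): `beta_loss=2` ('frobenius') gives half the squared Frobenius
+    norm, `beta_loss=1` ('kullback-leibler') the generalized Kullback-Leibler
+    divergence, and `beta_loss=0` ('itakura-saito') the Itakura-Saito
+    divergence.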
+ + The regularization terms are scaled by `n_features` for `W` and by `n_samples` for + `H` to keep their impact balanced with respect to one another and to the data fit + term as independent as possible of the size `n_samples` of the training set. + + The objective function is minimized with an alternating minimization of W + and H. + + Note that the transformed data is named W and the components matrix is named H. In + the NMF literature, the naming convention is usually the opposite since the data + matrix X is transposed. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int or {'auto'} or None, default=None + Number of components, if n_components is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from W or H shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + Valid options: + + - `None`: 'nndsvda' if n_components <= min(n_samples, n_features), + otherwise random. + + - `'random'`: non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + + - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + + - `'nndsvda'`: NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + + - `'nndsvdar'` NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + + - `'custom'`: Use custom matrices `W` and `H` which must both be provided. + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + solver : {'cd', 'mu'}, default='cd' + Numerical solver to use: + + - 'cd' is a Coordinate Descent solver. + - 'mu' is a Multiplicative Update solver. + + .. versionadded:: 0.17 + Coordinate Descent solver. + + .. versionadded:: 0.19 + Multiplicative Update solver. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. Used only in 'mu' solver. + + .. versionadded:: 0.19 + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + random_state : int, RandomState instance or None, default=None + Used for initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + .. versionadded:: 1.0 + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + .. versionadded:: 1.0 + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. 
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + .. versionadded:: 0.17 + Regularization parameter *l1_ratio* used in the Coordinate Descent + solver. + + verbose : int, default=0 + Whether to be verbose. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + .. versionadded:: 0.17 + *shuffle* parameter used in the Coordinate Descent solver. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_components_ : int + The number of components. It is same as the `n_components` parameter + if it was given. Otherwise, it will be same as the number of + features. + + reconstruction_err_ : float + Frobenius norm of the matrix difference, or beta-divergence, between + the training data ``X`` and the reconstructed data ``WH`` from + the fitted model. + + n_iter_ : int + Actual number of iterations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + PCA : Principal component analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). 
+ + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import NMF + >>> model = NMF(n_components=2, init='random', random_state=0) + >>> W = model.fit_transform(X) + >>> H = model.components_ + """ + + _parameter_constraints: dict = { + **_BaseNMF._parameter_constraints, + "solver": [StrOptions({"mu", "cd"})], + "shuffle": ["boolean"], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + solver="cd", + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + random_state=None, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + verbose=0, + shuffle=False, + ): + super().__init__( + n_components=n_components, + init=init, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + ) + + self.solver = solver + self.shuffle = shuffle + + def _check_params(self, X): + super()._check_params(X) + + # solver + if self.solver != "mu" and self.beta_loss not in (2, "frobenius"): + # 'mu' is the only solver that handles other beta losses than 'frobenius' + raise ValueError( + f"Invalid beta_loss parameter: solver {self.solver!r} does not handle " + f"beta_loss = {self.beta_loss!r}" + ) + if self.solver == "mu" and self.init == "nndsvd": + warnings.warn( + ( + "The multiplicative update ('mu') solver cannot update " + "zeros present in the initialization, and so leads to " + "poorer results when used jointly with init='nndsvd'. " + "You may try init='nndsvda' or init='nndsvdar' instead." + ), + UserWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, W=None, H=None): + """Learn a NMF model for the data X and returns the transformed data. + + This is more efficient than calling fit followed by transform. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32] + ) + + with config_context(assume_finite=True): + W, H, n_iter = self._fit_transform(X, W=W, H=H) + + self.reconstruction_err_ = _beta_divergence( + X, W, H, self._beta_loss, square_root=True + ) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_iter_ = n_iter + + return W + + def _fit_transform(self, X, y=None, W=None, H=None, update_H=True): + """Learn a NMF model for the data X and returns the transformed data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed + + y : Ignored + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. 
+ If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + update_H : bool, default=True + If True, both W and H will be estimated from initial guesses, + this corresponds to a call to the 'fit_transform' method. + If False, only W will be estimated, this corresponds to a call + to the 'transform' method. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + + H : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_iter_ : int + Actual number of iterations. + """ + check_non_negative(X, "NMF (input X)") + + # check parameters + self._check_params(X) + + if X.min() == 0 and self._beta_loss <= 0: + raise ValueError( + "When beta_loss <= 0 and X contains zeros, " + "the solver may diverge. Please add small values " + "to X, or use a positive beta_loss." + ) + + # initialize or check W and H + W, H = self._check_w_h(X, W, H, update_H) + + # scale the regularization terms + l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X) + + if self.solver == "cd": + W, H, n_iter = _fit_coordinate_descent( + X, + W, + H, + self.tol, + self.max_iter, + l1_reg_W, + l1_reg_H, + l2_reg_W, + l2_reg_H, + update_H=update_H, + verbose=self.verbose, + shuffle=self.shuffle, + random_state=self.random_state, + ) + elif self.solver == "mu": + W, H, n_iter, *_ = _fit_multiplicative_update( + X, + W, + H, + self._beta_loss, + self.max_iter, + self.tol, + l1_reg_W, + l1_reg_H, + l2_reg_W, + l2_reg_H, + update_H, + self.verbose, + ) + else: + raise ValueError("Invalid solver parameter '%s'." % self.solver) + + if n_iter == self.max_iter and self.tol > 0: + warnings.warn( + "Maximum number of iterations %d reached. Increase " + "it to improve convergence." + % self.max_iter, + ConvergenceWarning, + ) + + return W, H, n_iter + + def transform(self, X): + """Transform the data X according to the fitted NMF model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False + ) + + with config_context(assume_finite=True): + W, *_ = self._fit_transform(X, H=self.components_, update_H=False) + + return W + + +class MiniBatchNMF(_BaseNMF): + """Mini-Batch Non-Negative Matrix Factorization (NMF). + + .. versionadded:: 1.1 + + Find two non-negative matrices, i.e. matrices with all non-negative elements, + (`W`, `H`) whose product approximates the non-negative matrix `X`. This + factorization can be used for example for dimensionality reduction, source + separation or topic extraction. + + The objective function is: + + .. 
math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}^2` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. + + The objective function is minimized with an alternating minimization of `W` + and `H`. + + Note that the transformed data is named `W` and the components matrix is + named `H`. In the NMF literature, the naming convention is usually the opposite + since the data matrix `X` is transposed. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int or {'auto'} or None, default=None + Number of components, if `n_components` is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from W or H shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + Valid options: + + - `None`: 'nndsvda' if `n_components <= min(n_samples, n_features)`, + otherwise random. + + - `'random'`: non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + + - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness). + + - `'nndsvda'`: NNDSVD with zeros filled with the average of X + (better when sparsity is not desired). + + - `'nndsvdar'` NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired). + + - `'custom'`: Use custom matrices `W` and `H` which must both be provided. + + batch_size : int, default=1024 + Number of samples in each mini-batch. Large batch sizes + give better long-term convergence at the cost of a slower start. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between `X` + and the dot product `WH`. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for `beta_loss <= 0` (or 'itakura-saito'), the input + matrix `X` cannot contain zeros. + + tol : float, default=1e-4 + Control early stopping based on the norm of the differences in `H` + between 2 steps. To disable early stopping based on changes in `H`, set + `tol` to 0.0. + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + max_iter : int, default=200 + Maximum number of iterations over the complete dataset before + timing out. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. 
Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. + For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + forget_factor : float, default=0.7 + Amount of rescaling of past information. Its value could be 1 with + finite datasets. Choosing values < 1 is recommended with online + learning as more recent batches will weight more than past batches. + + fresh_restarts : bool, default=False + Whether to completely solve for W at each step. Doing fresh restarts will likely + lead to a better solution for a same number of iterations but it is much slower. + + fresh_restarts_max_iter : int, default=30 + Maximum number of iterations when solving for W at each step. Only used when + doing fresh restarts. These iterations may be stopped early based on a small + change of W controlled by `tol`. + + transform_max_iter : int, default=None + Maximum number of iterations when solving for W at transform time. + If None, it defaults to `max_iter`. + + random_state : int, RandomState instance or None, default=None + Used for initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + verbose : bool, default=False + Whether to be verbose. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_components_ : int + The number of components. It is same as the `n_components` parameter + if it was given. Otherwise, it will be same as the number of + features. + + reconstruction_err_ : float + Frobenius norm of the matrix difference, or beta-divergence, between + the training data `X` and the reconstructed data `WH` from + the fitted model. + + n_iter_ : int + Actual number of started iterations over the whole dataset. + + n_steps_ : int + Number of mini-batches processed. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + See Also + -------- + NMF : Non-negative matrix factorization. + MiniBatchDictionaryLearning : Finds a dictionary that can best be used to represent + data using a sparse code. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). + + .. [3] :doi:`"Online algorithms for nonnegative matrix factorization with the + Itakura-Saito divergence" <10.1109/ASPAA.2011.6082314>` + Lefevre, A., Bach, F., Fevotte, C. (2011). WASPA. 
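+
+    A minimal out-of-core sketch using `partial_fit` on successive chunks of
+    data (the chunk shapes and parameter values below are illustrative
+    assumptions; all chunks must have the same number of features)::
+
+        import numpy as np
+        from sklearn.decomposition import MiniBatchNMF
+
+        rng = np.random.RandomState(0)
+        chunks = [np.abs(rng.randn(100, 20)) for _ in range(5)]
+        mbnmf = MiniBatchNMF(n_components=5, random_state=0)
+        for chunk in chunks:
+            mbnmf.partial_fit(chunk)     # updates the components once per chunk
+        W = mbnmf.transform(chunks[0])   # encode data against the learned components_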
+ + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import MiniBatchNMF + >>> model = MiniBatchNMF(n_components=2, init='random', random_state=0) + >>> W = model.fit_transform(X) + >>> H = model.components_ + """ + + _parameter_constraints: dict = { + **_BaseNMF._parameter_constraints, + "max_no_improvement": [Interval(Integral, 1, None, closed="left"), None], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "forget_factor": [Interval(Real, 0, 1, closed="both")], + "fresh_restarts": ["boolean"], + "fresh_restarts_max_iter": [Interval(Integral, 1, None, closed="left")], + "transform_max_iter": [Interval(Integral, 1, None, closed="left"), None], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + batch_size=1024, + beta_loss="frobenius", + tol=1e-4, + max_no_improvement=10, + max_iter=200, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + forget_factor=0.7, + fresh_restarts=False, + fresh_restarts_max_iter=30, + transform_max_iter=None, + random_state=None, + verbose=0, + ): + super().__init__( + n_components=n_components, + init=init, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + ) + + self.max_no_improvement = max_no_improvement + self.batch_size = batch_size + self.forget_factor = forget_factor + self.fresh_restarts = fresh_restarts + self.fresh_restarts_max_iter = fresh_restarts_max_iter + self.transform_max_iter = transform_max_iter + + def _check_params(self, X): + super()._check_params(X) + + # batch_size + self._batch_size = min(self.batch_size, X.shape[0]) + + # forget_factor + self._rho = self.forget_factor ** (self._batch_size / X.shape[0]) + + # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] + if self._beta_loss < 1: + self._gamma = 1.0 / (2.0 - self._beta_loss) + elif self._beta_loss > 2: + self._gamma = 1.0 / (self._beta_loss - 1.0) + else: + self._gamma = 1.0 + + # transform_max_iter + self._transform_max_iter = ( + self.max_iter + if self.transform_max_iter is None + else self.transform_max_iter + ) + + return self + + def _solve_W(self, X, H, max_iter): + """Minimize the objective function w.r.t W. + + Update W with H being fixed, until convergence. This is the heart + of `transform` but it's also used during `fit` when doing fresh restarts. + """ + avg = np.sqrt(X.mean() / self._n_components) + W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype) + W_buffer = W.copy() + + # Get scaled regularization terms. Done for each minibatch to take into account + # variable sizes of minibatches. + l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X) + + for _ in range(max_iter): + W, *_ = _multiplicative_update_w( + X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma + ) + + W_diff = linalg.norm(W - W_buffer) / linalg.norm(W) + if self.tol > 0 and W_diff <= self.tol: + break + + W_buffer[:] = W + + return W + + def _minibatch_step(self, X, W, H, update_H): + """Perform the update of W and H for one minibatch.""" + batch_size = X.shape[0] + + # get scaled regularization terms. Done for each minibatch to take into account + # variable sizes of minibatches. 
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X) + + # update W + if self.fresh_restarts or W is None: + W = self._solve_W(X, H, self.fresh_restarts_max_iter) + else: + W, *_ = _multiplicative_update_w( + X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma + ) + + # necessary for stability with beta_loss < 1 + if self._beta_loss < 1: + W[W < np.finfo(np.float64).eps] = 0.0 + + batch_cost = ( + _beta_divergence(X, W, H, self._beta_loss) + + l1_reg_W * W.sum() + + l1_reg_H * H.sum() + + l2_reg_W * (W**2).sum() + + l2_reg_H * (H**2).sum() + ) / batch_size + + # update H (only at fit or fit_transform) + if update_H: + H[:] = _multiplicative_update_h( + X, + W, + H, + beta_loss=self._beta_loss, + l1_reg_H=l1_reg_H, + l2_reg_H=l2_reg_H, + gamma=self._gamma, + A=self._components_numerator, + B=self._components_denominator, + rho=self._rho, + ) + + # necessary for stability with beta_loss < 1 + if self._beta_loss <= 1: + H[H < np.finfo(np.float64).eps] = 0.0 + + return batch_cost + + def _minibatch_convergence( + self, X, batch_cost, H, H_buffer, n_samples, step, n_steps + ): + """Helper function to encapsulate the early stopping logic""" + batch_size = X.shape[0] + + # counts steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore first iteration because H is not updated yet. + if step == 1: + if self.verbose: + print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}") + return False + + # Compute an Exponentially Weighted Average of the cost function to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_cost is None: + self._ewa_cost = batch_cost + else: + alpha = batch_size / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha + + # Log progress to be able to monitor convergence + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch cost: " + f"{batch_cost}, ewa cost: {self._ewa_cost}" + ) + + # Early stopping based on change of H + H_diff = linalg.norm(H - H_buffer) / linalg.norm(H) + if self.tol > 0 and H_diff <= self.tol: + if self.verbose: + print(f"Converged (small H change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # cost function + if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min: + self._no_improvement = 0 + self._ewa_cost_min = self._ewa_cost + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in objective function) " + f"at step {step}/{n_steps}" + ) + return True + + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, W=None, H=None): + """Learn a NMF model for the data X and returns the transformed data. + + This is more efficient than calling fit followed by transform. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + y : Ignored + Not used, present here for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. 
+ + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32] + ) + + with config_context(assume_finite=True): + W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H) + + self.reconstruction_err_ = _beta_divergence( + X, W, H, self._beta_loss, square_root=True + ) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_iter_ = n_iter + self.n_steps_ = n_steps + + return W + + def _fit_transform(self, X, W=None, H=None, update_H=True): + """Learn a NMF model for the data X and returns the transformed data. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + update_H : bool, default=True + If True, both W and H will be estimated from initial guesses, + this corresponds to a call to the `fit_transform` method. + If False, only W will be estimated, this corresponds to a call + to the `transform` method. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + + H : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_iter : int + Actual number of started iterations over the whole dataset. + + n_steps : int + Number of mini-batches processed. + """ + check_non_negative(X, "MiniBatchNMF (input X)") + self._check_params(X) + + if X.min() == 0 and self._beta_loss <= 0: + raise ValueError( + "When beta_loss <= 0 and X contains zeros, " + "the solver may diverge. Please add small values " + "to X, or use a positive beta_loss." 
+ ) + + n_samples = X.shape[0] + + # initialize or check W and H + W, H = self._check_w_h(X, W, H, update_H) + H_buffer = H.copy() + + # Initialize auxiliary matrices + self._components_numerator = H.copy() + self._components_denominator = np.ones(H.shape, dtype=H.dtype) + + # Attributes to monitor the convergence + self._ewa_cost = None + self._ewa_cost_min = None + self._no_improvement = 0 + + batches = gen_batches(n_samples, self._batch_size) + batches = itertools.cycle(batches) + n_steps_per_iter = int(np.ceil(n_samples / self._batch_size)) + n_steps = self.max_iter * n_steps_per_iter + + for i, batch in zip(range(n_steps), batches): + batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H) + + if update_H and self._minibatch_convergence( + X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps + ): + break + + H_buffer[:] = H + + if self.fresh_restarts: + W = self._solve_W(X, H, self._transform_max_iter) + + n_steps = i + 1 + n_iter = int(np.ceil(n_steps / n_steps_per_iter)) + + if n_iter == self.max_iter and self.tol > 0: + warnings.warn( + ( + f"Maximum number of iterations {self.max_iter} reached. " + "Increase it to improve convergence." + ), + ConvergenceWarning, + ) + + return W, H, n_iter, n_steps + + def transform(self, X): + """Transform the data X according to the fitted MiniBatchNMF model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be transformed by the model. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False + ) + + W = self._solve_W(X, self.components_, self._transform_max_iter) + + return W + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, W=None, H=None): + """Update the model using the data in `X` as a mini-batch. + + This method is expected to be called several times consecutively + on different chunks of a dataset so as to implement out-of-core + or online learning. + + This is especially useful when the whole dataset is too big to fit in + memory at once (see :ref:`scaling_strategies`). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + y : Ignored + Not used, present here for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + Only used for the first call to `partial_fit`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + Only used for the first call to `partial_fit`. + + Returns + ------- + self + Returns the instance itself. 
+ """ + has_components = hasattr(self, "components_") + + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + dtype=[np.float64, np.float32], + reset=not has_components, + ) + + if not has_components: + # This instance has not been fitted yet (fit or partial_fit) + self._check_params(X) + _, H = self._check_w_h(X, W=W, H=H, update_H=True) + + self._components_numerator = H.copy() + self._components_denominator = np.ones(H.shape, dtype=H.dtype) + self.n_steps_ = 0 + else: + H = self.components_ + + self._minibatch_step(X, None, H, update_H=True) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_steps_ += 1 + + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c95f066f9667b7156ceeda679229829efaec9ec8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..d121c5e5c186fbfda337b203603ea69c7c2c1451 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_pca.py @@ -0,0 +1,747 @@ +""" Principal Component Analysis. +""" + +# Author: Alexandre Gramfort +# Olivier Grisel +# Mathieu Blondel +# Denis A. Engemann +# Michael Eickenberg +# Giorgio Patrini +# +# License: BSD 3 clause + +from math import log, sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.sparse import issparse +from scipy.sparse.linalg import svds +from scipy.special import gammaln + +from ..base import _fit_context +from ..utils import check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._array_api import _convert_to_numpy, get_namespace +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils.extmath import fast_logdet, randomized_svd, stable_cumsum, svd_flip +from ..utils.sparsefuncs import _implicit_column_offset, mean_variance_axis +from ..utils.validation import check_is_fitted +from ._base import _BasePCA + + +def _assess_dimension(spectrum, rank, n_samples): + """Compute the log-likelihood of a rank ``rank`` dataset. + + The dataset is assumed to be embedded in gaussian noise of shape(n, + dimf) having spectrum ``spectrum``. This implements the method of + T. P. Minka. + + Parameters + ---------- + spectrum : ndarray of shape (n_features,) + Data spectrum. + rank : int + Tested rank value. It should be strictly lower than n_features, + otherwise the method isn't specified (division by zero in equation + (31) from the paper). + n_samples : int + Number of samples. + + Returns + ------- + ll : float + The log-likelihood. + + References + ---------- + This implements the method of `Thomas P. Minka: + Automatic Choice of Dimensionality for PCA. 
NIPS 2000: 598-604 + `_ + """ + xp, _ = get_namespace(spectrum) + + n_features = spectrum.shape[0] + if not 1 <= rank < n_features: + raise ValueError("the tested rank should be in [1, n_features - 1]") + + eps = 1e-15 + + if spectrum[rank - 1] < eps: + # When the tested rank is associated with a small eigenvalue, there's + # no point in computing the log-likelihood: it's going to be very + # small and won't be the max anyway. Also, it can lead to numerical + # issues below when computing pa, in particular in log((spectrum[i] - + # spectrum[j]) because this will take the log of something very small. + return -xp.inf + + pu = -rank * log(2.0) + for i in range(1, rank + 1): + pu += ( + gammaln((n_features - i + 1) / 2.0) + - log(xp.pi) * (n_features - i + 1) / 2.0 + ) + + pl = xp.sum(xp.log(spectrum[:rank])) + pl = -pl * n_samples / 2.0 + + v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank)) + pv = -log(v) * n_samples * (n_features - rank) / 2.0 + + m = n_features * rank - rank * (rank + 1.0) / 2.0 + pp = log(2.0 * xp.pi) * (m + rank) / 2.0 + + pa = 0.0 + spectrum_ = xp.asarray(spectrum, copy=True) + spectrum_[rank:n_features] = v + for i in range(rank): + for j in range(i + 1, spectrum.shape[0]): + pa += log( + (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i]) + ) + log(n_samples) + + ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0 + + return ll + + +def _infer_dimension(spectrum, n_samples): + """Infers the dimension of a dataset with a given spectrum. + + The returned value will be in [1, n_features - 1]. + """ + xp, _ = get_namespace(spectrum) + + ll = xp.empty_like(spectrum) + ll[0] = -xp.inf # we don't want to return n_components = 0 + for rank in range(1, spectrum.shape[0]): + ll[rank] = _assess_dimension(spectrum, rank, n_samples) + return xp.argmax(ll) + + +class PCA(_BasePCA): + """Principal component analysis (PCA). + + Linear dimensionality reduction using Singular Value Decomposition of the + data to project it to a lower dimensional space. The input data is centered + but not scaled for each feature before applying the SVD. + + It uses the LAPACK implementation of the full SVD or a randomized truncated + SVD by the method of Halko et al. 2009, depending on the shape of the input + data and the number of components to extract. + + It can also use the scipy.sparse.linalg ARPACK implementation of the + truncated SVD. + + Notice that this class does not support sparse input. See + :class:`TruncatedSVD` for an alternative with sparse data. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py` + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, float or 'mle', default=None + Number of components to keep. + if n_components is not set all components are kept:: + + n_components == min(n_samples, n_features) + + If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's + MLE is used to guess the dimension. Use of ``n_components == 'mle'`` + will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``. + + If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the + number of components such that the amount of variance that needs to be + explained is greater than the percentage specified by n_components. + + If ``svd_solver == 'arpack'``, the number of components must be + strictly less than the minimum of n_features and n_samples. 
+ + Hence, the None case results in:: + + n_components == min(n_samples, n_features) - 1 + + copy : bool, default=True + If False, data passed to fit are overwritten and running + fit(X).transform(X) will not yield the expected results, + use fit_transform(X) instead. + + whiten : bool, default=False + When True (False by default) the `components_` vectors are multiplied + by the square root of n_samples and then divided by the singular values + to ensure uncorrelated outputs with unit component-wise variances. + + Whitening will remove some information from the transformed signal + (the relative variance scales of the components) but can sometime + improve the predictive accuracy of the downstream estimators by + making their data respect some hard-wired assumptions. + + svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto' + If auto : + The solver is selected by a default policy based on `X.shape` and + `n_components`: if the input data is larger than 500x500 and the + number of components to extract is lower than 80% of the smallest + dimension of the data, then the more efficient 'randomized' + method is enabled. Otherwise the exact full SVD is computed and + optionally truncated afterwards. + If full : + run exact full SVD calling the standard LAPACK solver via + `scipy.linalg.svd` and select the components by postprocessing + If arpack : + run SVD truncated to n_components calling ARPACK solver via + `scipy.sparse.linalg.svds`. It requires strictly + 0 < n_components < min(X.shape) + If randomized : + run randomized SVD by the method of Halko et al. + + .. versionadded:: 0.18.0 + + tol : float, default=0.0 + Tolerance for singular values computed by svd_solver == 'arpack'. + Must be of range [0.0, infinity). + + .. versionadded:: 0.18.0 + + iterated_power : int or 'auto', default='auto' + Number of iterations for the power method computed by + svd_solver == 'randomized'. + Must be of range [0, infinity). + + .. versionadded:: 0.18.0 + + n_oversamples : int, default=10 + This parameter is only relevant when `svd_solver="randomized"`. + It corresponds to the additional number of random vectors to sample the + range of `X` so as to ensure proper conditioning. See + :func:`~sklearn.utils.extmath.randomized_svd` for more details. + + .. versionadded:: 1.1 + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Power iteration normalizer for randomized SVD solver. + Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` + for more details. + + .. versionadded:: 1.1 + + random_state : int, RandomState instance or None, default=None + Used when the 'arpack' or 'randomized' solvers are used. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + .. versionadded:: 0.18.0 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Principal axes in feature space, representing the directions of + maximum variance in the data. Equivalently, the right singular + vectors of the centered input data, parallel to its eigenvectors. + The components are sorted by decreasing ``explained_variance_``. + + explained_variance_ : ndarray of shape (n_components,) + The amount of variance explained by each of the selected components. + The variance estimation uses `n_samples - 1` degrees of freedom. + + Equal to n_components largest eigenvalues + of the covariance matrix of X. + + .. 
versionadded:: 0.18 + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + + If ``n_components`` is not set then all components are stored and the + sum of the ratios is equal to 1.0. + + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + .. versionadded:: 0.19 + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + + Equal to `X.mean(axis=0)`. + + n_components_ : int + The estimated number of components. When n_components is set + to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this + number is estimated from input data. Otherwise it equals the parameter + n_components, or the lesser value of n_features and n_samples + if n_components is None. + + n_samples_ : int + Number of samples in the training data. + + noise_variance_ : float + The estimated noise covariance following the Probabilistic PCA model + from Tipping and Bishop 1999. See "Pattern Recognition and + Machine Learning" by C. Bishop, 12.2.1 p. 574 or + http://www.miketipping.com/papers/met-mppca.pdf. It is required to + compute the estimated data covariance and score samples. + + Equal to the average of (min(n_features, n_samples) - n_components) + smallest eigenvalues of the covariance matrix of X. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + KernelPCA : Kernel Principal Component Analysis. + SparsePCA : Sparse Principal Component Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + IncrementalPCA : Incremental Principal Component Analysis. + + References + ---------- + For n_components == 'mle', this class uses the method from: + `Minka, T. P.. "Automatic choice of dimensionality for PCA". + In NIPS, pp. 598-604 `_ + + Implements the probabilistic PCA model from: + `Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal + component analysis". Journal of the Royal Statistical Society: + Series B (Statistical Methodology), 61(3), 611-622. + `_ + via the score and score_samples methods. + + For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`. + + For svd_solver == 'randomized', see: + :doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011). + "Finding structure with randomness: Probabilistic algorithms for + constructing approximate matrix decompositions". + SIAM review, 53(2), 217-288. + <10.1137/090771806>` + and also + :doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011). + "A randomized algorithm for the decomposition of matrices". + Applied and Computational Harmonic Analysis, 30(1), 47-68. + <10.1016/j.acha.2010.02.003>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import PCA + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> pca = PCA(n_components=2) + >>> pca.fit(X) + PCA(n_components=2) + >>> print(pca.explained_variance_ratio_) + [0.9924... 0.0075...] + >>> print(pca.singular_values_) + [6.30061... 0.54980...] 
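+
+    A sketch of selecting the number of components by an explained-variance
+    fraction rather than a fixed integer (the 0.95 threshold is an illustrative
+    choice; this relies on the ``0 < n_components < 1`` behaviour described
+    above with ``svd_solver='full'``):
+
+    >>> pca_frac = PCA(n_components=0.95, svd_solver='full')
+    >>> pca_frac.fit(X)
+    PCA(n_components=0.95, svd_solver='full')
+    >>> print(pca_frac.n_components_)
+    1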
+ + >>> pca = PCA(n_components=2, svd_solver='full') + >>> pca.fit(X) + PCA(n_components=2, svd_solver='full') + >>> print(pca.explained_variance_ratio_) + [0.9924... 0.00755...] + >>> print(pca.singular_values_) + [6.30061... 0.54980...] + + >>> pca = PCA(n_components=1, svd_solver='arpack') + >>> pca.fit(X) + PCA(n_components=1, svd_solver='arpack') + >>> print(pca.explained_variance_ratio_) + [0.99244...] + >>> print(pca.singular_values_) + [6.30061...] + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 0, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="neither"), + StrOptions({"mle"}), + None, + ], + "copy": ["boolean"], + "whiten": ["boolean"], + "svd_solver": [StrOptions({"auto", "full", "arpack", "randomized"})], + "tol": [Interval(Real, 0, None, closed="left")], + "iterated_power": [ + StrOptions({"auto"}), + Interval(Integral, 0, None, closed="left"), + ], + "n_oversamples": [Interval(Integral, 1, None, closed="left")], + "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + copy=True, + whiten=False, + svd_solver="auto", + tol=0.0, + iterated_power="auto", + n_oversamples=10, + power_iteration_normalizer="auto", + random_state=None, + ): + self.n_components = n_components + self.copy = copy + self.whiten = whiten + self.svd_solver = svd_solver + self.tol = tol + self.iterated_power = iterated_power + self.n_oversamples = n_oversamples + self.power_iteration_normalizer = power_iteration_normalizer + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Ignored. + + Returns + ------- + self : object + Returns the instance itself. + """ + self._fit(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model with X and apply the dimensionality reduction on X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Ignored. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed values. + + Notes + ----- + This method returns a Fortran-ordered array. To convert it to a + C-ordered array, use 'np.ascontiguousarray'. + """ + U, S, Vt = self._fit(X) + U = U[:, : self.n_components_] + + if self.whiten: + # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) + U *= sqrt(X.shape[0] - 1) + else: + # X_new = X * V = U * S * Vt * V = U * S + U *= S[: self.n_components_] + + return U + + def _fit(self, X): + """Dispatch to the right submethod depending on the chosen solver.""" + xp, is_array_api_compliant = get_namespace(X) + + # Raise an error for sparse input and unsupported svd_solver + if issparse(X) and self.svd_solver != "arpack": + raise TypeError( + 'PCA only support sparse inputs with the "arpack" solver, while ' + f'"{self.svd_solver}" was passed. See TruncatedSVD for a possible' + " alternative." + ) + # Raise an error for non-Numpy input and arpack solver. 
+ if self.svd_solver == "arpack" and is_array_api_compliant: + raise ValueError( + "PCA with svd_solver='arpack' is not supported for Array API inputs." + ) + + X = self._validate_data( + X, + dtype=[xp.float64, xp.float32], + accept_sparse=("csr", "csc"), + ensure_2d=True, + copy=self.copy, + ) + + # Handle n_components==None + if self.n_components is None: + if self.svd_solver != "arpack": + n_components = min(X.shape) + else: + n_components = min(X.shape) - 1 + else: + n_components = self.n_components + + # Handle svd_solver + self._fit_svd_solver = self.svd_solver + if self._fit_svd_solver == "auto": + # Small problem or n_components == 'mle', just call full PCA + if max(X.shape) <= 500 or n_components == "mle": + self._fit_svd_solver = "full" + elif 1 <= n_components < 0.8 * min(X.shape): + self._fit_svd_solver = "randomized" + # This is also the case of n_components in (0,1) + else: + self._fit_svd_solver = "full" + + # Call different fits for either full or truncated SVD + if self._fit_svd_solver == "full": + return self._fit_full(X, n_components) + elif self._fit_svd_solver in ["arpack", "randomized"]: + return self._fit_truncated(X, n_components, self._fit_svd_solver) + + def _fit_full(self, X, n_components): + """Fit the model by computing full SVD on X.""" + xp, is_array_api_compliant = get_namespace(X) + + n_samples, n_features = X.shape + + if n_components == "mle": + if n_samples < n_features: + raise ValueError( + "n_components='mle' is only supported if n_samples >= n_features" + ) + elif not 0 <= n_components <= min(n_samples, n_features): + raise ValueError( + "n_components=%r must be between 0 and " + "min(n_samples, n_features)=%r with " + "svd_solver='full'" % (n_components, min(n_samples, n_features)) + ) + + # Center data + self.mean_ = xp.mean(X, axis=0) + X -= self.mean_ + + if not is_array_api_compliant: + # Use scipy.linalg with NumPy/SciPy inputs for the sake of not + # introducing unanticipated behavior changes. In the long run we + # could instead decide to always use xp.linalg.svd for all inputs, + # but that would make this code rely on numpy's SVD instead of + # scipy's. It's not 100% clear whether they use the same LAPACK + # solver by default though (assuming both are built against the + # same BLAS). + U, S, Vt = linalg.svd(X, full_matrices=False) + else: + U, S, Vt = xp.linalg.svd(X, full_matrices=False) + # flip eigenvectors' sign to enforce deterministic output + U, Vt = svd_flip(U, Vt) + + components_ = Vt + + # Get variance explained by singular values + explained_variance_ = (S**2) / (n_samples - 1) + total_var = xp.sum(explained_variance_) + explained_variance_ratio_ = explained_variance_ / total_var + singular_values_ = xp.asarray(S, copy=True) # Store the singular values. + + # Postprocess the number of components required + if n_components == "mle": + n_components = _infer_dimension(explained_variance_, n_samples) + elif 0 < n_components < 1.0: + # number of components for which the cumulated explained + # variance percentage is superior to the desired threshold + # side='right' ensures that number of features selected + # their variance is always greater than n_components float + # passed. 
More discussion in issue: #15669 + if is_array_api_compliant: + # Convert to numpy as xp.cumsum and xp.searchsorted are not + # part of the Array API standard yet: + # + # https://github.com/data-apis/array-api/issues/597 + # https://github.com/data-apis/array-api/issues/688 + # + # Furthermore, it's not always safe to call them for namespaces + # that already implement them: for instance as + # cupy.searchsorted does not accept a float as second argument. + explained_variance_ratio_np = _convert_to_numpy( + explained_variance_ratio_, xp=xp + ) + else: + explained_variance_ratio_np = explained_variance_ratio_ + ratio_cumsum = stable_cumsum(explained_variance_ratio_np) + n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1 + + # Compute noise covariance using Probabilistic PCA model + # The sigma2 maximum likelihood (cf. eq. 12.46) + if n_components < min(n_features, n_samples): + self.noise_variance_ = xp.mean(explained_variance_[n_components:]) + else: + self.noise_variance_ = 0.0 + + self.n_samples_ = n_samples + self.components_ = components_[:n_components, :] + self.n_components_ = n_components + self.explained_variance_ = explained_variance_[:n_components] + self.explained_variance_ratio_ = explained_variance_ratio_[:n_components] + self.singular_values_ = singular_values_[:n_components] + + return U, S, Vt + + def _fit_truncated(self, X, n_components, svd_solver): + """Fit the model by computing truncated SVD (by ARPACK or randomized) + on X. + """ + xp, _ = get_namespace(X) + + n_samples, n_features = X.shape + + if isinstance(n_components, str): + raise ValueError( + "n_components=%r cannot be a string with svd_solver='%s'" + % (n_components, svd_solver) + ) + elif not 1 <= n_components <= min(n_samples, n_features): + raise ValueError( + "n_components=%r must be between 1 and " + "min(n_samples, n_features)=%r with " + "svd_solver='%s'" + % (n_components, min(n_samples, n_features), svd_solver) + ) + elif svd_solver == "arpack" and n_components == min(n_samples, n_features): + raise ValueError( + "n_components=%r must be strictly less than " + "min(n_samples, n_features)=%r with " + "svd_solver='%s'" + % (n_components, min(n_samples, n_features), svd_solver) + ) + + random_state = check_random_state(self.random_state) + + # Center data + total_var = None + if issparse(X): + self.mean_, var = mean_variance_axis(X, axis=0) + total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1 + X = _implicit_column_offset(X, self.mean_) + else: + self.mean_ = xp.mean(X, axis=0) + X -= self.mean_ + + if svd_solver == "arpack": + v0 = _init_arpack_v0(min(X.shape), random_state) + U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0) + # svds doesn't abide by scipy.linalg.svd/randomized_svd + # conventions, so reverse its outputs. + S = S[::-1] + # flip eigenvectors' sign to enforce deterministic output + U, Vt = svd_flip(U[:, ::-1], Vt[::-1]) + + elif svd_solver == "randomized": + # sign flipping is done inside + U, S, Vt = randomized_svd( + X, + n_components=n_components, + n_oversamples=self.n_oversamples, + n_iter=self.iterated_power, + power_iteration_normalizer=self.power_iteration_normalizer, + flip_sign=True, + random_state=random_state, + ) + + self.n_samples_ = n_samples + self.components_ = Vt + self.n_components_ = n_components + + # Get variance explained by singular values + self.explained_variance_ = (S**2) / (n_samples - 1) + + # Workaround in-place variance calculation since at the time numpy + # did not have a way to calculate variance in-place. 
+ # + # TODO: update this code to either: + # * Use the array-api variance calculation, unless memory usage suffers + # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api + # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991 + if total_var is None: + N = X.shape[0] - 1 + X **= 2 + total_var = xp.sum(X) / N + + self.explained_variance_ratio_ = self.explained_variance_ / total_var + self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values. + + if self.n_components_ < min(n_features, n_samples): + self.noise_variance_ = total_var - xp.sum(self.explained_variance_) + self.noise_variance_ /= min(n_features, n_samples) - n_components + else: + self.noise_variance_ = 0.0 + + return U, S, Vt + + def score_samples(self, X): + """Return the log-likelihood of each sample. + + See. "Pattern Recognition and Machine Learning" + by C. Bishop, 12.2.1 p. 574 + or http://www.miketipping.com/papers/met-mppca.pdf + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + Returns + ------- + ll : ndarray of shape (n_samples,) + Log-likelihood of each sample under the current model. + """ + check_is_fitted(self) + xp, _ = get_namespace(X) + X = self._validate_data(X, dtype=[xp.float64, xp.float32], reset=False) + Xr = X - self.mean_ + n_features = X.shape[1] + precision = self.get_precision() + log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision)) + return log_like + + def score(self, X, y=None): + """Return the average log-likelihood of all samples. + + See. "Pattern Recognition and Machine Learning" + by C. Bishop, 12.2.1 p. 574 + or http://www.miketipping.com/papers/met-mppca.pdf + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + y : Ignored + Ignored. + + Returns + ------- + ll : float + Average log-likelihood of the samples under the current model. 
+ """ + xp, _ = get_namespace(X) + return float(xp.mean(self.score_samples(X))) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32], "array_api_support": True} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..b14df8c5f4d222a2750be1fc413a671dfbc558e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py @@ -0,0 +1,551 @@ +"""Matrix factorization with Sparse PCA.""" +# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..linear_model import ridge_regression +from ..utils import check_random_state +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import svd_flip +from ..utils.validation import check_array, check_is_fitted +from ._dict_learning import MiniBatchDictionaryLearning, dict_learning + + +class _BaseSparsePCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Base class for SparsePCA and MiniBatchSparsePCA""" + + _parameter_constraints: dict = { + "n_components": [None, Interval(Integral, 1, None, closed="left")], + "alpha": [Interval(Real, 0.0, None, closed="left")], + "ridge_alpha": [Interval(Real, 0.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "method": [StrOptions({"lars", "cd"})], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1000, + tol=1e-8, + method="lars", + n_jobs=None, + verbose=False, + random_state=None, + ): + self.n_components = n_components + self.alpha = alpha + self.ridge_alpha = ridge_alpha + self.max_iter = max_iter + self.tol = tol + self.method = method + self.n_jobs = n_jobs + self.verbose = verbose + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + random_state = check_random_state(self.random_state) + X = self._validate_data(X) + + self.mean_ = X.mean(axis=0) + X = X - self.mean_ + + if self.n_components is None: + n_components = X.shape[1] + else: + n_components = self.n_components + + return self._fit(X, n_components, random_state) + + def transform(self, X): + """Least Squares projection of the data onto the sparse components. + + To avoid instability issues in case the system is under-determined, + regularization can be applied (Ridge regression) via the + `ridge_alpha` parameter. + + Note that Sparse PCA components orthogonality is not enforced as in PCA + hence one cannot use a simple linear projection. 
+ + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Test data to be transformed, must have the same number of + features as the data used to train the model. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + X = X - self.mean_ + + U = ridge_regression( + self.components_.T, X.T, self.ridge_alpha, solver="cholesky" + ) + + return U + + def inverse_transform(self, X): + """Transform data from the latent space to the original space. + + This inversion is an approximation due to the loss of information + induced by the forward decomposition. + + .. versionadded:: 1.2 + + Parameters + ---------- + X : ndarray of shape (n_samples, n_components) + Data in the latent space. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Reconstructed data in the original space. + """ + check_is_fitted(self) + X = check_array(X) + + return (X @ self.components_) + self.mean_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } + + +class SparsePCA(_BaseSparsePCA): + """Sparse Principal Components Analysis (SparsePCA). + + Finds the set of sparse components that can optimally reconstruct + the data. The amount of sparseness is controllable by the coefficient + of the L1 penalty, given by the parameter alpha. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of sparse atoms to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1 + Sparsity controlling parameter. Higher values lead to sparser + components. + + ridge_alpha : float, default=0.01 + Amount of ridge shrinkage to apply in order to improve + conditioning when calling the transform method. + + max_iter : int, default=1000 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for the stopping condition. + + method : {'lars', 'cd'}, default='lars' + Method to be used for optimization. + lars: uses the least angle regression method to solve the lasso problem + (linear_model.lars_path) + cd: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + U_init : ndarray of shape (n_samples, n_components), default=None + Initial values for the loadings for warm restart scenarios. Only used + if `U_init` and `V_init` are not None. + + V_init : ndarray of shape (n_components, n_features), default=None + Initial values for the components for warm restart scenarios. Only used + if `U_init` and `V_init` are not None. + + verbose : int or bool, default=False + Controls the verbosity; the higher, the more messages. Defaults to 0. + + random_state : int, RandomState instance or None, default=None + Used during dictionary learning. Pass an int for reproducible results + across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Sparse components extracted from the data. 
+ + error_ : ndarray + Vector of errors at each iteration. + + n_components_ : int + Estimated number of components. + + .. versionadded:: 0.23 + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + Equal to ``X.mean(axis=0)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA : Principal Component Analysis implementation. + MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less + accurate. + DictionaryLearning : Generic dictionary learning problem using a sparse code. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.decomposition import SparsePCA + >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0) + >>> transformer = SparsePCA(n_components=5, random_state=0) + >>> transformer.fit(X) + SparsePCA(...) + >>> X_transformed = transformer.transform(X) + >>> X_transformed.shape + (200, 5) + >>> # most values in the components_ are zero (sparsity) + >>> np.mean(transformer.components_ == 0) + 0.9666... + """ + + _parameter_constraints: dict = { + **_BaseSparsePCA._parameter_constraints, + "U_init": [None, np.ndarray], + "V_init": [None, np.ndarray], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1000, + tol=1e-8, + method="lars", + n_jobs=None, + U_init=None, + V_init=None, + verbose=False, + random_state=None, + ): + super().__init__( + n_components=n_components, + alpha=alpha, + ridge_alpha=ridge_alpha, + max_iter=max_iter, + tol=tol, + method=method, + n_jobs=n_jobs, + verbose=verbose, + random_state=random_state, + ) + self.U_init = U_init + self.V_init = V_init + + def _fit(self, X, n_components, random_state): + """Specialized `fit` for SparsePCA.""" + + code_init = self.V_init.T if self.V_init is not None else None + dict_init = self.U_init.T if self.U_init is not None else None + code, dictionary, E, self.n_iter_ = dict_learning( + X.T, + n_components, + alpha=self.alpha, + tol=self.tol, + max_iter=self.max_iter, + method=self.method, + n_jobs=self.n_jobs, + verbose=self.verbose, + random_state=random_state, + code_init=code_init, + dict_init=dict_init, + return_n_iter=True, + ) + # flip eigenvectors' sign to enforce deterministic output + code, dictionary = svd_flip(code, dictionary, u_based_decision=False) + self.components_ = code.T + components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis] + components_norm[components_norm == 0] = 1 + self.components_ /= components_norm + self.n_components_ = len(self.components_) + + self.error_ = E + return self + + +class MiniBatchSparsePCA(_BaseSparsePCA): + """Mini-batch Sparse Principal Components Analysis. + + Finds the set of sparse components that can optimally reconstruct + the data. The amount of sparseness is controllable by the coefficient + of the L1 penalty, given by the parameter alpha. + + For an example comparing sparse PCA to PCA, see + :ref:`sphx_glr_auto_examples_decomposition_plot_faces_decomposition.py` + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of sparse atoms to extract. 
If None, then ``n_components`` + is set to ``n_features``. + + alpha : int, default=1 + Sparsity controlling parameter. Higher values lead to sparser + components. + + ridge_alpha : float, default=0.01 + Amount of ridge shrinkage to apply in order to improve + conditioning when calling the transform method. + + max_iter : int, default=1_000 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.2 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `100`) instead. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + batch_size : int, default=3 + The number of features to take in each mini batch. + + verbose : int or bool, default=False + Controls the verbosity; the higher, the more messages. Defaults to 0. + + shuffle : bool, default=True + Whether to shuffle the data before splitting it in batches. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + method : {'lars', 'cd'}, default='lars' + Method to be used for optimization. + lars: uses the least angle regression method to solve the lasso problem + (linear_model.lars_path) + cd: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). Lars will be faster if + the estimated components are sparse. + + random_state : int, RandomState instance or None, default=None + Used for random shuffling when ``shuffle`` is set to ``True``, + during online dictionary learning. Pass an int for reproducible results + across multiple function calls. + See :term:`Glossary `. + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int or None, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to `None`. + + .. versionadded:: 1.1 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Sparse components extracted from the data. + + n_components_ : int + Estimated number of components. + + .. versionadded:: 0.23 + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + Equal to ``X.mean(axis=0)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + IncrementalPCA : Incremental principal components analysis. + PCA : Principal component analysis. + SparsePCA : Sparse Principal Components Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.decomposition import MiniBatchSparsePCA + >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0) + >>> transformer = MiniBatchSparsePCA(n_components=5, batch_size=50, + ... max_iter=10, random_state=0) + >>> transformer.fit(X) + MiniBatchSparsePCA(...) + >>> X_transformed = transformer.transform(X) + >>> X_transformed.shape + (200, 5) + >>> # most values in the components_ are zero (sparsity) + >>> np.mean(transformer.components_ == 0) + 0.9... + """ + + _parameter_constraints: dict = { + **_BaseSparsePCA._parameter_constraints, + "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)], + "callback": [None, callable], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1_000, + callback=None, + batch_size=3, + verbose=False, + shuffle=True, + n_jobs=None, + method="lars", + random_state=None, + tol=1e-3, + max_no_improvement=10, + ): + super().__init__( + n_components=n_components, + alpha=alpha, + ridge_alpha=ridge_alpha, + max_iter=max_iter, + tol=tol, + method=method, + n_jobs=n_jobs, + verbose=verbose, + random_state=random_state, + ) + self.callback = callback + self.batch_size = batch_size + self.shuffle = shuffle + self.max_no_improvement = max_no_improvement + + def _fit(self, X, n_components, random_state): + """Specialized `fit` for MiniBatchSparsePCA.""" + + transform_algorithm = "lasso_" + self.method + est = MiniBatchDictionaryLearning( + n_components=n_components, + alpha=self.alpha, + max_iter=self.max_iter, + dict_init=None, + batch_size=self.batch_size, + shuffle=self.shuffle, + n_jobs=self.n_jobs, + fit_algorithm=self.method, + random_state=random_state, + transform_algorithm=transform_algorithm, + transform_alpha=self.alpha, + verbose=self.verbose, + callback=self.callback, + tol=self.tol, + max_no_improvement=self.max_no_improvement, + ) + est.set_output(transform="default") + est.fit(X.T) + + self.components_, self.n_iter_ = est.transform(X.T).T, est.n_iter_ + + components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis] + components_norm[components_norm == 0] = 1 + self.components_ /= components_norm + self.n_components_ = len(self.components_) + + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py new file mode 100644 index 0000000000000000000000000000000000000000..725683e8d46c6eef2c7fc53780c65f91e51122cb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py @@ -0,0 +1,319 @@ +"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). +""" + +# Author: Lars Buitinck +# Olivier Grisel +# Michael Becker +# License: 3-clause BSD. 
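A side note on the sparse PCA code above: `transform` does not project onto the sparse components directly; it solves a small ridge problem against them (with `ridge_alpha` for conditioning), and `inverse_transform` maps codes back via `codes @ components_ + mean_`. The following is a minimal sketch of that round trip through the public API, not part of the diffed module; it assumes scikit-learn >= 1.2 (for `SparsePCA.inverse_transform`) and uses made-up toy data.

import numpy as np
from sklearn.decomposition import SparsePCA
from sklearn.linear_model import ridge_regression

rng = np.random.RandomState(0)
X = rng.randn(60, 8)

spca = SparsePCA(n_components=3, ridge_alpha=0.01, random_state=0).fit(X)
codes = spca.transform(X)

# Same computation by hand: ridge regression of the centered data
# against the learned (sparse) components, as in the transform code above.
codes_manual = ridge_regression(
    spca.components_.T, (X - spca.mean_).T, spca.ridge_alpha, solver="cholesky"
)
print(np.allclose(codes, codes_manual))   # expected: True

# Approximate reconstruction back in the original feature space.
X_back = spca.inverse_transform(codes)    # codes @ components_ + mean_
print(X_back.shape)                       # (60, 8)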
+ +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy.sparse.linalg import svds + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..utils import check_array, check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import check_is_fitted + +__all__ = ["TruncatedSVD"] + + +class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Dimensionality reduction using truncated SVD (aka LSA). + + This transformer performs linear dimensionality reduction by means of + truncated singular value decomposition (SVD). Contrary to PCA, this + estimator does not center the data before computing the singular value + decomposition. This means it can work with sparse matrices + efficiently. + + In particular, truncated SVD works on term count/tf-idf matrices as + returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In + that context, it is known as latent semantic analysis (LSA). + + This estimator supports two algorithms: a fast randomized SVD solver, and + a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or + `X.T * X`, whichever is more efficient. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=2 + Desired dimensionality of output data. + If algorithm='arpack', must be strictly less than the number of features. + If algorithm='randomized', must be less than or equal to the number of features. + The default value is useful for visualisation. For LSA, a value of + 100 is recommended. + + algorithm : {'arpack', 'randomized'}, default='randomized' + SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy + (scipy.sparse.linalg.svds), or "randomized" for the randomized + algorithm due to Halko (2009). + + n_iter : int, default=5 + Number of iterations for randomized SVD solver. Not used by ARPACK. The + default is larger than the default in + :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse + matrices that may have large slowly decaying spectrum. + + n_oversamples : int, default=10 + Number of oversamples for randomized SVD solver. Not used by ARPACK. + See :func:`~sklearn.utils.extmath.randomized_svd` for a complete + description. + + .. versionadded:: 1.1 + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Power iteration normalizer for randomized SVD solver. + Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` + for more details. + + .. versionadded:: 1.1 + + random_state : int, RandomState instance or None, default=None + Used during randomized svd. Pass an int for reproducible results across + multiple function calls. + See :term:`Glossary `. + + tol : float, default=0.0 + Tolerance for ARPACK. 0 means machine precision. Ignored by randomized + SVD solver. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The right singular vectors of the input data. + + explained_variance_ : ndarray of shape (n_components,) + The variance of the training samples transformed by a projection to + each component. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. 
+ + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + FactorAnalysis : A simple linear generative model with + Gaussian latent variables. + IncrementalPCA : Incremental principal components analysis. + KernelPCA : Kernel Principal component analysis. + NMF : Non-Negative Matrix Factorization. + PCA : Principal component analysis. + + Notes + ----- + SVD suffers from a problem called "sign indeterminacy", which means the + sign of the ``components_`` and the output from transform depend on the + algorithm and random state. To work around this, fit instances of this + class to data once, then keep the instance around to do transformations. + + References + ---------- + :arxiv:`Halko, et al. (2009). "Finding structure with randomness: + Stochastic algorithms for constructing approximate matrix decompositions" + <0909.4061>` + + Examples + -------- + >>> from sklearn.decomposition import TruncatedSVD + >>> from scipy.sparse import csr_matrix + >>> import numpy as np + >>> np.random.seed(0) + >>> X_dense = np.random.rand(100, 100) + >>> X_dense[:, 2 * np.arange(50)] = 0 + >>> X = csr_matrix(X_dense) + >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42) + >>> svd.fit(X) + TruncatedSVD(n_components=5, n_iter=7, random_state=42) + >>> print(svd.explained_variance_ratio_) + [0.0157... 0.0512... 0.0499... 0.0479... 0.0453...] + >>> print(svd.explained_variance_ratio_.sum()) + 0.2102... + >>> print(svd.singular_values_) + [35.2410... 4.5981... 4.5420... 4.4486... 4.3288...] + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "algorithm": [StrOptions({"arpack", "randomized"})], + "n_iter": [Interval(Integral, 0, None, closed="left")], + "n_oversamples": [Interval(Integral, 1, None, closed="left")], + "power_iteration_normalizer": [StrOptions({"auto", "OR", "LU", "none"})], + "random_state": ["random_state"], + "tol": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + n_components=2, + *, + algorithm="randomized", + n_iter=5, + n_oversamples=10, + power_iteration_normalizer="auto", + random_state=None, + tol=0.0, + ): + self.algorithm = algorithm + self.n_components = n_components + self.n_iter = n_iter + self.n_oversamples = n_oversamples + self.power_iteration_normalizer = power_iteration_normalizer + self.random_state = random_state + self.tol = tol + + def fit(self, X, y=None): + """Fit model on training data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the transformer object. + """ + self.fit_transform(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit model to X and perform dimensionality reduction on X. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Reduced version of X. This will always be a dense array. + """ + X = self._validate_data(X, accept_sparse=["csr", "csc"], ensure_min_features=2) + random_state = check_random_state(self.random_state) + + if self.algorithm == "arpack": + v0 = _init_arpack_v0(min(X.shape), random_state) + U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0) + # svds doesn't abide by scipy.linalg.svd/randomized_svd + # conventions, so reverse its outputs. + Sigma = Sigma[::-1] + U, VT = svd_flip(U[:, ::-1], VT[::-1]) + + elif self.algorithm == "randomized": + if self.n_components > X.shape[1]: + raise ValueError( + f"n_components({self.n_components}) must be <=" + f" n_features({X.shape[1]})." + ) + U, Sigma, VT = randomized_svd( + X, + self.n_components, + n_iter=self.n_iter, + n_oversamples=self.n_oversamples, + power_iteration_normalizer=self.power_iteration_normalizer, + random_state=random_state, + ) + + self.components_ = VT + + # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T, + # X @ V is not the same as U @ Sigma + if self.algorithm == "randomized" or ( + self.algorithm == "arpack" and self.tol > 0 + ): + X_transformed = safe_sparse_dot(X, self.components_.T) + else: + X_transformed = U * Sigma + + # Calculate explained variance & explained variance ratio + self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) + if sp.issparse(X): + _, full_var = mean_variance_axis(X, axis=0) + full_var = full_var.sum() + else: + full_var = np.var(X, axis=0).sum() + self.explained_variance_ratio_ = exp_var / full_var + self.singular_values_ = Sigma # Store the singular values. + + return X_transformed + + def transform(self, X): + """Perform dimensionality reduction on X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Reduced version of X. This will always be a dense array. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False) + return safe_sparse_dot(X, self.components_.T) + + def inverse_transform(self, X): + """Transform X back to its original space. + + Returns an array X_original whose transform would be X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + New data. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Note that this is always a dense array. + """ + X = check_array(X) + return np.dot(X, self.components_) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py new file mode 100644 index 0000000000000000000000000000000000000000..6a376b01ecb19ab531729307229161eaca34946e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py @@ -0,0 +1,451 @@ +""" +Test the fastica algorithm. 
+""" +import itertools +import os +import warnings + +import numpy as np +import pytest +from scipy import stats + +from sklearn.decomposition import PCA, FastICA, fastica +from sklearn.decomposition._fastica import _gs_decorrelation +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import assert_allclose + + +def center_and_norm(x, axis=-1): + """Centers and norms x **in place** + + Parameters + ----------- + x: ndarray + Array with an axis of observations (statistical units) measured on + random variables. + axis: int, optional + Axis along which the mean and variance are calculated. + """ + x = np.rollaxis(x, axis) + x -= x.mean(axis=0) + x /= x.std(axis=0) + + +def test_gs(): + # Test gram schmidt orthonormalization + # generate a random orthogonal matrix + rng = np.random.RandomState(0) + W, _, _ = np.linalg.svd(rng.randn(10, 10)) + w = rng.randn(10) + _gs_decorrelation(w, W, 10) + assert (w**2).sum() < 1.0e-10 + w = rng.randn(10) + u = _gs_decorrelation(w, W, 5) + tmp = np.dot(u, W.T) + assert (tmp[:5] ** 2).sum() < 1.0e-10 + + +def test_fastica_attributes_dtypes(global_dtype): + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + fica = FastICA( + n_components=5, max_iter=1000, whiten="unit-variance", random_state=0 + ).fit(X) + assert fica.components_.dtype == global_dtype + assert fica.mixing_.dtype == global_dtype + assert fica.mean_.dtype == global_dtype + assert fica.whitening_.dtype == global_dtype + + +def test_fastica_return_dtypes(global_dtype): + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + k_, mixing_, s_ = fastica( + X, max_iter=1000, whiten="unit-variance", random_state=rng + ) + assert k_.dtype == global_dtype + assert mixing_.dtype == global_dtype + assert s_.dtype == global_dtype + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_fastica_simple(add_noise, global_random_seed, global_dtype): + if ( + global_random_seed == 20 + and global_dtype == np.float32 + and not add_noise + and os.getenv("DISTRIB") == "ubuntu" + ): + pytest.xfail( + "FastICA instability with Ubuntu Atlas build with float32 " + "global_dtype. For more details, see " + "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119" # noqa + ) + + # Test the FastICA algorithm on very simple data. 
+ rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + # Generate two sources: + s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 + s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed) + s = np.c_[s1, s2].T + center_and_norm(s) + s = s.astype(global_dtype) + s1, s2 = s + + # Mixing angle + phi = 0.6 + mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) + mixing = mixing.astype(global_dtype) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(2, 1000) + + center_and_norm(m) + + # function as fun arg + def g_test(x): + return x**3, (3 * x**2).mean(axis=-1) + + algos = ["parallel", "deflation"] + nls = ["logcosh", "exp", "cube", g_test] + whitening = ["arbitrary-variance", "unit-variance", False] + for algo, nl, whiten in itertools.product(algos, nls, whitening): + if whiten: + k_, mixing_, s_ = fastica( + m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng + ) + with pytest.raises(ValueError): + fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo) + else: + pca = PCA(n_components=2, whiten=True, random_state=rng) + X = pca.fit_transform(m.T) + k_, mixing_, s_ = fastica( + X, fun=nl, algorithm=algo, whiten=False, random_state=rng + ) + with pytest.raises(ValueError): + fastica(X, fun=np.tanh, algorithm=algo) + s_ = s_.T + # Check that the mixing model described in the docstring holds: + if whiten: + # XXX: exact reconstruction to standard relative tolerance is not + # possible. This is probably expected when add_noise is True but we + # also need a non-trivial atol in float32 when add_noise is False. + # + # Note that the 2 sources are non-Gaussian in this test. + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol) + + center_and_norm(s_) + s1_, s2_ = s_ + # Check to see if the sources have been estimated + # in the wrong order + if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): + s2_, s1_ = s_ + s1_ *= np.sign(np.dot(s1_, s1)) + s2_ *= np.sign(np.dot(s2_, s2)) + + # Check that we have estimated the original sources + if not add_noise: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2) + else: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1) + + # Test FastICA class + _, _, sources_fun = fastica( + m.T, fun=nl, algorithm=algo, random_state=global_random_seed + ) + ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed) + sources = ica.fit_transform(m.T) + assert ica.components_.shape == (2, 2) + assert sources.shape == (1000, 2) + + assert_allclose(sources_fun, sources) + # Set atol to account for the different magnitudes of the elements in sources + # (from 1e-4 to 1e1). + atol = np.max(np.abs(sources)) * (1e-5 if global_dtype == np.float32 else 1e-7) + assert_allclose(sources, ica.transform(m.T), atol=atol) + + assert ica.mixing_.shape == (2, 2) + + ica = FastICA(fun=np.tanh, algorithm=algo) + with pytest.raises(ValueError): + ica.fit(m.T) + + +def test_fastica_nowhiten(): + m = [[0, 1], [1, 0]] + + # test for issue #697 + ica = FastICA(n_components=1, whiten=False, random_state=0) + warn_msg = "Ignoring n_components with whiten=False." + with pytest.warns(UserWarning, match=warn_msg): + ica.fit(m) + assert hasattr(ica, "mixing_") + + +def test_fastica_convergence_fail(): + # Test the FastICA algorithm on very simple data + # (see test_non_square_fastica). 
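The core relation exercised by `test_fastica_simple` above is that, with whitening, the recovered sources equal `W @ K` applied to the centered observations. A compact standalone version of that check follows; the two Laplace sources and the mixing matrix are made up for illustration and are not taken from the test.

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
S_true = rng.laplace(size=(2000, 2))        # two non-Gaussian sources
A = np.array([[1.0, 0.5], [0.5, 1.0]])      # mixing matrix
X = S_true @ A.T                             # observed mixtures, shape (2000, 2)

K, W, S = fastica(X, whiten="unit-variance", random_state=0)

# Sources returned by fastica satisfy S.T == W @ K @ (X - mean).T
# up to floating point error, which is what the test asserts.
X_centered = X - X.mean(axis=0)
print(np.allclose(S.T, W @ K @ X_centered.T))   # expected: True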
+ # Ensure a ConvergenceWarning raised if the tolerance is sufficiently low. + rng = np.random.RandomState(0) + + n_samples = 1000 + # Generate two sources: + t = np.linspace(0, 100, n_samples) + s1 = np.sin(t) + s2 = np.ceil(np.sin(np.pi * t)) + s = np.c_[s1, s2].T + center_and_norm(s) + + # Mixing matrix + mixing = rng.randn(6, 2) + m = np.dot(mixing, s) + + # Do fastICA with tolerance 0. to ensure failing convergence + warn_msg = ( + "FastICA did not converge. Consider increasing tolerance " + "or the maximum number of iterations." + ) + with pytest.warns(ConvergenceWarning, match=warn_msg): + ica = FastICA( + algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.0 + ) + ica.fit(m.T) + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_non_square_fastica(add_noise): + # Test the FastICA algorithm on very simple data. + rng = np.random.RandomState(0) + + n_samples = 1000 + # Generate two sources: + t = np.linspace(0, 100, n_samples) + s1 = np.sin(t) + s2 = np.ceil(np.sin(np.pi * t)) + s = np.c_[s1, s2].T + center_and_norm(s) + s1, s2 = s + + # Mixing matrix + mixing = rng.randn(6, 2) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(6, n_samples) + + center_and_norm(m) + + k_, mixing_, s_ = fastica( + m.T, n_components=2, whiten="unit-variance", random_state=rng + ) + s_ = s_.T + + # Check that the mixing model described in the docstring holds: + assert_allclose(s_, np.dot(np.dot(mixing_, k_), m)) + + center_and_norm(s_) + s1_, s2_ = s_ + # Check to see if the sources have been estimated + # in the wrong order + if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): + s2_, s1_ = s_ + s1_ *= np.sign(np.dot(s1_, s1)) + s2_ *= np.sign(np.dot(s2_, s2)) + + # Check that we have estimated the original sources + if not add_noise: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3) + + +def test_fit_transform(global_random_seed, global_dtype): + """Test unit variance of transformed data using FastICA algorithm. + + Check that `fit_transform` gives the same result as applying + `fit` and then `transform`. + + Bug #13056 + """ + # multivariate uniform data in [0, 1] + rng = np.random.RandomState(global_random_seed) + X = rng.random_sample((100, 10)).astype(global_dtype) + max_iter = 300 + for whiten, n_components in [["unit-variance", 5], [False, None]]: + n_components_ = n_components if n_components is not None else X.shape[1] + + ica = FastICA( + n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0 + ) + with warnings.catch_warnings(): + # make sure that numerical errors do not cause sqrt of negative + # values + warnings.simplefilter("error", RuntimeWarning) + # XXX: for some seeds, the model does not converge. + # However this is not what we test here. + warnings.simplefilter("ignore", ConvergenceWarning) + Xt = ica.fit_transform(X) + assert ica.components_.shape == (n_components_, 10) + assert Xt.shape == (X.shape[0], n_components_) + + ica2 = FastICA( + n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0 + ) + with warnings.catch_warnings(): + # make sure that numerical errors do not cause sqrt of negative + # values + warnings.simplefilter("error", RuntimeWarning) + warnings.simplefilter("ignore", ConvergenceWarning) + ica2.fit(X) + assert ica2.components_.shape == (n_components_, 10) + Xt2 = ica2.transform(X) + + # XXX: we have to set atol for this test to pass for all seeds when + # fitting with float32 data. 
Is this revealing a bug? + if global_dtype: + atol = np.abs(Xt2).mean() / 1e6 + else: + atol = 0.0 # the default rtol is enough for float64 data + assert_allclose(Xt, Xt2, atol=atol) + + +@pytest.mark.filterwarnings("ignore:Ignoring n_components with whiten=False.") +@pytest.mark.parametrize( + "whiten, n_components, expected_mixing_shape", + [ + ("arbitrary-variance", 5, (10, 5)), + ("arbitrary-variance", 10, (10, 10)), + ("unit-variance", 5, (10, 5)), + ("unit-variance", 10, (10, 10)), + (False, 5, (10, 10)), + (False, 10, (10, 10)), + ], +) +def test_inverse_transform( + whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype +): + # Test FastICA.inverse_transform + n_samples = 100 + rng = np.random.RandomState(global_random_seed) + X = rng.random_sample((n_samples, 10)).astype(global_dtype) + + ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten) + with warnings.catch_warnings(): + # For some dataset (depending on the value of global_dtype) the model + # can fail to converge but this should not impact the definition of + # a valid inverse transform. + warnings.simplefilter("ignore", ConvergenceWarning) + Xt = ica.fit_transform(X) + assert ica.mixing_.shape == expected_mixing_shape + X2 = ica.inverse_transform(Xt) + assert X.shape == X2.shape + + # reversibility test in non-reduction case + if n_components == X.shape[1]: + # XXX: we have to set atol for this test to pass for all seeds when + # fitting with float32 data. Is this revealing a bug? + if global_dtype: + # XXX: dividing by a smaller number makes + # tests fail for some seeds. + atol = np.abs(X2).mean() / 1e5 + else: + atol = 0.0 # the default rtol is enough for float64 data + assert_allclose(X, X2, atol=atol) + + +def test_fastica_errors(): + n_features = 3 + n_samples = 10 + rng = np.random.RandomState(0) + X = rng.random_sample((n_samples, n_features)) + w_init = rng.randn(n_features + 1, n_features + 1) + with pytest.raises(ValueError, match=r"alpha must be in \[1,2\]"): + fastica(X, fun_args={"alpha": 0}) + with pytest.raises( + ValueError, match="w_init has invalid shape.+" r"should be \(3L?, 3L?\)" + ): + fastica(X, w_init=w_init) + + +def test_fastica_whiten_unit_variance(): + """Test unit variance of transformed data using FastICA algorithm. 
+ + Bug #13056 + """ + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)) + n_components = X.shape[1] + ica = FastICA(n_components=n_components, whiten="unit-variance", random_state=0) + Xt = ica.fit_transform(X) + + assert np.var(Xt) == pytest.approx(1.0) + + +@pytest.mark.parametrize("whiten", ["arbitrary-variance", "unit-variance", False]) +@pytest.mark.parametrize("return_X_mean", [True, False]) +@pytest.mark.parametrize("return_n_iter", [True, False]) +def test_fastica_output_shape(whiten, return_X_mean, return_n_iter): + n_features = 3 + n_samples = 10 + rng = np.random.RandomState(0) + X = rng.random_sample((n_samples, n_features)) + + expected_len = 3 + return_X_mean + return_n_iter + + out = fastica( + X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean + ) + + assert len(out) == expected_len + if not whiten: + assert out[0] is None + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_fastica_simple_different_solvers(add_noise, global_random_seed): + """Test FastICA is consistent between whiten_solvers.""" + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + # Generate two sources: + s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 + s2 = stats.t.rvs(1, size=n_samples, random_state=rng) + s = np.c_[s1, s2].T + center_and_norm(s) + s1, s2 = s + + # Mixing angle + phi = rng.rand() * 2 * np.pi + mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(2, 1000) + + center_and_norm(m) + + outs = {} + for solver in ("svd", "eigh"): + ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver=solver) + sources = ica.fit_transform(m.T) + outs[solver] = sources + assert ica.components_.shape == (2, 2) + assert sources.shape == (1000, 2) + + # compared numbers are not all on the same magnitude. 
Using a small atol to + # make the test less brittle + assert_allclose(outs["eigh"], outs["svd"], atol=1e-12) + + +def test_fastica_eigh_low_rank_warning(global_random_seed): + """Test FastICA eigh solver raises warning for low-rank data.""" + rng = np.random.RandomState(global_random_seed) + A = rng.randn(10, 2) + X = A @ A.T + ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh") + msg = "There are some small singular values" + with pytest.warns(UserWarning, match=msg): + ica.fit(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py new file mode 100644 index 0000000000000000000000000000000000000000..2112b59129e254eea3fffcec23ce08bb974cacd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py @@ -0,0 +1,1062 @@ +import re +import sys +import warnings +from io import StringIO + +import numpy as np +import pytest +from scipy import linalg + +from sklearn.base import clone +from sklearn.decomposition import NMF, MiniBatchNMF, non_negative_factorization +from sklearn.decomposition import _nmf as nmf # For testing internals +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.extmath import squared_norm +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_convergence_warning(Estimator, solver): + convergence_warning = ( + "Maximum number of iterations 1 reached. Increase it to improve convergence." + ) + A = np.ones((2, 2)) + with pytest.warns(ConvergenceWarning, match=convergence_warning): + Estimator(max_iter=1, n_components="auto", **solver).fit(A) + + +def test_initialize_nn_output(): + # Test that initialization does not return negative values + rng = np.random.mtrand.RandomState(42) + data = np.abs(rng.randn(10, 10)) + for init in ("random", "nndsvd", "nndsvda", "nndsvdar"): + W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0) + assert not ((W < 0).any() or (H < 0).any()) + + +# TODO(1.6): remove the warning filter for `n_components` +@pytest.mark.filterwarnings( + r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in" + r" the initialization", + "ignore:The default value of `n_components` will change", +) +def test_parameter_checking(): + # Here we only check for invalid parameter values that are not already + # automatically tested in the common tests. 
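`test_convergence_warning` above relies on NMF warning whenever the iteration budget is exhausted while the tolerance is positive. A standalone sketch of that behaviour using plain `warnings` instead of pytest; the tiny all-ones matrix mirrors the test, and `n_components` is pinned here only to avoid the unrelated FutureWarning about its default value.

import warnings
import numpy as np
from sklearn.decomposition import NMF
from sklearn.exceptions import ConvergenceWarning

A = np.ones((2, 2))
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    NMF(n_components=2, max_iter=1).fit(A)   # budget of 1 iteration is exhausted

print(any(issubclass(w.category, ConvergenceWarning) for w in caught))   # expected: True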
+ + A = np.ones((2, 2)) + + msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0" + with pytest.raises(ValueError, match=msg): + NMF(solver="cd", beta_loss=1.0).fit(A) + msg = "Negative values in data passed to" + with pytest.raises(ValueError, match=msg): + NMF().fit(-A) + clf = NMF(2, tol=0.1).fit(A) + with pytest.raises(ValueError, match=msg): + clf.transform(-A) + with pytest.raises(ValueError, match=msg): + nmf._initialize_nmf(-A, 2, "nndsvd") + + for init in ["nndsvd", "nndsvda", "nndsvdar"]: + msg = re.escape( + "init = '{}' can only be used when " + "n_components <= min(n_samples, n_features)".format(init) + ) + with pytest.raises(ValueError, match=msg): + NMF(3, init=init).fit(A) + with pytest.raises(ValueError, match=msg): + MiniBatchNMF(3, init=init).fit(A) + with pytest.raises(ValueError, match=msg): + nmf._initialize_nmf(A, 3, init) + + +def test_initialize_close(): + # Test NNDSVD error + # Test that _initialize_nmf error is less than the standard deviation of + # the entries in the matrix. + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + W, H = nmf._initialize_nmf(A, 10, init="nndsvd") + error = linalg.norm(np.dot(W, H) - A) + sdev = linalg.norm(A - A.mean()) + assert error <= sdev + + +def test_initialize_variants(): + # Test NNDSVD variants correctness + # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic + # 'nndsvd' only where the basic version has zeros. + rng = np.random.mtrand.RandomState(42) + data = np.abs(rng.randn(10, 10)) + W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd") + Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda") + War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0) + + for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)): + assert_almost_equal(evl[ref != 0], ref[ref != 0]) + + +# ignore UserWarning raised when both solver='mu' and init='nndsvd' +@ignore_warnings(category=UserWarning) +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random")) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H): + # Test that the decomposition does not contain negative values + A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)] + model = Estimator( + n_components=2, + init=init, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=0, + **solver, + ) + transf = model.fit_transform(A) + assert not ((model.components_ < 0).any() or (transf < 0).any()) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_fit_close(Estimator, solver): + rng = np.random.mtrand.RandomState(42) + # Test that the fit is not too far away + pnmf = Estimator( + 5, + init="nndsvdar", + random_state=0, + max_iter=600, + **solver, + ) + X = np.abs(rng.randn(6, 5)) + assert pnmf.fit(X).reconstruction_err_ < 0.1 + + +def test_nmf_true_reconstruction(): + # Test that the fit is not too far away from an exact solution + # (by construction) + n_samples = 15 + n_features = 10 + n_components = 5 + beta_loss = 1 + batch_size = 3 + max_iter = 1000 + + rng = np.random.mtrand.RandomState(42) + W_true = np.zeros([n_samples, n_components]) + W_array = np.abs(rng.randn(n_samples)) + for j in range(n_components): + W_true[j % n_samples, 
j] = W_array[j % n_samples] + H_true = np.zeros([n_components, n_features]) + H_array = np.abs(rng.randn(n_components)) + for j in range(n_features): + H_true[j % n_components, j] = H_array[j % n_components] + X = np.dot(W_true, H_true) + + model = NMF( + n_components=n_components, + solver="mu", + beta_loss=beta_loss, + max_iter=max_iter, + random_state=0, + ) + transf = model.fit_transform(X) + X_calc = np.dot(transf, model.components_) + + assert model.reconstruction_err_ < 0.1 + assert_allclose(X, X_calc) + + mbmodel = MiniBatchNMF( + n_components=n_components, + beta_loss=beta_loss, + batch_size=batch_size, + random_state=0, + max_iter=max_iter, + ) + transf = mbmodel.fit_transform(X) + X_calc = np.dot(transf, mbmodel.components_) + + assert mbmodel.reconstruction_err_ < 0.1 + assert_allclose(X, X_calc, atol=1) + + +@pytest.mark.parametrize("solver", ["cd", "mu"]) +def test_nmf_transform(solver): + # Test that fit_transform is equivalent to fit.transform for NMF + # Test that NMF.transform returns close values + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + m = NMF( + solver=solver, + n_components=3, + init="random", + random_state=0, + tol=1e-6, + ) + ft = m.fit_transform(A) + t = m.transform(A) + assert_allclose(ft, t, atol=1e-1) + + +def test_minibatch_nmf_transform(): + # Test that fit_transform is equivalent to fit.transform for MiniBatchNMF + # Only guaranteed with fresh restarts + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + m = MiniBatchNMF( + n_components=3, + random_state=0, + tol=1e-3, + fresh_restarts=True, + ) + ft = m.fit_transform(A) + t = m.transform(A) + assert_allclose(ft, t) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_transform_custom_init(Estimator, solver): + # Smoke test that checks if NMF.transform works with custom initialization + random_state = np.random.RandomState(0) + A = np.abs(random_state.randn(6, 5)) + n_components = 4 + avg = np.sqrt(A.mean() / n_components) + H_init = np.abs(avg * random_state.randn(n_components, 5)) + W_init = np.abs(avg * random_state.randn(6, n_components)) + + m = Estimator( + n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver + ) + m.fit_transform(A, W=W_init, H=H_init) + m.transform(A) + + +@pytest.mark.parametrize("solver", ("cd", "mu")) +def test_nmf_inverse_transform(solver): + # Test that NMF.inverse_transform returns close values + random_state = np.random.RandomState(0) + A = np.abs(random_state.randn(6, 4)) + m = NMF( + solver=solver, + n_components=4, + init="random", + random_state=0, + max_iter=1000, + ) + ft = m.fit_transform(A) + A_new = m.inverse_transform(ft) + assert_array_almost_equal(A, A_new, decimal=2) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +def test_mbnmf_inverse_transform(): + # Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform + # is close to the identity + rng = np.random.RandomState(0) + A = np.abs(rng.randn(6, 4)) + nmf = MiniBatchNMF( + random_state=rng, + max_iter=500, + init="nndsvdar", + fresh_restarts=True, + ) + ft = nmf.fit_transform(A) + A_new = nmf.inverse_transform(ft) + assert_allclose(A, A_new, rtol=1e-3, atol=1e-2) + + +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_n_components_greater_n_features(Estimator): + # Smoke test for the case of more components than features. 
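The transform and inverse-transform tests above all reduce to one property: with enough components and iterations, `W @ H` closely reconstructs the input. A small sketch of that check through the public API; the data is made up and the precision comment mirrors the `decimal=2` tolerance used in `test_nmf_inverse_transform`.

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.RandomState(0)
A = np.abs(rng.randn(6, 4))

m = NMF(n_components=4, init="random", random_state=0, max_iter=1000)
W = m.fit_transform(A)
A_hat = W @ m.components_

# Expected to be small (roughly 1e-2 or better, per the test above).
print(np.abs(A - A_hat).max())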
+ rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(30, 10)) + Estimator(n_components=15, random_state=0, tol=1e-2).fit(A) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_nmf_sparse_input(Estimator, solver, sparse_container, alpha_W, alpha_H): + # Test that sparse matrices are accepted as input + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + A[:, 2 * np.arange(5)] = 0 + A_sparse = sparse_container(A) + + est1 = Estimator( + n_components=5, + init="random", + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=0, + tol=0, + max_iter=100, + **solver, + ) + est2 = clone(est1) + + W1 = est1.fit_transform(A) + W2 = est2.fit_transform(A_sparse) + H1 = est1.components_ + H2 = est2.components_ + + assert_allclose(W1, W2) + assert_allclose(H1, H2) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_nmf_sparse_transform(Estimator, solver, csc_container): + # Test that transform works on sparse data. Issue #2124 + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(3, 2)) + A[1, 1] = 0 + A = csc_container(A) + + model = Estimator(random_state=0, n_components=2, max_iter=400, **solver) + A_fit_tr = model.fit_transform(A) + A_tr = model.transform(A) + assert_allclose(A_fit_tr, A_tr, atol=1e-1) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("init", ["random", "nndsvd"]) +@pytest.mark.parametrize("solver", ("cd", "mu")) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H): + # Test that the function is called in the same way, either directly + # or through the NMF class + max_iter = 500 + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + A[:, 2 * np.arange(5)] = 0 + + W_nmf, H, _ = non_negative_factorization( + A, + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + W_nmf_2, H, _ = non_negative_factorization( + A, + H=H, + update_H=False, + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + + model_class = NMF( + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + W_cls = model_class.fit_transform(A) + W_cls_2 = model_class.transform(A) + + assert_allclose(W_nmf, W_cls) + assert_allclose(W_nmf_2, W_cls_2) + + +def test_non_negative_factorization_checking(): + # Note that the validity of parameter types and range of possible values + # for scalar numerical or str parameters is already checked in the common + # tests. Here we only check for problems that cannot be captured by simple + # declarative constraints on the valid parameter values. 
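`test_non_negative_factorization_consistency` above asserts that the functional and estimator interfaces share the same code path. A brief sketch of that equivalence with default regularisation; the data and parameter values here are illustrative, chosen to match the spirit of the test.

import numpy as np
from sklearn.decomposition import NMF, non_negative_factorization

rng = np.random.RandomState(42)
A = np.abs(rng.randn(10, 10))

W_func, H_func, _ = non_negative_factorization(
    A, n_components=5, init="random", solver="cd", random_state=1, tol=1e-2, max_iter=500
)
W_cls = NMF(
    n_components=5, init="random", solver="cd", random_state=1, tol=1e-2, max_iter=500
).fit_transform(A)

print(np.allclose(W_func, W_cls))   # expected: True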
+ + A = np.ones((2, 2)) + # Test parameters checking in public function + nnmf = non_negative_factorization + msg = re.escape("Negative values in data passed to NMF (input H)") + with pytest.raises(ValueError, match=msg): + nnmf(A, A, -A, 2, init="custom") + msg = re.escape("Negative values in data passed to NMF (input W)") + with pytest.raises(ValueError, match=msg): + nnmf(A, -A, A, 2, init="custom") + msg = re.escape("Array passed to NMF (input H) is full of zeros") + with pytest.raises(ValueError, match=msg): + nnmf(A, A, 0 * A, 2, init="custom") + + +def _beta_divergence_dense(X, W, H, beta): + """Compute the beta-divergence of X and W.H for dense array only. + + Used as a reference for testing nmf._beta_divergence. + """ + WH = np.dot(W, H) + + if beta == 2: + return squared_norm(X - WH) / 2 + + WH_Xnonzero = WH[X != 0] + X_nonzero = X[X != 0] + np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero) + + if beta == 1: + res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero)) + res += WH.sum() - X.sum() + + elif beta == 0: + div = X_nonzero / WH_Xnonzero + res = np.sum(div) - X.size - np.sum(np.log(div)) + else: + res = (X_nonzero**beta).sum() + res += (beta - 1) * (WH**beta).sum() + res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum() + res /= beta * (beta - 1) + + return res + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_beta_divergence(csr_container): + # Compare _beta_divergence with the reference _beta_divergence_dense + n_samples = 20 + n_features = 10 + n_components = 5 + beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0] + + # initialization + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta in beta_losses: + ref = _beta_divergence_dense(X, W, H, beta) + loss = nmf._beta_divergence(X, W, H, beta) + loss_csr = nmf._beta_divergence(X_csr, W, H, beta) + + assert_almost_equal(ref, loss, decimal=7) + assert_almost_equal(ref, loss_csr, decimal=7) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_special_sparse_dot(csr_container): + # Test the function that computes np.dot(W, H), only where X is non zero. 
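The `_beta_divergence_dense` reference above encodes the usual special cases of the beta-divergence. For strictly positive data the support restriction is a no-op and the two most common branches reduce to familiar closed forms; the sketch below writes them out in plain numpy on made-up toy matrices.

import numpy as np

rng = np.random.RandomState(0)
X = np.abs(rng.randn(4, 3)) + 0.1    # strictly positive, so no zero handling needed
W = np.abs(rng.randn(4, 2)) + 0.1
H = np.abs(rng.randn(2, 3)) + 0.1
WH = W @ H

frobenius = 0.5 * np.sum((X - WH) ** 2)                    # beta = 2 branch
gen_kl = np.sum(X * np.log(X / WH)) + WH.sum() - X.sum()   # beta = 1 branch
print(frobenius, gen_kl)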
+ n_samples = 10 + n_features = 5 + n_components = 3 + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + + W = np.abs(rng.randn(n_samples, n_components)) + H = np.abs(rng.randn(n_components, n_features)) + + WH_safe = nmf._special_sparse_dot(W, H, X_csr) + WH = nmf._special_sparse_dot(W, H, X) + + # test that both results have same values, in X_csr nonzero elements + ii, jj = X_csr.nonzero() + WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel() + assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10) + + # test that WH_safe and X_csr have the same sparse structure + assert_array_equal(WH_safe.indices, X_csr.indices) + assert_array_equal(WH_safe.indptr, X_csr.indptr) + assert_array_equal(WH_safe.shape, X_csr.shape) + + +@ignore_warnings(category=ConvergenceWarning) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_nmf_multiplicative_update_sparse(csr_container): + # Compare sparse and dense input in multiplicative update NMF + # Also test continuity of the results with respect to beta_loss parameter + n_samples = 20 + n_features = 10 + n_components = 5 + alpha = 0.1 + l1_ratio = 0.5 + n_iter = 20 + + # initialization + rng = np.random.mtrand.RandomState(1337) + X = rng.randn(n_samples, n_features) + X = np.abs(X) + X_csr = csr_container(X) + W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5): + # Reference with dense array X + W, H = W0.copy(), H0.copy() + W1, H1, _ = non_negative_factorization( + X, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + # Compare with sparse X + W, H = W0.copy(), H0.copy() + W2, H2, _ = non_negative_factorization( + X_csr, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + assert_allclose(W1, W2, atol=1e-7) + assert_allclose(H1, H2, atol=1e-7) + + # Compare with almost same beta_loss, since some values have a specific + # behavior, but the results should be continuous w.r.t beta_loss + beta_loss -= 1.0e-5 + W, H = W0.copy(), H0.copy() + W3, H3, _ = non_negative_factorization( + X_csr, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + assert_allclose(W1, W3, atol=1e-4) + assert_allclose(H1, H3, atol=1e-4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_nmf_negative_beta_loss(csr_container): + # Test that an error is raised if beta_loss < 0 and X contains zeros. + # Test that the output has not NaN values when the input contains zeros. + n_samples = 6 + n_features = 5 + n_components = 3 + + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + + def _assert_nmf_no_nan(X, beta_loss): + W, H, _ = non_negative_factorization( + X, + init="random", + n_components=n_components, + solver="mu", + beta_loss=beta_loss, + random_state=0, + max_iter=1000, + ) + assert not np.any(np.isnan(W)) + assert not np.any(np.isnan(H)) + + msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge." 
+ for beta_loss in (-0.6, 0.0): + with pytest.raises(ValueError, match=msg): + _assert_nmf_no_nan(X, beta_loss) + _assert_nmf_no_nan(X + 1e-9, beta_loss) + + for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5): + _assert_nmf_no_nan(X, beta_loss) + _assert_nmf_no_nan(X_csr, beta_loss) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("beta_loss", [-0.5, 0.0]) +def test_minibatch_nmf_negative_beta_loss(beta_loss): + """Check that an error is raised if beta_loss < 0 and X contains zeros.""" + rng = np.random.RandomState(0) + X = rng.normal(size=(6, 5)) + X[X < 0] = 0 + + nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0) + + msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge." + with pytest.raises(ValueError, match=msg): + nmf.fit(X) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_regularization(Estimator, solver): + # Test the effect of L1 and L2 regularizations + n_samples = 6 + n_features = 5 + n_components = 3 + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(n_samples, n_features)) + + # L1 regularization should increase the number of zeros + l1_ratio = 1.0 + regul = Estimator( + n_components=n_components, + alpha_W=0.5, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + model = Estimator( + n_components=n_components, + alpha_W=0.0, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + + W_regul = regul.fit_transform(X) + W_model = model.fit_transform(X) + + H_regul = regul.components_ + H_model = model.components_ + + eps = np.finfo(np.float64).eps + W_regul_n_zeros = W_regul[W_regul <= eps].size + W_model_n_zeros = W_model[W_model <= eps].size + H_regul_n_zeros = H_regul[H_regul <= eps].size + H_model_n_zeros = H_model[H_model <= eps].size + + assert W_regul_n_zeros > W_model_n_zeros + assert H_regul_n_zeros > H_model_n_zeros + + # L2 regularization should decrease the sum of the squared norm + # of the matrices W and H + l1_ratio = 0.0 + regul = Estimator( + n_components=n_components, + alpha_W=0.5, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + model = Estimator( + n_components=n_components, + alpha_W=0.0, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + + W_regul = regul.fit_transform(X) + W_model = model.fit_transform(X) + + H_regul = regul.components_ + H_model = model.components_ + + assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > ( + linalg.norm(W_regul) + ) ** 2.0 + (linalg.norm(H_regul)) ** 2.0 + + +@ignore_warnings(category=ConvergenceWarning) +@pytest.mark.parametrize("solver", ("cd", "mu")) +def test_nmf_decreasing(solver): + # test that the objective function is decreasing at each iteration + n_samples = 20 + n_features = 15 + n_components = 10 + alpha = 0.1 + l1_ratio = 0.5 + tol = 0.0 + + # initialization + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.abs(X, X) + W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5): + if solver != "mu" and beta_loss != 2: + # not implemented + continue + W, H = W0.copy(), H0.copy() + previous_loss = None + for _ in range(30): + # one more iteration starting from the previous results + W, H, _ = non_negative_factorization( + X, + W, + H, + beta_loss=beta_loss, + init="custom", + n_components=n_components, + max_iter=1, + alpha_W=alpha, + 
solver=solver, + tol=tol, + l1_ratio=l1_ratio, + verbose=0, + random_state=0, + update_H=True, + ) + + loss = ( + nmf._beta_divergence(X, W, H, beta_loss) + + alpha * l1_ratio * n_features * W.sum() + + alpha * l1_ratio * n_samples * H.sum() + + alpha * (1 - l1_ratio) * n_features * (W**2).sum() + + alpha * (1 - l1_ratio) * n_samples * (H**2).sum() + ) + if previous_loss is not None: + assert previous_loss > loss + previous_loss = loss + + +def test_nmf_underflow(): + # Regression test for an underflow issue in _beta_divergence + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 10, 2, 2 + X = np.abs(rng.randn(n_samples, n_features)) * 10 + W = np.abs(rng.randn(n_samples, n_components)) * 10 + H = np.abs(rng.randn(n_components, n_features)) + + X[0, 0] = 0 + ref = nmf._beta_divergence(X, W, H, beta=1.0) + X[0, 0] = 1e-323 + res = nmf._beta_divergence(X, W, H, beta=1.0) + assert_almost_equal(res, ref) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize( + "dtype_in, dtype_out", + [ + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ], +) +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out): + # Check that NMF preserves dtype (float32 and float64) + X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False) + np.abs(X, out=X) + + nmf = Estimator( + alpha_W=1.0, + alpha_H=1.0, + tol=1e-2, + random_state=0, + **solver, + ) + + assert nmf.fit(X).transform(X).dtype == dtype_out + assert nmf.fit_transform(X).dtype == dtype_out + assert nmf.components_.dtype == dtype_out + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_float32_float64_consistency(Estimator, solver): + # Check that the result of NMF is the same between float32 and float64 + X = np.random.RandomState(0).randn(50, 7) + np.abs(X, out=X) + nmf32 = Estimator(random_state=0, tol=1e-3, **solver) + W32 = nmf32.fit_transform(X.astype(np.float32)) + nmf64 = Estimator(random_state=0, tol=1e-3, **solver) + W64 = nmf64.fit_transform(X) + + assert_allclose(W32, W64, atol=1e-5) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_nmf_custom_init_dtype_error(Estimator): + # Check that an error is raise if custom H and/or W don't have the same + # dtype as X. 
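`test_nmf_decreasing` above monitors a penalised objective: the beta-divergence plus L1 and L2 terms whose weights are scaled by `n_features` for `W` and by `n_samples` for `H`. The helper below spells out that objective for the Frobenius case (`beta_loss=2`) in plain numpy; the function name and default values are made up for illustration.

import numpy as np

def penalised_frobenius_objective(X, W, H, alpha=0.1, l1_ratio=0.5):
    """Loss tracked in test_nmf_decreasing, specialised to beta_loss=2."""
    n_samples, n_features = X.shape
    fit_term = 0.5 * np.sum((X - W @ H) ** 2)
    l1_term = alpha * l1_ratio * (n_features * W.sum() + n_samples * H.sum())
    l2_term = alpha * (1 - l1_ratio) * (
        n_features * (W**2).sum() + n_samples * (H**2).sum()
    )
    return fit_term + l1_term + l2_term

# quick smoke check on random non-negative matrices
rng = np.random.RandomState(0)
X = np.abs(rng.randn(5, 4)); W = np.abs(rng.randn(5, 2)); H = np.abs(rng.randn(2, 4))
print(penalised_frobenius_objective(X, W, H))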
+ rng = np.random.RandomState(0) + X = rng.random_sample((20, 15)) + H = rng.random_sample((15, 15)).astype(np.float32) + W = rng.random_sample((20, 15)) + + with pytest.raises(TypeError, match="should have the same dtype as X"): + Estimator(init="custom").fit(X, H=H, W=W) + + with pytest.raises(TypeError, match="should have the same dtype as X"): + non_negative_factorization(X, H=H, update_H=False) + + +@pytest.mark.parametrize("beta_loss", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5]) +def test_nmf_minibatchnmf_equivalence(beta_loss): + # Test that MiniBatchNMF is equivalent to NMF when batch_size = n_samples and + # forget_factor 0.0 (stopping criterion put aside) + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(48, 5)) + + nmf = NMF( + n_components=5, + beta_loss=beta_loss, + solver="mu", + random_state=0, + tol=0, + ) + mbnmf = MiniBatchNMF( + n_components=5, + beta_loss=beta_loss, + random_state=0, + tol=0, + max_no_improvement=None, + batch_size=X.shape[0], + forget_factor=0.0, + ) + W = nmf.fit_transform(X) + mbW = mbnmf.fit_transform(X) + assert_allclose(W, mbW) + + +def test_minibatch_nmf_partial_fit(): + # Check fit / partial_fit equivalence. Applicable only with fresh restarts. + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(100, 5)) + + n_components = 5 + batch_size = 10 + max_iter = 2 + + mbnmf1 = MiniBatchNMF( + n_components=n_components, + init="custom", + random_state=0, + max_iter=max_iter, + batch_size=batch_size, + tol=0, + max_no_improvement=None, + fresh_restarts=False, + ) + mbnmf2 = MiniBatchNMF(n_components=n_components, init="custom", random_state=0) + + # Force the same init of H (W is recomputed anyway) to be able to compare results. + W, H = nmf._initialize_nmf( + X, n_components=n_components, init="random", random_state=0 + ) + + mbnmf1.fit(X, W=W, H=H) + for i in range(max_iter): + for j in range(batch_size): + mbnmf2.partial_fit(X[j : j + batch_size], W=W[:batch_size], H=H) + + assert mbnmf1.n_steps_ == mbnmf2.n_steps_ + assert_allclose(mbnmf1.components_, mbnmf2.components_) + + +def test_feature_names_out(): + """Check feature names out for NMF.""" + random_state = np.random.RandomState(0) + X = np.abs(random_state.randn(10, 4)) + nmf = NMF(n_components=3).fit(X) + + names = nmf.get_feature_names_out() + assert_array_equal([f"nmf{i}" for i in range(3)], names) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +def test_minibatch_nmf_verbose(): + # Check verbose mode of MiniBatchNMF for better coverage. 
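`test_feature_names_out` above pins down the naming convention of `get_feature_names_out`: the lowercased estimator prefix followed by the component index. A one-liner illustrating it on made-up data (the choice of `n_components` is arbitrary):

import numpy as np
from sklearn.decomposition import NMF

X = np.abs(np.random.RandomState(0).randn(10, 4))
nmf = NMF(n_components=3).fit(X)
print(list(nmf.get_feature_names_out()))   # ['nmf0', 'nmf1', 'nmf2']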
+ A = np.random.RandomState(0).random_sample((100, 10)) + nmf = MiniBatchNMF(tol=1e-2, random_state=0, verbose=1) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + nmf.fit(A) + finally: + sys.stdout = old_stdout + + +# TODO(1.5): remove this test +def test_NMF_inverse_transform_W_deprecation(): + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + est = NMF( + n_components=3, + init="random", + random_state=0, + tol=1e-6, + ) + Xt = est.fit_transform(A) + + with pytest.raises(TypeError, match="Missing required positional argument"): + est.inverse_transform() + + with pytest.raises(ValueError, match="Please provide only"): + est.inverse_transform(Xt=Xt, W=Xt) + + with warnings.catch_warnings(record=True): + warnings.simplefilter("error") + est.inverse_transform(Xt) + + with pytest.warns(FutureWarning, match="Input argument `W` was renamed to `Xt`"): + est.inverse_transform(W=Xt) + + +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_nmf_n_components_auto(Estimator): + # Check that n_components is correctly inferred + # from the provided custom initialization. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W = rng.random_sample((6, 2)) + H = rng.random_sample((2, 5)) + est = Estimator( + n_components="auto", + init="custom", + random_state=0, + tol=1e-6, + ) + est.fit_transform(X, W=W, H=H) + assert est._n_components == H.shape[0] + + +def test_nmf_non_negative_factorization_n_components_auto(): + # Check that n_components is correctly inferred from the provided + # custom initialization. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W_init = rng.random_sample((6, 2)) + H_init = rng.random_sample((2, 5)) + W, H, _ = non_negative_factorization( + X, W=W_init, H=H_init, init="custom", n_components="auto" + ) + assert H.shape == H_init.shape + assert W.shape == W_init.shape + + +# TODO(1.6): remove +def test_nmf_n_components_default_value_warning(): + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H = rng.random_sample((2, 5)) + with pytest.warns( + FutureWarning, match="The default value of `n_components` will change from" + ): + non_negative_factorization(X, H=H) + + +def test_nmf_n_components_auto_no_h_update(): + # Tests that non_negative_factorization does not fail when setting + # n_components="auto" also tests that the inferred n_component + # value is the right one. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H_true = rng.random_sample((2, 5)) + W, H, _ = non_negative_factorization( + X, H=H_true, n_components="auto", update_H=False + ) # should not fail + assert_allclose(H, H_true) + assert W.shape == (X.shape[0], H_true.shape[0]) + + +def test_nmf_w_h_not_used_warning(): + # Check that warnings are raised if user provided W and H are not used + # and initialization overrides value of W or H + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W_init = rng.random_sample((6, 2)) + H_init = rng.random_sample((2, 5)) + with pytest.warns( + RuntimeWarning, + match="When init!='custom', provided W or H are ignored", + ): + non_negative_factorization(X, H=H_init, update_H=True, n_components="auto") + + with pytest.warns( + RuntimeWarning, + match="When init!='custom', provided W or H are ignored", + ): + non_negative_factorization( + X, W=W_init, H=H_init, update_H=True, n_components="auto" + ) + + with pytest.warns( + RuntimeWarning, match="When update_H=False, the provided initial W is not used." 
+ ): + # When update_H is False, W is ignored regardless of init + # TODO: use the provided W when init="custom". + non_negative_factorization( + X, W=W_init, H=H_init, update_H=False, n_components="auto" + ) + + +def test_nmf_custom_init_shape_error(): + # Check that an informative error is raised when custom initialization does not + # have the right shape + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H = rng.random_sample((2, 5)) + nmf = NMF(n_components=2, init="custom", random_state=0) + + with pytest.raises(ValueError, match="Array with wrong first dimension passed"): + nmf.fit(X, H=H, W=rng.random_sample((5, 2))) + + with pytest.raises(ValueError, match="Array with wrong second dimension passed"): + nmf.fit(X, H=H, W=rng.random_sample((6, 3))) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..44281b9038697e56228a8e0584f7c8ba81d8b969 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py @@ -0,0 +1,987 @@ +import re +import warnings + +import numpy as np +import pytest +import scipy as sp +from numpy.testing import assert_array_equal + +from sklearn import config_context, datasets +from sklearn.base import clone +from sklearn.datasets import load_iris, make_classification +from sklearn.decomposition import PCA +from sklearn.decomposition._pca import _assess_dimension, _infer_dimension +from sklearn.utils._array_api import ( + _atol_for_type, + _convert_to_numpy, + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._array_api import device as array_device +from sklearn.utils._testing import _array_api_for_tests, assert_allclose +from sklearn.utils.estimator_checks import ( + _get_check_estimator_ids, + check_array_api_input_and_values, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +iris = datasets.load_iris() +PCA_SOLVERS = ["full", "arpack", "randomized", "auto"] + +# `SPARSE_M` and `SPARSE_N` could be larger, but be aware: +# * SciPy's generation of random sparse matrix can be costly +# * A (SPARSE_M, SPARSE_N) dense array is allocated to compare against +SPARSE_M, SPARSE_N = 1000, 300 # arbitrary +SPARSE_MAX_COMPONENTS = min(SPARSE_M, SPARSE_N) + + +def _check_fitted_pca_close(pca1, pca2, rtol): + assert_allclose(pca1.components_, pca2.components_, rtol=rtol) + assert_allclose(pca1.explained_variance_, pca2.explained_variance_, rtol=rtol) + assert_allclose(pca1.singular_values_, pca2.singular_values_, rtol=rtol) + assert_allclose(pca1.mean_, pca2.mean_, rtol=rtol) + assert_allclose(pca1.n_components_, pca2.n_components_, rtol=rtol) + assert_allclose(pca1.n_samples_, pca2.n_samples_, rtol=rtol) + assert_allclose(pca1.noise_variance_, pca2.noise_variance_, rtol=rtol) + assert_allclose(pca1.n_features_in_, pca2.n_features_in_, rtol=rtol) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +@pytest.mark.parametrize("n_components", range(1, iris.data.shape[1])) +def test_pca(svd_solver, n_components): + X = iris.data + pca = PCA(n_components=n_components, svd_solver=svd_solver) + + # check the shape of fit.transform + X_r = pca.fit(X).transform(X) + assert X_r.shape[1] == n_components + + # check the equivalence of fit.transform and fit_transform + X_r2 = pca.fit_transform(X) + assert_allclose(X_r, X_r2) + X_r = pca.transform(X) + assert_allclose(X_r, X_r2) + + # Test get_covariance and 
get_precision + cov = pca.get_covariance() + precision = pca.get_precision() + assert_allclose(np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-12) + + +@pytest.mark.parametrize("density", [0.01, 0.1, 0.30]) +@pytest.mark.parametrize("n_components", [1, 2, 10]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +@pytest.mark.parametrize("svd_solver", ["arpack"]) +@pytest.mark.parametrize("scale", [1, 10, 100]) +def test_pca_sparse( + global_random_seed, svd_solver, sparse_container, n_components, density, scale +): + # Make sure any tolerance changes pass with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all" + rtol = 5e-07 + transform_rtol = 3e-05 + + random_state = np.random.default_rng(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=density, + ) + ) + # Scale the data + vary the column means + scale_vector = random_state.random(X.shape[1]) * scale + X = X.multiply(scale_vector) + + pca = PCA( + n_components=n_components, + svd_solver=svd_solver, + random_state=global_random_seed, + ) + pca.fit(X) + + Xd = X.toarray() + pcad = PCA( + n_components=n_components, + svd_solver=svd_solver, + random_state=global_random_seed, + ) + pcad.fit(Xd) + + # Fitted attributes equality + _check_fitted_pca_close(pca, pcad, rtol=rtol) + + # Test transform + X2 = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=density, + ) + ) + X2d = X2.toarray() + + assert_allclose(pca.transform(X2), pca.transform(X2d), rtol=transform_rtol) + assert_allclose(pca.transform(X2), pcad.transform(X2d), rtol=transform_rtol) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_pca_sparse_fit_transform(global_random_seed, sparse_container): + random_state = np.random.default_rng(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=0.01, + ) + ) + X2 = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=0.01, + ) + ) + + pca_fit = PCA(n_components=10, svd_solver="arpack", random_state=global_random_seed) + pca_fit_transform = PCA( + n_components=10, svd_solver="arpack", random_state=global_random_seed + ) + + pca_fit.fit(X) + transformed_X = pca_fit_transform.fit_transform(X) + + _check_fitted_pca_close(pca_fit, pca_fit_transform, rtol=1e-10) + assert_allclose(transformed_X, pca_fit_transform.transform(X), rtol=2e-9) + assert_allclose(transformed_X, pca_fit.transform(X), rtol=2e-9) + assert_allclose(pca_fit.transform(X2), pca_fit_transform.transform(X2), rtol=2e-9) + + +@pytest.mark.parametrize("svd_solver", ["randomized", "full", "auto"]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_sparse_pca_solver_error(global_random_seed, svd_solver, sparse_container): + random_state = np.random.RandomState(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + ) + ) + pca = PCA(n_components=30, svd_solver=svd_solver) + error_msg_pattern = ( + f'PCA only support sparse inputs with the "arpack" solver, while "{svd_solver}"' + " was passed" + ) + with pytest.raises(TypeError, match=error_msg_pattern): + pca.fit(X) + + +def test_no_empty_slice_warning(): + # test if we avoid numpy warnings for computing over empty arrays + n_components = 10 + n_features = n_components + 2 # anything > n_comps triggered it in 0.16 + X = 
np.random.uniform(-1, 1, size=(n_components, n_features)) + pca = PCA(n_components=n_components) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + pca.fit(X) + + +@pytest.mark.parametrize("copy", [True, False]) +@pytest.mark.parametrize("solver", PCA_SOLVERS) +def test_whitening(solver, copy): + # Check that PCA output has unit-variance + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 80 + n_components = 30 + rank = 50 + + # some low rank data with correlated features + X = np.dot( + rng.randn(n_samples, rank), + np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features)), + ) + # the component-wise variance of the first 50 features is 3 times the + # mean component-wise variance of the remaining 30 features + X[:, :50] *= 3 + + assert X.shape == (n_samples, n_features) + + # the component-wise variance is thus highly varying: + assert X.std(axis=0).std() > 43.8 + + # whiten the data while projecting to the lower dim subspace + X_ = X.copy() # make sure we keep an original across iterations. + pca = PCA( + n_components=n_components, + whiten=True, + copy=copy, + svd_solver=solver, + random_state=0, + iterated_power=7, + ) + # test fit_transform + X_whitened = pca.fit_transform(X_.copy()) + assert X_whitened.shape == (n_samples, n_components) + X_whitened2 = pca.transform(X_) + assert_allclose(X_whitened, X_whitened2, rtol=5e-4) + + assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(n_components)) + assert_allclose(X_whitened.mean(axis=0), np.zeros(n_components), atol=1e-12) + + X_ = X.copy() + pca = PCA( + n_components=n_components, whiten=False, copy=copy, svd_solver=solver + ).fit(X_.copy()) + X_unwhitened = pca.transform(X_) + assert X_unwhitened.shape == (n_samples, n_components) + + # in that case the output components still have varying variances + assert X_unwhitened.std(axis=0).std() == pytest.approx(74.1, rel=1e-1) + # we always center, so no test for non-centering. 
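+
+
+ # Editor's addition (illustrative sketch, not part of upstream scikit-learn):
+ # a minimal standalone check of the whitening property exercised above. With
+ # whiten=True the transformed scores equal U * sqrt(n_samples - 1), so every
+ # retained component has unit sample variance (ddof=1).
+ def test_whitening_unit_variance_smoke():
+ rng = np.random.RandomState(0)
+ X = rng.randn(60, 8)
+ X_whitened = PCA(n_components=3, whiten=True, random_state=0).fit_transform(X)
+ assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(3), rtol=1e-6)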
+ + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_explained_variance_equivalence_solver(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca_full = PCA(n_components=2, svd_solver="full") + pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=0) + + pca_full.fit(X) + pca_other.fit(X) + + assert_allclose( + pca_full.explained_variance_, pca_other.explained_variance_, rtol=5e-2 + ) + assert_allclose( + pca_full.explained_variance_ratio_, + pca_other.explained_variance_ratio_, + rtol=5e-2, + ) + + +@pytest.mark.parametrize( + "X", + [ + np.random.RandomState(0).randn(100, 80), + datasets.make_classification(100, 80, n_informative=78, random_state=0)[0], + ], + ids=["random-data", "correlated-data"], +) +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_explained_variance_empirical(X, svd_solver): + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=0) + X_pca = pca.fit_transform(X) + assert_allclose(pca.explained_variance_, np.var(X_pca, ddof=1, axis=0)) + + expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0] + expected_result = sorted(expected_result, reverse=True)[:2] + assert_allclose(pca.explained_variance_, expected_result, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_singular_values_consistency(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca_full = PCA(n_components=2, svd_solver="full", random_state=rng) + pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + + pca_full.fit(X) + pca_other.fit(X) + + assert_allclose(pca_full.singular_values_, pca_other.singular_values_, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_singular_values(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + X_trans = pca.fit_transform(X) + + # compare to the Frobenius norm + assert_allclose( + np.sum(pca.singular_values_**2), np.linalg.norm(X_trans, "fro") ** 2 + ) + # Compare to the 2-norms of the score vectors + assert_allclose(pca.singular_values_, np.sqrt(np.sum(X_trans**2, axis=0))) + + # set the singular values and see what we get back + n_samples, n_features = 100, 110 + X = rng.randn(n_samples, n_features) + + pca = PCA(n_components=3, svd_solver=svd_solver, random_state=rng) + X_trans = pca.fit_transform(X) + X_trans /= np.sqrt(np.sum(X_trans**2, axis=0)) + X_trans[:, 0] *= 3.142 + X_trans[:, 1] *= 2.718 + X_hat = np.dot(X_trans, pca.components_) + pca.fit(X_hat) + assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0]) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_check_projection(svd_solver): + # Test that the projection of data is correct + rng = np.random.RandomState(0) + n, p = 100, 3 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5]) + Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) + + Yt = PCA(n_components=2, svd_solver=svd_solver).fit(X).transform(Xt) + Yt /= np.sqrt((Yt**2).sum()) + + assert_allclose(np.abs(Yt[0][0]), 1.0, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_check_projection_list(svd_solver): + # Test that the projection of data is correct + X = [[1.0, 0.0], [0.0, 1.0]] + pca = PCA(n_components=1, svd_solver=svd_solver, random_state=0) +
X_trans = pca.fit_transform(X) + assert X_trans.shape == (2, 1) + assert_allclose(X_trans.mean(), 0.00, atol=1e-12) + assert_allclose(X_trans.std(), 0.71, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", ["full", "arpack", "randomized"]) +@pytest.mark.parametrize("whiten", [False, True]) +def test_pca_inverse(svd_solver, whiten): + # Test that the projection of data can be inverted + rng = np.random.RandomState(0) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + pca = PCA(n_components=2, svd_solver=svd_solver, whiten=whiten).fit(X) + Y = pca.transform(X) + Y_inverse = pca.inverse_transform(Y) + assert_allclose(X, Y_inverse, rtol=5e-6) + + +@pytest.mark.parametrize( + "data", [np.array([[0, 1, 0], [1, 0, 0]]), np.array([[0, 1, 0], [1, 0, 0]]).T] +) +@pytest.mark.parametrize( + "svd_solver, n_components, err_msg", + [ + ("arpack", 0, r"must be between 1 and min\(n_samples, n_features\)"), + ("randomized", 0, r"must be between 1 and min\(n_samples, n_features\)"), + ("arpack", 2, r"must be strictly less than min"), + ( + "auto", + 3, + ( + r"n_components=3 must be between 0 and min\(n_samples, " + r"n_features\)=2 with svd_solver='full'" + ), + ), + ], +) +def test_pca_validation(svd_solver, data, n_components, err_msg): + # Ensures that solver-specific extreme inputs for the n_components + # parameter raise errors + smallest_d = 2 # The smallest dimension + pca_fitted = PCA(n_components, svd_solver=svd_solver) + + with pytest.raises(ValueError, match=err_msg): + pca_fitted.fit(data) + + # Additional case for arpack + if svd_solver == "arpack": + n_components = smallest_d + + err_msg = ( + "n_components={}L? must be strictly less than " + r"min\(n_samples, n_features\)={}L?
with " + "svd_solver='arpack'".format(n_components, smallest_d) + ) + with pytest.raises(ValueError, match=err_msg): + PCA(n_components, svd_solver=svd_solver).fit(data) + + +@pytest.mark.parametrize( + "solver, n_components_", + [ + ("full", min(iris.data.shape)), + ("arpack", min(iris.data.shape) - 1), + ("randomized", min(iris.data.shape)), + ], +) +@pytest.mark.parametrize("data", [iris.data, iris.data.T]) +def test_n_components_none(data, solver, n_components_): + pca = PCA(svd_solver=solver) + pca.fit(data) + assert pca.n_components_ == n_components_ + + +@pytest.mark.parametrize("svd_solver", ["auto", "full"]) +def test_n_components_mle(svd_solver): + # Ensure that n_components == 'mle' doesn't raise error for auto/full + rng = np.random.RandomState(0) + n_samples, n_features = 600, 10 + X = rng.randn(n_samples, n_features) + pca = PCA(n_components="mle", svd_solver=svd_solver) + pca.fit(X) + assert pca.n_components_ == 1 + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_n_components_mle_error(svd_solver): + # Ensure that n_components == 'mle' will raise an error for unsupported + # solvers + rng = np.random.RandomState(0) + n_samples, n_features = 600, 10 + X = rng.randn(n_samples, n_features) + pca = PCA(n_components="mle", svd_solver=svd_solver) + err_msg = "n_components='mle' cannot be a string with svd_solver='{}'".format( + svd_solver + ) + with pytest.raises(ValueError, match=err_msg): + pca.fit(X) + + +def test_pca_dim(): + # Check automated dimensionality setting + rng = np.random.RandomState(0) + n, p = 100, 5 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + pca = PCA(n_components="mle", svd_solver="full").fit(X) + assert pca.n_components == "mle" + assert pca.n_components_ == 1 + + +def test_infer_dim_1(): + # TODO: explain what this is testing + # Or at least use explicit variable names... + n, p = 1000, 5 + rng = np.random.RandomState(0) + X = ( + rng.randn(n, p) * 0.1 + + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) + + np.array([1, 0, 7, 4, 6]) + ) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + ll = np.array([_assess_dimension(spect, k, n) for k in range(1, p)]) + assert ll[1] > ll.max() - 0.01 * n + + +def test_infer_dim_2(): + # TODO: explain what this is testing + # Or at least use explicit variable names... 
+ n, p = 1000, 5 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + X[10:20] += np.array([6, 0, 7, 2, -1]) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + assert _infer_dimension(spect, n) > 1 + + +def test_infer_dim_3(): + n, p = 100, 5 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + X[10:20] += np.array([6, 0, 7, 2, -1]) + X[30:40] += 2 * np.array([-1, 1, -1, 1, -1]) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + assert _infer_dimension(spect, n) > 2 + + +@pytest.mark.parametrize( + "X, n_components, n_components_validated", + [ + (iris.data, 0.95, 2), # row > col + (iris.data, 0.01, 1), # row > col + (np.random.RandomState(0).rand(5, 20), 0.5, 2), + ], # row < col +) +def test_infer_dim_by_explained_variance(X, n_components, n_components_validated): + pca = PCA(n_components=n_components, svd_solver="full") + pca.fit(X) + assert pca.n_components == pytest.approx(n_components) + assert pca.n_components_ == n_components_validated + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_score(svd_solver): + # Test that probabilistic PCA scoring yields a reasonable score + n, p = 1000, 3 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5]) + pca = PCA(n_components=2, svd_solver=svd_solver) + pca.fit(X) + + ll1 = pca.score(X) + h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1**2) * p + assert_allclose(ll1 / h, 1, rtol=5e-2) + + ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5])) + assert ll1 > ll2 + + pca = PCA(n_components=2, whiten=True, svd_solver=svd_solver) + pca.fit(X) + ll2 = pca.score(X) + assert ll1 > ll2 + + +def test_pca_score3(): + # Check that probabilistic PCA selects the right model + n, p = 200, 3 + rng = np.random.RandomState(0) + Xl = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7]) + Xt = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7]) + ll = np.zeros(p) + for k in range(p): + pca = PCA(n_components=k, svd_solver="full") + pca.fit(Xl) + ll[k] = pca.score(Xt) + + assert ll.argmax() == 1 + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_sanity_noise_variance(svd_solver): + # Sanity check for the noise_variance_. 
For more details see + # https://github.com/scikit-learn/scikit-learn/issues/7568 + # https://github.com/scikit-learn/scikit-learn/issues/8541 + # https://github.com/scikit-learn/scikit-learn/issues/8544 + X, _ = datasets.load_digits(return_X_y=True) + pca = PCA(n_components=30, svd_solver=svd_solver, random_state=0) + pca.fit(X) + assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0) + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_score_consistency_solvers(svd_solver): + # Check the consistency of score between solvers + X, _ = datasets.load_digits(return_X_y=True) + pca_full = PCA(n_components=30, svd_solver="full", random_state=0) + pca_other = PCA(n_components=30, svd_solver=svd_solver, random_state=0) + pca_full.fit(X) + pca_other.fit(X) + assert_allclose(pca_full.score(X), pca_other.score(X), rtol=5e-6) + + +# arpack raises ValueError for n_components == min(n_samples, n_features) +@pytest.mark.parametrize("svd_solver", ["full", "randomized"]) +def test_pca_zero_noise_variance_edge_cases(svd_solver): + # ensure that noise_variance_ is 0 in edge cases + # when n_components == min(n_samples, n_features) + n, p = 100, 3 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5]) + + pca = PCA(n_components=p, svd_solver=svd_solver) + pca.fit(X) + assert pca.noise_variance_ == 0 + # Non-regression test for gh-12489 + # ensure no divide-by-zero error for n_components == n_features < n_samples + pca.score(X) + + pca.fit(X.T) + assert pca.noise_variance_ == 0 + # Non-regression test for gh-12489 + # ensure no divide-by-zero error for n_components == n_samples < n_features + pca.score(X.T) + + +@pytest.mark.parametrize( + "data, n_components, expected_solver", + [ # case: n_components in (0,1) => 'full' + (np.random.RandomState(0).uniform(size=(1000, 50)), 0.5, "full"), + # case: max(X.shape) <= 500 => 'full' + (np.random.RandomState(0).uniform(size=(10, 50)), 5, "full"), + # case: n_components >= .8 * min(X.shape) => 'full' + (np.random.RandomState(0).uniform(size=(1000, 50)), 50, "full"), + # n_components >= 1 and n_components < .8*min(X.shape) => 'randomized' + (np.random.RandomState(0).uniform(size=(1000, 50)), 10, "randomized"), + ], +) +def test_pca_svd_solver_auto(data, n_components, expected_solver): + pca_auto = PCA(n_components=n_components, random_state=0) + pca_test = PCA( + n_components=n_components, svd_solver=expected_solver, random_state=0 + ) + pca_auto.fit(data) + pca_test.fit(data) + assert_allclose(pca_auto.components_, pca_test.components_) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_deterministic_output(svd_solver): + rng = np.random.RandomState(0) + X = rng.rand(10, 10) + + transformed_X = np.zeros((20, 2)) + for i in range(20): + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + transformed_X[i, :] = pca.fit_transform(X)[0] + assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2)) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_dtype_preservation(svd_solver): + check_pca_float_dtype_preservation(svd_solver) + check_pca_int_dtype_upcast_to_double(svd_solver) + + +def check_pca_float_dtype_preservation(svd_solver): + # Ensure that PCA does not upscale the dtype when input is float32 + X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False) + X_32 = X_64.astype(np.float32) + + pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64) + pca_32 = PCA(n_components=3, 
svd_solver=svd_solver, random_state=0).fit(X_32) + + assert pca_64.components_.dtype == np.float64 + assert pca_32.components_.dtype == np.float32 + assert pca_64.transform(X_64).dtype == np.float64 + assert pca_32.transform(X_32).dtype == np.float32 + + # the rtol is set such that the test passes on all platforms tested on + # conda-forge: PR#15775 + # see: https://github.com/conda-forge/scikit-learn-feedstock/pull/113 + assert_allclose(pca_64.components_, pca_32.components_, rtol=2e-4) + + +def check_pca_int_dtype_upcast_to_double(svd_solver): + # Ensure that all int types will be upcast to float64 + X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4)) + X_i64 = X_i64.astype(np.int64, copy=False) + X_i32 = X_i64.astype(np.int32, copy=False) + + pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64) + pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32) + + assert pca_64.components_.dtype == np.float64 + assert pca_32.components_.dtype == np.float64 + assert pca_64.transform(X_i64).dtype == np.float64 + assert pca_32.transform(X_i32).dtype == np.float64 + + assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-4) + + +def test_pca_n_components_mostly_explained_variance_ratio(): + # when n_components is the second highest cumulative sum of the + # explained_variance_ratio_, then n_components_ should equal the + # number of features in the dataset #15669 + X, y = load_iris(return_X_y=True) + pca1 = PCA().fit(X, y) + + n_components = pca1.explained_variance_ratio_.cumsum()[-2] + pca2 = PCA(n_components=n_components).fit(X, y) + assert pca2.n_components_ == X.shape[1] + + +def test_assess_dimension_bad_rank(): + # Test error when tested rank not in [1, n_features - 1] + spectrum = np.array([1, 1e-30, 1e-30, 1e-30]) + n_samples = 10 + for rank in (0, 5): + with pytest.raises(ValueError, match=r"should be in \[1, n_features - 1\]"): + _assess_dimension(spectrum, rank, n_samples) + + +def test_small_eigenvalues_mle(): + # Test rank associated with tiny eigenvalues are given a log-likelihood of + # -inf. 
The inferred rank will be 1 + spectrum = np.array([1, 1e-30, 1e-30, 1e-30]) + + assert _assess_dimension(spectrum, rank=1, n_samples=10) > -np.inf + + for rank in (2, 3): + assert _assess_dimension(spectrum, rank, 10) == -np.inf + + assert _infer_dimension(spectrum, 10) == 1 + + +def test_mle_redundant_data(): + # Test 'mle' with pathological X: only one relevant feature should give a + # rank of 1 + X, _ = datasets.make_classification( + n_features=20, + n_informative=1, + n_repeated=18, + n_redundant=1, + n_clusters_per_class=1, + random_state=42, + ) + pca = PCA(n_components="mle").fit(X) + assert pca.n_components_ == 1 + + +def test_fit_mle_too_few_samples(): + # Tests that an error is raised when the number of samples is smaller + # than the number of features during an mle fit + X, _ = datasets.make_classification(n_samples=20, n_features=21, random_state=42) + + pca = PCA(n_components="mle", svd_solver="full") + with pytest.raises( + ValueError, + match="n_components='mle' is only supported if n_samples >= n_features", + ): + pca.fit(X) + + +def test_mle_simple_case(): + # non-regression test for issue + # https://github.com/scikit-learn/scikit-learn/issues/16730 + n_samples, n_dim = 1000, 10 + X = np.random.RandomState(0).randn(n_samples, n_dim) + X[:, -1] = np.mean(X[:, :-1], axis=-1) # true X dim is ndim - 1 + pca_skl = PCA("mle", svd_solver="full") + pca_skl.fit(X) + assert pca_skl.n_components_ == n_dim - 1 + + +def test_assess_dimension_rank_one(): + # Make sure assess_dimension works properly on a matrix of rank 1 + n_samples, n_features = 9, 6 + X = np.ones((n_samples, n_features)) # rank 1 matrix + _, s, _ = np.linalg.svd(X, full_matrices=True) + # except for rank 1, all eigenvalues are 0 resp. close to 0 (FP) + assert_allclose(s[1:], np.zeros(n_features - 1), atol=1e-12) + + assert np.isfinite(_assess_dimension(s, rank=1, n_samples=n_samples)) + for rank in range(2, n_features): + assert _assess_dimension(s, rank, n_samples) == -np.inf + + +def test_pca_randomized_svd_n_oversamples(): + """Check that exposing and setting `n_oversamples` will provide accurate results + even when `X` has a large number of features. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20589 + """ + rng = np.random.RandomState(0) + n_features = 100 + X = rng.randn(1_000, n_features) + + # The default value of `n_oversamples` will lead to inaccurate results + # We force it to the number of features.
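+ # Editor's note (illustrative, not from upstream): the randomized solver
+ # sketches X with n_components + n_oversamples random projections; with a
+ # single requested component the default n_oversamples=10 is too small for
+ # this 100-feature X, so the test raises it to n_features to match the
+ # deterministic full and arpack solutions.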
+ pca_randomized = PCA( + n_components=1, + svd_solver="randomized", + n_oversamples=n_features, + random_state=0, + ).fit(X) + pca_full = PCA(n_components=1, svd_solver="full").fit(X) + pca_arpack = PCA(n_components=1, svd_solver="arpack", random_state=0).fit(X) + + assert_allclose(np.abs(pca_full.components_), np.abs(pca_arpack.components_)) + assert_allclose(np.abs(pca_randomized.components_), np.abs(pca_arpack.components_)) + + +def test_feature_names_out(): + """Check feature names out for PCA.""" + pca = PCA(n_components=2).fit(iris.data) + + names = pca.get_feature_names_out() + assert_array_equal([f"pca{i}" for i in range(2)], names) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_variance_correctness(copy): + """Check the accuracy of PCA's internal variance calculation""" + rng = np.random.RandomState(0) + X = rng.randn(1000, 200) + pca = PCA().fit(X) + pca_var = pca.explained_variance_ / pca.explained_variance_ratio_ + true_var = np.var(X, ddof=1, axis=0).sum() + np.testing.assert_allclose(pca_var, true_var) + + +def check_array_api_get_precision(name, estimator, array_namespace, device, dtype_name): + xp = _array_api_for_tests(array_namespace, device) + iris_np = iris.data.astype(dtype_name) + iris_xp = xp.asarray(iris_np, device=device) + + estimator.fit(iris_np) + precision_np = estimator.get_precision() + covariance_np = estimator.get_covariance() + + with config_context(array_api_dispatch=True): + estimator_xp = clone(estimator).fit(iris_xp) + precision_xp = estimator_xp.get_precision() + assert precision_xp.shape == (4, 4) + assert precision_xp.dtype == iris_xp.dtype + + assert_allclose( + _convert_to_numpy(precision_xp, xp=xp), + precision_np, + atol=_atol_for_type(dtype_name), + ) + covariance_xp = estimator_xp.get_covariance() + assert covariance_xp.shape == (4, 4) + assert covariance_xp.dtype == iris_xp.dtype + + assert_allclose( + _convert_to_numpy(covariance_xp, xp=xp), + covariance_np, + atol=_atol_for_type(dtype_name), + ) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_input_and_values, check_array_api_get_precision], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + PCA(n_components=2, svd_solver="full"), + PCA(n_components=0.1, svd_solver="full", whiten=True), + PCA( + n_components=2, + svd_solver="randomized", + power_iteration_normalizer="QR", + random_state=0, # how to use global_random_seed here? + ), + ], + ids=_get_check_estimator_ids, +) +def test_pca_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_get_precision], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + # PCA with mle cannot use check_array_api_input_and_values because of + # rounding errors in the noisy (low variance) components. Even checking + # the shape of the `components_` is problematic because the number of + # components depends on trimming threshold of the mle algorithm which + # can depend on device-specific rounding errors. 
+ PCA(n_components="mle", svd_solver="full"), + ], + ids=_get_check_estimator_ids, +) +def test_pca_mle_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + # Simpler variant of the generic check_array_api_input checker tailored for + # the specific case of PCA with mle-trimmed components. + xp = _array_api_for_tests(array_namespace, device) + + X, y = make_classification(random_state=42) + X = X.astype(dtype_name, copy=False) + atol = _atol_for_type(X.dtype) + + est = clone(estimator) + + X_xp = xp.asarray(X, device=device) + y_xp = xp.asarray(y, device=device) + + est.fit(X, y) + + components_np = est.components_ + explained_variance_np = est.explained_variance_ + + est_xp = clone(est) + with config_context(array_api_dispatch=True): + est_xp.fit(X_xp, y_xp) + components_xp = est_xp.components_ + assert array_device(components_xp) == array_device(X_xp) + components_xp_np = _convert_to_numpy(components_xp, xp=xp) + + explained_variance_xp = est_xp.explained_variance_ + assert array_device(explained_variance_xp) == array_device(X_xp) + explained_variance_xp_np = _convert_to_numpy(explained_variance_xp, xp=xp) + + assert components_xp_np.dtype == components_np.dtype + assert components_xp_np.shape[1] == components_np.shape[1] + assert explained_variance_xp_np.dtype == explained_variance_np.dtype + + # Check that the explained variance values match for the + # common components: + min_components = min(components_xp_np.shape[0], components_np.shape[0]) + assert_allclose( + explained_variance_xp_np[:min_components], + explained_variance_np[:min_components], + atol=atol, + ) + + # If the number of components differ, check that the explained variance of + # the trimmed components is very small. + if components_xp_np.shape[0] != components_np.shape[0]: + reference_variance = explained_variance_np[-1] + extra_variance_np = explained_variance_np[min_components:] + extra_variance_xp_np = explained_variance_xp_np[min_components:] + assert all(np.abs(extra_variance_np - reference_variance) < atol) + assert all(np.abs(extra_variance_xp_np - reference_variance) < atol) + + +def test_array_api_error_and_warnings_on_unsupported_params(): + pytest.importorskip("array_api_compat") + xp = pytest.importorskip("numpy.array_api") + iris_xp = xp.asarray(iris.data) + + pca = PCA(n_components=2, svd_solver="arpack", random_state=0) + expected_msg = re.escape( + "PCA with svd_solver='arpack' is not supported for Array API inputs." + ) + with pytest.raises(ValueError, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) + + pca.set_params(svd_solver="randomized", power_iteration_normalizer="LU") + expected_msg = re.escape( + "Array API does not support LU factorization. Set" + " `power_iteration_normalizer='QR'` instead." + ) + with pytest.raises(ValueError, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) + + pca.set_params(svd_solver="randomized", power_iteration_normalizer="auto") + expected_msg = re.escape( + "Array API does not support LU factorization, falling back to QR instead. Set" + " `power_iteration_normalizer='QR'` explicitly to silence this warning." 
+ ) + with pytest.warns(UserWarning, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f4db85303f4b6bc9983cda7bc31eee349c1f425a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py @@ -0,0 +1,19 @@ +""" +The :mod:`sklearn.feature_extraction` module deals with feature extraction +from raw data. It currently includes methods to extract features from text and +images. +""" + +from . import text +from ._dict_vectorizer import DictVectorizer +from ._hash import FeatureHasher +from .image import grid_to_graph, img_to_graph + +__all__ = [ + "DictVectorizer", + "image", + "img_to_graph", + "grid_to_graph", + "text", + "FeatureHasher", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b766d6a8043f0bd5a3bfdaea28dd997749a37f46 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcb42269b833c3d819418a39142b53e94150106a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f019c37fa536473d5dd30282a173620e274c7532 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ff701222d66c0d1c96d23531076be74e2412ee9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0a07d37243febff99cfa41d00204bf644b58818 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5faeb5809b1125c14af59e7e95c35b4101d1f18b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py new file mode 100644 index 0000000000000000000000000000000000000000..9855684b550c40972cb242f48118f684e461f035 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py @@ -0,0 +1,452 @@ +# Authors: Lars Buitinck +# Dan Blanchard +# License: BSD 3 clause + +from array import array +from collections.abc import Iterable, Mapping +from numbers import Number +from operator import itemgetter + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array +from ..utils.validation import check_is_fitted + + +class DictVectorizer(TransformerMixin, BaseEstimator): + """Transforms lists of feature-value mappings to vectors. + + This transformer turns lists of mappings (dict-like objects) of feature + names to feature values into Numpy arrays or scipy.sparse matrices for use + with scikit-learn estimators. + + When feature values are strings, this transformer will do a binary one-hot + (aka one-of-K) coding: one boolean-valued feature is constructed for each + of the possible string values that the feature can take on. For instance, + a feature "f" that can take on the values "ham" and "spam" will become two + features in the output, one signifying "f=ham", the other "f=spam". + + If a feature value is a sequence or set of strings, this transformer + will iterate over the values and will count the occurrences of each string + value. + + However, note that this transformer will only do a binary one-hot encoding + when feature values are of type string. If categorical features are + represented as numeric values such as int or iterables of strings, the + DictVectorizer can be followed by + :class:`~sklearn.preprocessing.OneHotEncoder` to complete + binary one-hot encoding. + + Features that do not occur in a sample (mapping) will have a zero value + in the resulting array/matrix. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + dtype : dtype, default=np.float64 + The type of feature values. Passed to Numpy array/scipy.sparse matrix + constructors as the dtype argument. + separator : str, default="=" + Separator string used when constructing new features for one-hot + coding. + sparse : bool, default=True + Whether transform should produce scipy.sparse matrices. + sort : bool, default=True + Whether ``feature_names_`` and ``vocabulary_`` should be + sorted when fitting. + + Attributes + ---------- + vocabulary_ : dict + A dictionary mapping feature names to feature indices. + + feature_names_ : list + A list of length n_features containing the feature names (e.g., "f=ham" + and "f=spam"). + + See Also + -------- + FeatureHasher : Performs vectorization using only a hash function. 
+ sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical + features encoded as columns of arbitrary data types. + + Examples + -------- + >>> from sklearn.feature_extraction import DictVectorizer + >>> v = DictVectorizer(sparse=False) + >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] + >>> X = v.fit_transform(D) + >>> X + array([[2., 0., 1.], + [0., 1., 3.]]) + >>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0}, + ... {'baz': 1.0, 'foo': 3.0}] + True + >>> v.transform({'foo': 4, 'unseen_feature': 3}) + array([[0., 0., 4.]]) + """ + + _parameter_constraints: dict = { + "dtype": "no_validation", # validation delegated to numpy, + "separator": [str], + "sparse": ["boolean"], + "sort": ["boolean"], + } + + def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True): + self.dtype = dtype + self.separator = separator + self.sparse = sparse + self.sort = sort + + def _add_iterable_element( + self, + f, + v, + feature_names, + vocab, + *, + fitting=True, + transforming=False, + indices=None, + values=None, + ): + """Add feature names for iterable of strings""" + for vv in v: + if isinstance(vv, str): + feature_name = "%s%s%s" % (f, self.separator, vv) + vv = 1 + else: + raise TypeError( + f"Unsupported type {type(vv)} in iterable " + "value. Only iterables of string are " + "supported." + ) + if fitting and feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if transforming and feature_name in vocab: + indices.append(vocab[feature_name]) + values.append(self.dtype(vv)) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn a list of feature name -> indices mappings. + + Parameters + ---------- + X : Mapping or iterable over Mappings + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + .. versionchanged:: 0.24 + Accepts multiple string values for one categorical feature. + + y : (ignored) + Ignored parameter. + + Returns + ------- + self : object + DictVectorizer class instance. + """ + feature_names = [] + vocab = {} + + for x in X: + for f, v in x.items(): + if isinstance(v, str): + feature_name = "%s%s%s" % (f, self.separator, v) + elif isinstance(v, Number) or (v is None): + feature_name = f + elif isinstance(v, Mapping): + raise TypeError( + f"Unsupported value type {type(v)} " + f"for {f}: {v}.\n" + "Mapping objects are not supported." + ) + elif isinstance(v, Iterable): + feature_name = None + self._add_iterable_element(f, v, feature_names, vocab) + + if feature_name is not None: + if feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if self.sort: + feature_names.sort() + vocab = {f: i for i, f in enumerate(feature_names)} + + self.feature_names_ = feature_names + self.vocabulary_ = vocab + + return self + + def _transform(self, X, fitting): + # Sanity check: Python's array has no way of explicitly requesting the + # signed 32-bit integers that scipy.sparse needs, so we use the next + # best thing: typecode "i" (int). However, if that gives larger or + # smaller integers than 32-bit ones, np.frombuffer screws up. 
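+ # Editor's note (illustrative, not from upstream): the "i" array of indices
+ # built below is later converted with np.frombuffer(indices, dtype=np.intc),
+ # which is only valid if the typecode is exactly 4 bytes wide, hence the
+ # assertion that follows.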
+ assert array("i").itemsize == 4, ( + "sizeof(int) != 4 on your platform; please report this at" + " https://github.com/scikit-learn/scikit-learn/issues and" + " include the output from platform.platform() in your bug report" + ) + + dtype = self.dtype + if fitting: + feature_names = [] + vocab = {} + else: + feature_names = self.feature_names_ + vocab = self.vocabulary_ + + transforming = True + + # Process everything as sparse regardless of setting + X = [X] if isinstance(X, Mapping) else X + + indices = array("i") + indptr = [0] + # XXX we could change values to an array.array as well, but it + # would require (heuristic) conversion of dtype to typecode... + values = [] + + # collect all the possible feature names and build sparse matrix at + # same time + for x in X: + for f, v in x.items(): + if isinstance(v, str): + feature_name = "%s%s%s" % (f, self.separator, v) + v = 1 + elif isinstance(v, Number) or (v is None): + feature_name = f + elif not isinstance(v, Mapping) and isinstance(v, Iterable): + feature_name = None + self._add_iterable_element( + f, + v, + feature_names, + vocab, + fitting=fitting, + transforming=transforming, + indices=indices, + values=values, + ) + else: + raise TypeError( + f"Unsupported value Type {type(v)} " + f"for {f}: {v}.\n" + f"{type(v)} objects are not supported." + ) + + if feature_name is not None: + if fitting and feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if feature_name in vocab: + indices.append(vocab[feature_name]) + values.append(self.dtype(v)) + + indptr.append(len(indices)) + + if len(indptr) == 1: + raise ValueError("Sample sequence X is empty.") + + indices = np.frombuffer(indices, dtype=np.intc) + shape = (len(indptr) - 1, len(vocab)) + + result_matrix = sp.csr_matrix( + (values, indices, indptr), shape=shape, dtype=dtype + ) + + # Sort everything if asked + if fitting and self.sort: + feature_names.sort() + map_index = np.empty(len(feature_names), dtype=np.int32) + for new_val, f in enumerate(feature_names): + map_index[new_val] = vocab[f] + vocab[f] = new_val + result_matrix = result_matrix[:, map_index] + + if self.sparse: + result_matrix.sort_indices() + else: + result_matrix = result_matrix.toarray() + + if fitting: + self.feature_names_ = feature_names + self.vocabulary_ = vocab + + return result_matrix + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Learn a list of feature name -> indices mappings and transform X. + + Like fit(X) followed by transform(X), but does not require + materializing X in memory. + + Parameters + ---------- + X : Mapping or iterable over Mappings + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + .. versionchanged:: 0.24 + Accepts multiple string values for one categorical feature. + + y : (ignored) + Ignored parameter. + + Returns + ------- + Xa : {array, sparse matrix} + Feature vectors; always 2-d. + """ + return self._transform(X, fitting=True) + + def inverse_transform(self, X, dict_type=dict): + """Transform array or sparse matrix X back to feature mappings. + + X must have been produced by this DictVectorizer's transform or + fit_transform method; it may only have passed through transformers + that preserve the number of features and their order. + + In the case of one-hot/one-of-K coding, the constructed feature + names and values are returned rather than the original ones. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample matrix. + dict_type : type, default=dict + Constructor for feature mappings. Must conform to the + collections.Mapping API. + + Returns + ------- + D : list of dict_type objects of shape (n_samples,) + Feature mappings for the samples in X. + """ + check_is_fitted(self, "feature_names_") + + # COO matrix is not subscriptable + X = check_array(X, accept_sparse=["csr", "csc"]) + n_samples = X.shape[0] + + names = self.feature_names_ + dicts = [dict_type() for _ in range(n_samples)] + + if sp.issparse(X): + for i, j in zip(*X.nonzero()): + dicts[i][names[j]] = X[i, j] + else: + for i, d in enumerate(dicts): + for j, v in enumerate(X[i, :]): + if v != 0: + d[names[j]] = X[i, j] + + return dicts + + def transform(self, X): + """Transform feature->value dicts to array or sparse matrix. + + Named features not encountered during fit or fit_transform will be + silently ignored. + + Parameters + ---------- + X : Mapping or iterable over Mappings of shape (n_samples,) + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + Returns + ------- + Xa : {array, sparse matrix} + Feature vectors; always 2-d. + """ + check_is_fitted(self, ["feature_names_", "vocabulary_"]) + return self._transform(X, fitting=False) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "feature_names_") + if any(not isinstance(name, str) for name in self.feature_names_): + feature_names = [str(name) for name in self.feature_names_] + else: + feature_names = self.feature_names_ + return np.asarray(feature_names, dtype=object) + + def restrict(self, support, indices=False): + """Restrict the features to those in support using feature selection. + + This function modifies the estimator in-place. + + Parameters + ---------- + support : array-like + Boolean mask or list of indices (as returned by the get_support + member of feature selectors). + indices : bool, default=False + Whether support is a list of indices. + + Returns + ------- + self : object + DictVectorizer class instance. + + Examples + -------- + >>> from sklearn.feature_extraction import DictVectorizer + >>> from sklearn.feature_selection import SelectKBest, chi2 + >>> v = DictVectorizer() + >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] + >>> X = v.fit_transform(D) + >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1]) + >>> v.get_feature_names_out() + array(['bar', 'baz', 'foo'], ...) + >>> v.restrict(support.get_support()) + DictVectorizer() + >>> v.get_feature_names_out() + array(['bar', 'foo'], ...) 
+ """ + check_is_fitted(self, "feature_names_") + + if not indices: + support = np.where(support)[0] + + names = self.feature_names_ + new_vocab = {} + for i in support: + new_vocab[names[i]] = len(new_vocab) + + self.vocabulary_ = new_vocab + self.feature_names_ = [ + f for f, i in sorted(new_vocab.items(), key=itemgetter(1)) + ] + + return self + + def _more_tags(self): + return {"X_types": ["dict"]} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py new file mode 100644 index 0000000000000000000000000000000000000000..9874bc0a028352b4f9ca5fa517636ea8e0e6499e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py @@ -0,0 +1,197 @@ +# Author: Lars Buitinck +# License: BSD 3 clause + +from itertools import chain +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils._param_validation import Interval, StrOptions +from ._hashing_fast import transform as _hashing_transform + + +def _iteritems(d): + """Like d.iteritems, but accepts any collections.Mapping.""" + return d.iteritems() if hasattr(d, "iteritems") else d.items() + + +class FeatureHasher(TransformerMixin, BaseEstimator): + """Implements feature hashing, aka the hashing trick. + + This class turns sequences of symbolic feature names (strings) into + scipy.sparse matrices, using a hash function to compute the matrix column + corresponding to a name. The hash function employed is the signed 32-bit + version of Murmurhash3. + + Feature names of type byte string are used as-is. Unicode strings are + converted to UTF-8 first, but no Unicode normalization is done. + Feature values must be (finite) numbers. + + This class is a low-memory alternative to DictVectorizer and + CountVectorizer, intended for large-scale (online) learning and situations + where memory is tight, e.g. when running prediction code on embedded + devices. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_features : int, default=2**20 + The number of features (columns) in the output matrices. Small numbers + of features are likely to cause hash collisions, but large numbers + will cause larger coefficient dimensions in linear learners. + input_type : str, default='dict' + Choose a string from {'dict', 'pair', 'string'}. + Either "dict" (the default) to accept dictionaries over + (feature_name, value); "pair" to accept pairs of (feature_name, value); + or "string" to accept single strings. + feature_name should be a string, while value should be a number. + In the case of "string", a value of 1 is implied. + The feature_name is hashed to find the appropriate column for the + feature. The value's sign might be flipped in the output (but see + non_negative, below). + dtype : numpy dtype, default=np.float64 + The type of feature values. Passed to scipy.sparse matrix constructors + as the dtype argument. Do not set this to bool, np.boolean or any + unsigned integer type. + alternate_sign : bool, default=True + When True, an alternating sign is added to the features as to + approximately conserve the inner product in the hashed space even for + small n_features. This approach is similar to sparse random projection. + + .. 
versionchanged:: 0.19 + ``alternate_sign`` replaces the now deprecated ``non_negative`` + parameter. + + See Also + -------- + DictVectorizer : Vectorizes string-valued features using a hash table. + sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features. + + Notes + ----- + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.feature_extraction import FeatureHasher + >>> h = FeatureHasher(n_features=10) + >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}] + >>> f = h.transform(D) + >>> f.toarray() + array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.], + [ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]]) + + With `input_type="string"`, the input must be an iterable over iterables of + strings: + + >>> h = FeatureHasher(n_features=8, input_type="string") + >>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]] + >>> f = h.transform(raw_X) + >>> f.toarray() + array([[ 0., 0., 0., -1., 0., -1., 0., 1.], + [ 0., 0., 0., -1., 0., -1., 0., 0.], + [ 0., -1., 0., 0., 0., 0., 0., 1.]]) + """ + + _parameter_constraints: dict = { + "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")], + "input_type": [StrOptions({"dict", "pair", "string"})], + "dtype": "no_validation", # delegate to numpy + "alternate_sign": ["boolean"], + } + + def __init__( + self, + n_features=(2**20), + *, + input_type="dict", + dtype=np.float64, + alternate_sign=True, + ): + self.dtype = dtype + self.input_type = input_type + self.n_features = n_features + self.alternate_sign = alternate_sign + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X=None, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : Ignored + Not used, present here for API consistency by convention. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + FeatureHasher class instance. + """ + return self + + def transform(self, raw_X): + """Transform a sequence of instances to a scipy.sparse matrix. + + Parameters + ---------- + raw_X : iterable over iterable over raw features, length = n_samples + Samples. Each sample must be iterable an (e.g., a list or tuple) + containing/generating feature names (and optionally values, see + the input_type constructor argument) which will be hashed. + raw_X need not support the len function, so it can be the result + of a generator; n_samples is determined on the fly. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Feature matrix, for use with estimators or further transformers. + """ + raw_X = iter(raw_X) + if self.input_type == "dict": + raw_X = (_iteritems(d) for d in raw_X) + elif self.input_type == "string": + first_raw_X = next(raw_X) + if isinstance(first_raw_X, str): + raise ValueError( + "Samples can not be a single string. The input must be an iterable" + " over iterables of strings." 
+ ) + raw_X_ = chain([first_raw_X], raw_X) + raw_X = (((f, 1) for f in x) for x in raw_X_) + + indices, indptr, values = _hashing_transform( + raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0 + ) + n_samples = indptr.shape[0] - 1 + + if n_samples == 0: + raise ValueError("Cannot vectorize empty sequence.") + + X = sp.csr_matrix( + (values, indices, indptr), + dtype=self.dtype, + shape=(n_samples, self.n_features), + ) + X.sum_duplicates() # also sorts the indices + + return X + + def _more_tags(self): + return {"X_types": [self.input_type]} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f628496339e5d710906eef7b8599438abc8215e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py new file mode 100644 index 0000000000000000000000000000000000000000..37ae02a0f36c54fb47b87d8368bc1a2507404cfb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py @@ -0,0 +1,325 @@ +# This list of English stop words is taken from the "Glasgow Information +# Retrieval Group". The original list can be found at +# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words +ENGLISH_STOP_WORDS = frozenset( + [ + "a", + "about", + "above", + "across", + "after", + "afterwards", + "again", + "against", + "all", + "almost", + "alone", + "along", + "already", + "also", + "although", + "always", + "am", + "among", + "amongst", + "amoungst", + "amount", + "an", + "and", + "another", + "any", + "anyhow", + "anyone", + "anything", + "anyway", + "anywhere", + "are", + "around", + "as", + "at", + "back", + "be", + "became", + "because", + "become", + "becomes", + "becoming", + "been", + "before", + "beforehand", + "behind", + "being", + "below", + "beside", + "besides", + "between", + "beyond", + "bill", + "both", + "bottom", + "but", + "by", + "call", + "can", + "cannot", + "cant", + "co", + "con", + "could", + "couldnt", + "cry", + "de", + "describe", + "detail", + "do", + "done", + "down", + "due", + "during", + "each", + "eg", + "eight", + "either", + "eleven", + "else", + "elsewhere", + "empty", + "enough", + "etc", + "even", + "ever", + "every", + "everyone", + "everything", + "everywhere", + "except", + "few", + "fifteen", + "fifty", + "fill", + "find", + "fire", + "first", + "five", + "for", + "former", + "formerly", + "forty", + "found", + "four", + "from", + "front", + "full", + "further", + "get", + "give", + "go", + "had", + "has", + "hasnt", + "have", + "he", + "hence", + "her", + "here", + "hereafter", + "hereby", + "herein", + "hereupon", + "hers", + "herself", + "him", + "himself", + "his", + "how", + "however", + "hundred", + "i", + "ie", + "if", + "in", + "inc", + "indeed", + "interest", + "into", + "is", + "it", + "its", + "itself", + "keep", + "last", + "latter", + "latterly", + "least", + "less", + "ltd", + "made", + "many", + "may", + "me", + "meanwhile", + "might", + "mill", + "mine", + "more", + "moreover", + "most", + "mostly", + "move", + "much", + "must", + "my", + "myself", + "name", + "namely", + 
"neither", + "never", + "nevertheless", + "next", + "nine", + "no", + "nobody", + "none", + "noone", + "nor", + "not", + "nothing", + "now", + "nowhere", + "of", + "off", + "often", + "on", + "once", + "one", + "only", + "onto", + "or", + "other", + "others", + "otherwise", + "our", + "ours", + "ourselves", + "out", + "over", + "own", + "part", + "per", + "perhaps", + "please", + "put", + "rather", + "re", + "same", + "see", + "seem", + "seemed", + "seeming", + "seems", + "serious", + "several", + "she", + "should", + "show", + "side", + "since", + "sincere", + "six", + "sixty", + "so", + "some", + "somehow", + "someone", + "something", + "sometime", + "sometimes", + "somewhere", + "still", + "such", + "system", + "take", + "ten", + "than", + "that", + "the", + "their", + "them", + "themselves", + "then", + "thence", + "there", + "thereafter", + "thereby", + "therefore", + "therein", + "thereupon", + "these", + "they", + "thick", + "thin", + "third", + "this", + "those", + "though", + "three", + "through", + "throughout", + "thru", + "thus", + "to", + "together", + "too", + "top", + "toward", + "towards", + "twelve", + "twenty", + "two", + "un", + "under", + "until", + "up", + "upon", + "us", + "very", + "via", + "was", + "we", + "well", + "were", + "what", + "whatever", + "when", + "whence", + "whenever", + "where", + "whereafter", + "whereas", + "whereby", + "wherein", + "whereupon", + "wherever", + "whether", + "which", + "while", + "whither", + "who", + "whoever", + "whole", + "whom", + "whose", + "why", + "will", + "with", + "within", + "without", + "would", + "yet", + "you", + "your", + "yours", + "yourself", + "yourselves", + ] +) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/image.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/image.py new file mode 100644 index 0000000000000000000000000000000000000000..718f47e3e8a74693154655e727e3e250ce5bc008 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/image.py @@ -0,0 +1,671 @@ +""" +The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to +extract features from images. +""" + +# Authors: Emmanuelle Gouillart +# Gael Varoquaux +# Olivier Grisel +# Vlad Niculae +# License: BSD 3 clause + +from itertools import product +from numbers import Integral, Number, Real + +import numpy as np +from numpy.lib.stride_tricks import as_strided +from scipy import sparse + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array, check_random_state +from ..utils._param_validation import Hidden, Interval, RealNotInt, validate_params + +__all__ = [ + "PatchExtractor", + "extract_patches_2d", + "grid_to_graph", + "img_to_graph", + "reconstruct_from_patches_2d", +] + +############################################################################### +# From an image to a graph + + +def _make_edges_3d(n_x, n_y, n_z=1): + """Returns a list of edges for a 3D image. + + Parameters + ---------- + n_x : int + The size of the grid in the x direction. + n_y : int + The size of the grid in the y direction. 
+ n_z : integer, default=1 + The size of the grid in the z direction, defaults to 1 + """ + vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z)) + edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel())) + edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel())) + edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel())) + edges = np.hstack((edges_deep, edges_right, edges_down)) + return edges + + +def _compute_gradient_3d(edges, img): + _, n_y, n_z = img.shape + gradient = np.abs( + img[ + edges[0] // (n_y * n_z), + (edges[0] % (n_y * n_z)) // n_z, + (edges[0] % (n_y * n_z)) % n_z, + ] + - img[ + edges[1] // (n_y * n_z), + (edges[1] % (n_y * n_z)) // n_z, + (edges[1] % (n_y * n_z)) % n_z, + ] + ) + return gradient + + +# XXX: Why mask the image after computing the weights? + + +def _mask_edges_weights(mask, edges, weights=None): + """Apply a mask to edges (weighted or not)""" + inds = np.arange(mask.size) + inds = inds[mask.ravel()] + ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds)) + edges = edges[:, ind_mask] + if weights is not None: + weights = weights[ind_mask] + if len(edges.ravel()): + maxval = edges.max() + else: + maxval = 0 + order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1)) + edges = order[edges] + if weights is None: + return edges + else: + return edges, weights + + +def _to_graph( + n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None +): + """Auxiliary function for img_to_graph and grid_to_graph""" + edges = _make_edges_3d(n_x, n_y, n_z) + + if dtype is None: # To not overwrite input dtype + if img is None: + dtype = int + else: + dtype = img.dtype + + if img is not None: + img = np.atleast_3d(img) + weights = _compute_gradient_3d(edges, img) + if mask is not None: + edges, weights = _mask_edges_weights(mask, edges, weights) + diag = img.squeeze()[mask] + else: + diag = img.ravel() + n_voxels = diag.size + else: + if mask is not None: + mask = mask.astype(dtype=bool, copy=False) + edges = _mask_edges_weights(mask, edges) + n_voxels = np.sum(mask) + else: + n_voxels = n_x * n_y * n_z + weights = np.ones(edges.shape[1], dtype=dtype) + diag = np.ones(n_voxels, dtype=dtype) + + diag_idx = np.arange(n_voxels) + i_idx = np.hstack((edges[0], edges[1])) + j_idx = np.hstack((edges[1], edges[0])) + graph = sparse.coo_matrix( + ( + np.hstack((weights, weights, diag)), + (np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))), + ), + (n_voxels, n_voxels), + dtype=dtype, + ) + if return_as is np.ndarray: + return graph.toarray() + return return_as(graph) + + +@validate_params( + { + "img": ["array-like"], + "mask": [None, np.ndarray], + "return_as": [type], + "dtype": "no_validation", # validation delegated to numpy + }, + prefer_skip_nested_validation=True, +) +def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None): + """Graph of the pixel-to-pixel gradient connections. + + Edges are weighted with the gradient values. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + img : array-like of shape (height, width) or (height, width, channel) + 2D or 3D image. + mask : ndarray of shape (height, width) or \ + (height, width, channel), dtype=bool, default=None + An optional mask of the image, to consider only part of the + pixels. + return_as : np.ndarray or a sparse matrix class, \ + default=sparse.coo_matrix + The class to use to build the returned adjacency matrix. 
+ dtype : dtype, default=None + The data of the returned sparse matrix. By default it is the + dtype of img. + + Returns + ------- + graph : ndarray or a sparse matrix class + The computed adjacency matrix. + + Notes + ----- + For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was + handled by returning a dense np.matrix instance. Going forward, np.ndarray + returns an np.ndarray, as expected. + + For compatibility, user code relying on this method should wrap its + calls in ``np.asarray`` to avoid type issues. + """ + img = np.atleast_3d(img) + n_x, n_y, n_z = img.shape + return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype) + + +@validate_params( + { + "n_x": [Interval(Integral, left=1, right=None, closed="left")], + "n_y": [Interval(Integral, left=1, right=None, closed="left")], + "n_z": [Interval(Integral, left=1, right=None, closed="left")], + "mask": [None, np.ndarray], + "return_as": [type], + "dtype": "no_validation", # validation delegated to numpy + }, + prefer_skip_nested_validation=True, +) +def grid_to_graph( + n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int +): + """Graph of the pixel-to-pixel connections. + + Edges exist if 2 voxels are connected. + + Parameters + ---------- + n_x : int + Dimension in x axis. + n_y : int + Dimension in y axis. + n_z : int, default=1 + Dimension in z axis. + mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None + An optional mask of the image, to consider only part of the + pixels. + return_as : np.ndarray or a sparse matrix class, \ + default=sparse.coo_matrix + The class to use to build the returned adjacency matrix. + dtype : dtype, default=int + The data of the returned sparse matrix. By default it is int. + + Returns + ------- + graph : np.ndarray or a sparse matrix class + The computed adjacency matrix. + + Notes + ----- + For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was + handled by returning a dense np.matrix instance. Going forward, np.ndarray + returns an np.ndarray, as expected. + + For compatibility, user code relying on this method should wrap its + calls in ``np.asarray`` to avoid type issues. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.feature_extraction.image import grid_to_graph + >>> shape_img = (4, 4, 1) + >>> mask = np.zeros(shape=shape_img, dtype=bool) + >>> mask[[1, 2], [1, 2], :] = True + >>> graph = grid_to_graph(*shape_img, mask=mask) + >>> print(graph) + (0, 0) 1 + (1, 1) 1 + """ + return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype) + + +############################################################################### +# From an image to a set of small image patches + + +def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None): + """Compute the number of patches that will be extracted in an image. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + i_h : int + The image height + i_w : int + The image with + p_h : int + The height of a patch + p_w : int + The width of a patch + max_patches : int or float, default=None + The maximum number of patches to extract. If `max_patches` is a float + between 0 and 1, it is taken to be a proportion of the total number + of patches. If `max_patches` is None, all possible patches are extracted. 
+ """ + n_h = i_h - p_h + 1 + n_w = i_w - p_w + 1 + all_patches = n_h * n_w + + if max_patches: + if isinstance(max_patches, (Integral)) and max_patches < all_patches: + return max_patches + elif isinstance(max_patches, (Integral)) and max_patches >= all_patches: + return all_patches + elif isinstance(max_patches, (Real)) and 0 < max_patches < 1: + return int(max_patches * all_patches) + else: + raise ValueError("Invalid value for max_patches: %r" % max_patches) + else: + return all_patches + + +def _extract_patches(arr, patch_shape=8, extraction_step=1): + """Extracts patches of any n-dimensional array in place using strides. + + Given an n-dimensional array it will return a 2n-dimensional array with + the first n dimensions indexing patch position and the last n indexing + the patch content. This operation is immediate (O(1)). A reshape + performed on the first n dimensions will cause numpy to copy data, leading + to a list of extracted patches. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + arr : ndarray + n-dimensional array of which patches are to be extracted + + patch_shape : int or tuple of length arr.ndim.default=8 + Indicates the shape of the patches to be extracted. If an + integer is given, the shape will be a hypercube of + sidelength given by its value. + + extraction_step : int or tuple of length arr.ndim, default=1 + Indicates step size at which extraction shall be performed. + If integer is given, then the step is uniform in all dimensions. + + + Returns + ------- + patches : strided ndarray + 2n-dimensional array indexing patches on first n dimensions and + containing patches on the last n dimensions. These dimensions + are fake, but this way no data is copied. A simple reshape invokes + a copying operation to obtain a list of patches: + result.reshape([-1] + list(patch_shape)) + """ + + arr_ndim = arr.ndim + + if isinstance(patch_shape, Number): + patch_shape = tuple([patch_shape] * arr_ndim) + if isinstance(extraction_step, Number): + extraction_step = tuple([extraction_step] * arr_ndim) + + patch_strides = arr.strides + + slices = tuple(slice(None, None, st) for st in extraction_step) + indexing_strides = arr[slices].strides + + patch_indices_shape = ( + (np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step) + ) + 1 + + shape = tuple(list(patch_indices_shape) + list(patch_shape)) + strides = tuple(list(indexing_strides) + list(patch_strides)) + + patches = as_strided(arr, shape=shape, strides=strides) + return patches + + +@validate_params( + { + "image": [np.ndarray], + "patch_size": [tuple, list], + "max_patches": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(Integral, 1, None, closed="left"), + None, + ], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): + """Reshape a 2D image into a collection of patches. + + The resulting patches are allocated in a dedicated array. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + image : ndarray of shape (image_height, image_width) or \ + (image_height, image_width, n_channels) + The original image data. For color images, the last dimension specifies + the channel: a RGB image would have `n_channels=3`. + + patch_size : tuple of int (patch_height, patch_width) + The dimensions of one patch. + + max_patches : int or float, default=None + The maximum number of patches to extract. 
If `max_patches` is a float + between 0 and 1, it is taken to be a proportion of the total number + of patches. If `max_patches` is None it corresponds to the total number + of patches that can be extracted. + + random_state : int, RandomState instance, default=None + Determines the random number generator used for random sampling when + `max_patches` is not None. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + Returns + ------- + patches : array of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The collection of patches extracted from the image, where `n_patches` + is either `max_patches` or the total number of patches that can be + extracted. + + Examples + -------- + >>> from sklearn.datasets import load_sample_image + >>> from sklearn.feature_extraction import image + >>> # Use the array data from the first image in this dataset: + >>> one_image = load_sample_image("china.jpg") + >>> print('Image shape: {}'.format(one_image.shape)) + Image shape: (427, 640, 3) + >>> patches = image.extract_patches_2d(one_image, (2, 2)) + >>> print('Patches shape: {}'.format(patches.shape)) + Patches shape: (272214, 2, 2, 3) + >>> # Here are just two of these patches: + >>> print(patches[1]) + [[[174 201 231] + [174 201 231]] + [[173 200 230] + [173 200 230]]] + >>> print(patches[800]) + [[[187 214 243] + [188 215 244]] + [[187 214 243] + [188 215 244]]] + """ + i_h, i_w = image.shape[:2] + p_h, p_w = patch_size + + if p_h > i_h: + raise ValueError( + "Height of the patch should be less than the height of the image." + ) + + if p_w > i_w: + raise ValueError( + "Width of the patch should be less than the width of the image." + ) + + image = check_array(image, allow_nd=True) + image = image.reshape((i_h, i_w, -1)) + n_colors = image.shape[-1] + + extracted_patches = _extract_patches( + image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 + ) + + n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) + if max_patches: + rng = check_random_state(random_state) + i_s = rng.randint(i_h - p_h + 1, size=n_patches) + j_s = rng.randint(i_w - p_w + 1, size=n_patches) + patches = extracted_patches[i_s, j_s, 0] + else: + patches = extracted_patches + + patches = patches.reshape(-1, p_h, p_w, n_colors) + # remove the color dimension if useless + if patches.shape[-1] == 1: + return patches.reshape((n_patches, p_h, p_w)) + else: + return patches + + +@validate_params( + {"patches": [np.ndarray], "image_size": [tuple, Hidden(list)]}, + prefer_skip_nested_validation=True, +) +def reconstruct_from_patches_2d(patches, image_size): + """Reconstruct the image from all of its patches. + + Patches are assumed to overlap and the image is constructed by filling in + the patches from left to right, top to bottom, averaging the overlapping + regions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + patches : ndarray of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The complete set of patches. If the patches contain colour information, + channels are indexed along the last dimension: RGB patches would + have `n_channels=3`. + + image_size : tuple of int (image_height, image_width) or \ + (image_height, image_width, n_channels) + The size of the image that will be reconstructed. + + Returns + ------- + image : ndarray of shape image_size + The reconstructed image. 
+ """ + i_h, i_w = image_size[:2] + p_h, p_w = patches.shape[1:3] + img = np.zeros(image_size) + # compute the dimensions of the patches array + n_h = i_h - p_h + 1 + n_w = i_w - p_w + 1 + for p, (i, j) in zip(patches, product(range(n_h), range(n_w))): + img[i : i + p_h, j : j + p_w] += p + + for i in range(i_h): + for j in range(i_w): + # divide by the amount of overlap + # XXX: is this the most efficient way? memory-wise yes, cpu wise? + img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j)) + return img + + +class PatchExtractor(TransformerMixin, BaseEstimator): + """Extracts patches from a collection of images. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + patch_size : tuple of int (patch_height, patch_width), default=None + The dimensions of one patch. If set to None, the patch size will be + automatically set to `(img_height // 10, img_width // 10)`, where + `img_height` and `img_width` are the dimensions of the input images. + + max_patches : int or float, default=None + The maximum number of patches per image to extract. If `max_patches` is + a float in (0, 1), it is taken to mean a proportion of the total number + of patches. If set to None, extract all possible patches. + + random_state : int, RandomState instance, default=None + Determines the random number generator used for random sampling when + `max_patches is not None`. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + See Also + -------- + reconstruct_from_patches_2d : Reconstruct image from all of its patches. + + Notes + ----- + This estimator is stateless and does not need to be fitted. However, we + recommend to call :meth:`fit_transform` instead of :meth:`transform`, as + parameter validation is only performed in :meth:`fit`. + + Examples + -------- + >>> from sklearn.datasets import load_sample_images + >>> from sklearn.feature_extraction import image + >>> # Use the array data from the second image in this dataset: + >>> X = load_sample_images().images[1] + >>> X = X[None, ...] + >>> print(f"Image shape: {X.shape}") + Image shape: (1, 427, 640, 3) + >>> pe = image.PatchExtractor(patch_size=(10, 10)) + >>> pe_trans = pe.transform(X) + >>> print(f"Patches shape: {pe_trans.shape}") + Patches shape: (263758, 10, 10, 3) + >>> X_reconstructed = image.reconstruct_from_patches_2d(pe_trans, X.shape[1:]) + >>> print(f"Reconstructed shape: {X_reconstructed.shape}") + Reconstructed shape: (427, 640, 3) + """ + + _parameter_constraints: dict = { + "patch_size": [tuple, None], + "max_patches": [ + None, + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(Integral, 1, None, closed="left"), + ], + "random_state": ["random_state"], + } + + def __init__(self, *, patch_size=None, max_patches=None, random_state=None): + self.patch_size = patch_size + self.max_patches = max_patches + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validate the parameters of the estimator. + + This method allows to: (i) validate the parameters of the estimator and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : ndarray of shape (n_samples, image_height, image_width) or \ + (n_samples, image_height, image_width, n_channels) + Array of images from which to extract patches. For color images, + the last dimension specifies the channel: a RGB image would have + `n_channels=3`. 
+ + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + return self + + def transform(self, X): + """Transform the image samples in `X` into a matrix of patch data. + + Parameters + ---------- + X : ndarray of shape (n_samples, image_height, image_width) or \ + (n_samples, image_height, image_width, n_channels) + Array of images from which to extract patches. For color images, + the last dimension specifies the channel: a RGB image would have + `n_channels=3`. + + Returns + ------- + patches : array of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The collection of patches extracted from the images, where + `n_patches` is either `n_samples * max_patches` or the total + number of patches that can be extracted. + """ + X = self._validate_data( + X=X, + ensure_2d=False, + allow_nd=True, + ensure_min_samples=1, + ensure_min_features=1, + reset=False, + ) + random_state = check_random_state(self.random_state) + n_imgs, img_height, img_width = X.shape[:3] + if self.patch_size is None: + patch_size = img_height // 10, img_width // 10 + else: + if len(self.patch_size) != 2: + raise ValueError( + "patch_size must be a tuple of two integers. Got" + f" {self.patch_size} instead." + ) + patch_size = self.patch_size + + n_imgs, img_height, img_width = X.shape[:3] + X = np.reshape(X, (n_imgs, img_height, img_width, -1)) + n_channels = X.shape[-1] + + # compute the dimensions of the patches array + patch_height, patch_width = patch_size + n_patches = _compute_n_patches( + img_height, img_width, patch_height, patch_width, self.max_patches + ) + patches_shape = (n_imgs * n_patches,) + patch_size + if n_channels > 1: + patches_shape += (n_channels,) + + # extract the patches + patches = np.empty(patches_shape) + for ii, image in enumerate(X): + patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d( + image, + patch_size, + max_patches=self.max_patches, + random_state=random_state, + ) + return patches + + def _more_tags(self): + return {"X_types": ["3darray"], "stateless": True} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..580e31387c55651c92bd313d7a5e4cd8245933ae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfcbd50999cb7f1d0e8abd5852a110c468d335ce Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45488fc235d090b3bd7d87154a1c248755b944e5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c83cd5f51ae1d638b8826b8ca9001021535f7c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dc87ab2019392492fb7f1fa60c8ecc13588b80b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py new file mode 100644 index 0000000000000000000000000000000000000000..e9784d68d7199c7e40ad70d304c14ba8c66b04a9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py @@ -0,0 +1,262 @@ +# Authors: Lars Buitinck +# Dan Blanchard +# License: BSD 3 clause + +from random import Random + +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.exceptions import NotFittedError +from sklearn.feature_extraction import DictVectorizer +from sklearn.feature_selection import SelectKBest, chi2 + + +@pytest.mark.parametrize("sparse", (True, False)) +@pytest.mark.parametrize("dtype", (int, np.float32, np.int16)) +@pytest.mark.parametrize("sort", (True, False)) +@pytest.mark.parametrize("iterable", (True, False)) +def test_dictvectorizer(sparse, dtype, sort, iterable): + D = [{"foo": 1, "bar": 3}, {"bar": 4, "baz": 2}, {"bar": 1, "quux": 1, "quuux": 2}] + + v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort) + X = v.fit_transform(iter(D) if iterable else D) + + assert sp.issparse(X) == sparse + assert X.shape == (3, 5) + assert X.sum() == 14 + assert v.inverse_transform(X) == D + + if sparse: + # CSR matrices can't be compared for equality + assert_array_equal( + X.toarray(), v.transform(iter(D) if iterable else D).toarray() + ) + else: + assert_array_equal(X, v.transform(iter(D) if iterable else D)) + + if sort: + assert v.feature_names_ == sorted(v.feature_names_) + + +def test_feature_selection(): + # make two feature dicts with two useful features and a bunch of useless + # ones, in terms of chi2 + d1 = dict([("useless%d" % i, 10) for i in range(20)], useful1=1, useful2=20) + d2 = dict([("useless%d" % i, 10) for i in range(20)], useful1=20, useful2=1) + + for 
indices in (True, False): + v = DictVectorizer().fit([d1, d2]) + X = v.transform([d1, d2]) + sel = SelectKBest(chi2, k=2).fit(X, [0, 1]) + + v.restrict(sel.get_support(indices=indices), indices=indices) + assert_array_equal(v.get_feature_names_out(), ["useful1", "useful2"]) + + +def test_one_of_k(): + D_in = [ + {"version": "1", "ham": 2}, + {"version": "2", "spam": 0.3}, + {"version=3": True, "spam": -1}, + ] + v = DictVectorizer() + X = v.fit_transform(D_in) + assert X.shape == (3, 5) + + D_out = v.inverse_transform(X) + assert D_out[0] == {"version=1": 1, "ham": 2} + + names = v.get_feature_names_out() + assert "version=2" in names + assert "version" not in names + + +def test_iterable_value(): + D_names = ["ham", "spam", "version=1", "version=2", "version=3"] + X_expected = [ + [2.0, 0.0, 2.0, 1.0, 0.0], + [0.0, 0.3, 0.0, 1.0, 0.0], + [0.0, -1.0, 0.0, 0.0, 1.0], + ] + D_in = [ + {"version": ["1", "2", "1"], "ham": 2}, + {"version": "2", "spam": 0.3}, + {"version=3": True, "spam": -1}, + ] + v = DictVectorizer() + X = v.fit_transform(D_in) + X = X.toarray() + assert_array_equal(X, X_expected) + + D_out = v.inverse_transform(X) + assert D_out[0] == {"version=1": 2, "version=2": 1, "ham": 2} + + names = v.get_feature_names_out() + + assert_array_equal(names, D_names) + + +def test_iterable_not_string_error(): + error_value = ( + "Unsupported type in iterable value. " + "Only iterables of string are supported." + ) + D2 = [{"foo": "1", "bar": "2"}, {"foo": "3", "baz": "1"}, {"foo": [1, "three"]}] + v = DictVectorizer(sparse=False) + with pytest.raises(TypeError) as error: + v.fit(D2) + assert str(error.value) == error_value + + +def test_mapping_error(): + error_value = ( + "Unsupported value type " + "for foo: {'one': 1, 'three': 3}.\n" + "Mapping objects are not supported." + ) + D2 = [ + {"foo": "1", "bar": "2"}, + {"foo": "3", "baz": "1"}, + {"foo": {"one": 1, "three": 3}}, + ] + v = DictVectorizer(sparse=False) + with pytest.raises(TypeError) as error: + v.fit(D2) + assert str(error.value) == error_value + + +def test_unseen_or_no_features(): + D = [{"camelot": 0, "spamalot": 1}] + for sparse in [True, False]: + v = DictVectorizer(sparse=sparse).fit(D) + + X = v.transform({"push the pram a lot": 2}) + if sparse: + X = X.toarray() + assert_array_equal(X, np.zeros((1, 2))) + + X = v.transform({}) + if sparse: + X = X.toarray() + assert_array_equal(X, np.zeros((1, 2))) + + with pytest.raises(ValueError, match="empty"): + v.transform([]) + + +def test_deterministic_vocabulary(global_random_seed): + # Generate equal dictionaries with different memory layouts + items = [("%03d" % i, i) for i in range(1000)] + rng = Random(global_random_seed) + d_sorted = dict(items) + rng.shuffle(items) + d_shuffled = dict(items) + + # check that the memory layout does not impact the resulting vocabulary + v_1 = DictVectorizer().fit([d_sorted]) + v_2 = DictVectorizer().fit([d_shuffled]) + + assert v_1.vocabulary_ == v_2.vocabulary_ + + +def test_n_features_in(): + # For vectorizers, n_features_in_ does not make sense and does not exist. + dv = DictVectorizer() + assert not hasattr(dv, "n_features_in_") + d = [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}] + dv.fit(d) + assert not hasattr(dv, "n_features_in_") + + +def test_dictvectorizer_dense_sparse_equivalence(): + """Check the equivalence between between sparse and dense DictVectorizer. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19978 + """ + movie_entry_fit = [ + {"category": ["thriller", "drama"], "year": 2003}, + {"category": ["animation", "family"], "year": 2011}, + {"year": 1974}, + ] + movie_entry_transform = [{"category": ["thriller"], "unseen_feature": "3"}] + dense_vectorizer = DictVectorizer(sparse=False) + sparse_vectorizer = DictVectorizer(sparse=True) + + dense_vector_fit = dense_vectorizer.fit_transform(movie_entry_fit) + sparse_vector_fit = sparse_vectorizer.fit_transform(movie_entry_fit) + + assert not sp.issparse(dense_vector_fit) + assert sp.issparse(sparse_vector_fit) + + assert_allclose(dense_vector_fit, sparse_vector_fit.toarray()) + + dense_vector_transform = dense_vectorizer.transform(movie_entry_transform) + sparse_vector_transform = sparse_vectorizer.transform(movie_entry_transform) + + assert not sp.issparse(dense_vector_transform) + assert sp.issparse(sparse_vector_transform) + + assert_allclose(dense_vector_transform, sparse_vector_transform.toarray()) + + dense_inverse_transform = dense_vectorizer.inverse_transform(dense_vector_transform) + sparse_inverse_transform = sparse_vectorizer.inverse_transform( + sparse_vector_transform + ) + + expected_inverse = [{"category=thriller": 1.0}] + assert dense_inverse_transform == expected_inverse + assert sparse_inverse_transform == expected_inverse + + +def test_dict_vectorizer_unsupported_value_type(): + """Check that we raise an error when the value associated to a feature + is not supported. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19489 + """ + + class A: + pass + + vectorizer = DictVectorizer(sparse=True) + X = [{"foo": A()}] + err_msg = "Unsupported value Type" + with pytest.raises(TypeError, match=err_msg): + vectorizer.fit_transform(X) + + +def test_dict_vectorizer_get_feature_names_out(): + """Check that integer feature names are converted to strings in + feature_names_out.""" + + X = [{1: 2, 3: 4}, {2: 4}] + dv = DictVectorizer(sparse=False).fit(X) + + feature_names = dv.get_feature_names_out() + assert isinstance(feature_names, np.ndarray) + assert feature_names.dtype == object + assert_array_equal(feature_names, ["1", "2", "3"]) + + +@pytest.mark.parametrize( + "method, input", + [ + ("transform", [{1: 2, 3: 4}, {2: 4}]), + ("inverse_transform", [{1: 2, 3: 4}, {2: 4}]), + ("restrict", [True, False, True]), + ], +) +def test_dict_vectorizer_not_fitted_error(method, input): + """Check that unfitted DictVectorizer instance raises NotFittedError. + + This should be part of the common test but currently they test estimator accepting + text input. 
+ """ + dv = DictVectorizer(sparse=False) + + with pytest.raises(NotFittedError): + getattr(dv, method)(input) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py new file mode 100644 index 0000000000000000000000000000000000000000..276d0d48b077022559c775eab90abf363ffc6989 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py @@ -0,0 +1,160 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.feature_extraction import FeatureHasher +from sklearn.feature_extraction._hashing_fast import transform as _hashing_transform + + +def test_feature_hasher_dicts(): + feature_hasher = FeatureHasher(n_features=16) + assert "dict" == feature_hasher.input_type + + raw_X = [{"foo": "bar", "dada": 42, "tzara": 37}, {"foo": "baz", "gaga": "string1"}] + X1 = FeatureHasher(n_features=16).transform(raw_X) + gen = (iter(d.items()) for d in raw_X) + X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen) + assert_array_equal(X1.toarray(), X2.toarray()) + + +def test_feature_hasher_strings(): + # mix byte and Unicode strings; note that "foo" is a duplicate in row 0 + raw_X = [ + ["foo", "bar", "baz", "foo".encode("ascii")], + ["bar".encode("ascii"), "baz", "quux"], + ] + + for lg_n_features in (7, 9, 11, 16, 22): + n_features = 2**lg_n_features + + it = (x for x in raw_X) # iterable + + feature_hasher = FeatureHasher( + n_features=n_features, input_type="string", alternate_sign=False + ) + X = feature_hasher.transform(it) + + assert X.shape[0] == len(raw_X) + assert X.shape[1] == n_features + + assert X[0].sum() == 4 + assert X[1].sum() == 3 + + assert X.nnz == 6 + + +@pytest.mark.parametrize( + "raw_X", + [ + ["my_string", "another_string"], + (x for x in ["my_string", "another_string"]), + ], + ids=["list", "generator"], +) +def test_feature_hasher_single_string(raw_X): + """FeatureHasher raises error when a sample is a single string. + + Non-regression test for gh-13199. 
+ """ + msg = "Samples can not be a single string" + + feature_hasher = FeatureHasher(n_features=10, input_type="string") + with pytest.raises(ValueError, match=msg): + feature_hasher.transform(raw_X) + + +def test_hashing_transform_seed(): + # check the influence of the seed when computing the hashes + raw_X = [ + ["foo", "bar", "baz", "foo".encode("ascii")], + ["bar".encode("ascii"), "baz", "quux"], + ] + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices, indptr, _ = _hashing_transform(raw_X_, 2**7, str, False) + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=0) + assert_array_equal(indices, indices_0) + assert_array_equal(indptr, indptr_0) + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices_1, _, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=1) + with pytest.raises(AssertionError): + assert_array_equal(indices, indices_1) + + +def test_feature_hasher_pairs(): + raw_X = ( + iter(d.items()) + for d in [{"foo": 1, "bar": 2}, {"baz": 3, "quux": 4, "foo": -1}] + ) + feature_hasher = FeatureHasher(n_features=16, input_type="pair") + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = sorted(np.abs(x1[x1 != 0])) + x2_nz = sorted(np.abs(x2[x2 != 0])) + assert [1, 2] == x1_nz + assert [1, 3, 4] == x2_nz + + +def test_feature_hasher_pairs_with_string_values(): + raw_X = ( + iter(d.items()) + for d in [{"foo": 1, "bar": "a"}, {"baz": "abc", "quux": 4, "foo": -1}] + ) + feature_hasher = FeatureHasher(n_features=16, input_type="pair") + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = sorted(np.abs(x1[x1 != 0])) + x2_nz = sorted(np.abs(x2[x2 != 0])) + assert [1, 1] == x1_nz + assert [1, 1, 4] == x2_nz + + raw_X = (iter(d.items()) for d in [{"bax": "abc"}, {"bax": "abc"}]) + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = np.abs(x1[x1 != 0]) + x2_nz = np.abs(x2[x2 != 0]) + assert [1] == x1_nz + assert [1] == x2_nz + assert_array_equal(x1, x2) + + +def test_hash_empty_input(): + n_features = 16 + raw_X = [[], (), iter(range(0))] + + feature_hasher = FeatureHasher(n_features=n_features, input_type="string") + X = feature_hasher.transform(raw_X) + + assert_array_equal(X.toarray(), np.zeros((len(raw_X), n_features))) + + +def test_hasher_zeros(): + # Assert that no zeros are materialized in the output. 
+ X = FeatureHasher().transform([{"foo": 0}]) + assert X.data.shape == (0,) + + +def test_hasher_alternate_sign(): + X = [list("Thequickbrownfoxjumped")] + + Xt = FeatureHasher(alternate_sign=True, input_type="string").fit_transform(X) + assert Xt.data.min() < 0 and Xt.data.max() > 0 + + Xt = FeatureHasher(alternate_sign=False, input_type="string").fit_transform(X) + assert Xt.data.min() > 0 + + +def test_hash_collisions(): + X = [list("Thequickbrownfoxjumped")] + + Xt = FeatureHasher( + alternate_sign=True, n_features=1, input_type="string" + ).fit_transform(X) + # check that some of the hashed tokens are added + # with an opposite sign and cancel out + assert abs(Xt.data[0]) < len(X[0]) + + Xt = FeatureHasher( + alternate_sign=False, n_features=1, input_type="string" + ).fit_transform(X) + assert Xt.data[0] == len(X[0]) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py new file mode 100644 index 0000000000000000000000000000000000000000..375652c848db66996f24d4d8c52d009659e8b16b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py @@ -0,0 +1,356 @@ +# Authors: Emmanuelle Gouillart +# Gael Varoquaux +# License: BSD 3 clause + +import numpy as np +import pytest +from scipy import ndimage +from scipy.sparse.csgraph import connected_components + +from sklearn.feature_extraction.image import ( + PatchExtractor, + _extract_patches, + extract_patches_2d, + grid_to_graph, + img_to_graph, + reconstruct_from_patches_2d, +) + + +def test_img_to_graph(): + x, y = np.mgrid[:4, :4] - 10 + grad_x = img_to_graph(x) + grad_y = img_to_graph(y) + assert grad_x.nnz == grad_y.nnz + # Negative elements are the diagonal: the elements of the original + # image. 
Positive elements are the values of the gradient, they + # should all be equal on grad_x and grad_y + np.testing.assert_array_equal( + grad_x.data[grad_x.data > 0], grad_y.data[grad_y.data > 0] + ) + + +def test_img_to_graph_sparse(): + # Check that the edges are in the right position + # when using a sparse image with a singleton component + mask = np.zeros((2, 3), dtype=bool) + mask[0, 0] = 1 + mask[:, 2] = 1 + x = np.zeros((2, 3)) + x[0, 0] = 1 + x[0, 2] = -1 + x[1, 2] = -2 + grad_x = img_to_graph(x, mask=mask).todense() + desired = np.array([[1, 0, 0], [0, -1, 1], [0, 1, -2]]) + np.testing.assert_array_equal(grad_x, desired) + + +def test_grid_to_graph(): + # Checking that the function works with graphs containing no edges + size = 2 + roi_size = 1 + # Generating two convex parts with one vertex + # Thus, edges will be empty in _to_graph + mask = np.zeros((size, size), dtype=bool) + mask[0:roi_size, 0:roi_size] = True + mask[-roi_size:, -roi_size:] = True + mask = mask.reshape(size**2) + A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray) + assert connected_components(A)[0] == 2 + + # check ordering + mask = np.zeros((2, 3), dtype=bool) + mask[0, 0] = 1 + mask[:, 2] = 1 + graph = grid_to_graph(2, 3, 1, mask=mask.ravel()).todense() + desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) + np.testing.assert_array_equal(graph, desired) + + # Checking that the function works whatever the type of mask is + mask = np.ones((size, size), dtype=np.int16) + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask) + assert connected_components(A)[0] == 1 + + # Checking dtype of the graph + mask = np.ones((size, size)) + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool) + assert A.dtype == bool + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int) + assert A.dtype == int + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64) + assert A.dtype == np.float64 + + +def test_connect_regions(raccoon_face_fxt): + face = raccoon_face_fxt + # subsample by 4 to reduce run time + face = face[::4, ::4] + for thr in (50, 150): + mask = face > thr + graph = img_to_graph(face, mask=mask) + assert ndimage.label(mask)[1] == connected_components(graph)[0] + + +def test_connect_regions_with_grid(raccoon_face_fxt): + face = raccoon_face_fxt + + # subsample by 4 to reduce run time + face = face[::4, ::4] + + mask = face > 50 + graph = grid_to_graph(*face.shape, mask=mask) + assert ndimage.label(mask)[1] == connected_components(graph)[0] + + mask = face > 150 + graph = grid_to_graph(*face.shape, mask=mask, dtype=None) + assert ndimage.label(mask)[1] == connected_components(graph)[0] + + +@pytest.fixture +def downsampled_face(raccoon_face_fxt): + face = raccoon_face_fxt + face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2] + face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2] + face = face.astype(np.float32) + face /= 16.0 + return face + + +@pytest.fixture +def orange_face(downsampled_face): + face = downsampled_face + face_color = np.zeros(face.shape + (3,)) + face_color[:, :, 0] = 256 - face + face_color[:, :, 1] = 256 - face / 2 + face_color[:, :, 2] = 256 - face / 4 + return face_color + + +def _make_images(face): + # make a collection of faces + images = np.zeros((3,) + face.shape) + images[0] = face + images[1] = face + 1 + images[2] = face + 2 + return images + + +@pytest.fixture +def downsampled_face_collection(downsampled_face): + return _make_images(downsampled_face) + + +def 
test_extract_patches_all(downsampled_face): + face = downsampled_face + i_h, i_w = face.shape + p_h, p_w = 16, 16 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + patches = extract_patches_2d(face, (p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_extract_patches_all_color(orange_face): + face = orange_face + i_h, i_w = face.shape[:2] + p_h, p_w = 16, 16 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + patches = extract_patches_2d(face, (p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w, 3) + + +def test_extract_patches_all_rect(downsampled_face): + face = downsampled_face + face = face[:, 32:97] + i_h, i_w = face.shape + p_h, p_w = 16, 12 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + + patches = extract_patches_2d(face, (p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_extract_patches_max_patches(downsampled_face): + face = downsampled_face + i_h, i_w = face.shape + p_h, p_w = 16, 16 + + patches = extract_patches_2d(face, (p_h, p_w), max_patches=100) + assert patches.shape == (100, p_h, p_w) + + expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1)) + patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5) + assert patches.shape == (expected_n_patches, p_h, p_w) + + with pytest.raises(ValueError): + extract_patches_2d(face, (p_h, p_w), max_patches=2.0) + with pytest.raises(ValueError): + extract_patches_2d(face, (p_h, p_w), max_patches=-1.0) + + +def test_extract_patch_same_size_image(downsampled_face): + face = downsampled_face + # Request patches of the same size as image + # Should return just the single patch a.k.a. the image + patches = extract_patches_2d(face, face.shape, max_patches=2) + assert patches.shape[0] == 1 + + +def test_extract_patches_less_than_max_patches(downsampled_face): + face = downsampled_face + i_h, i_w = face.shape + p_h, p_w = 3 * i_h // 4, 3 * i_w // 4 + # this is 3185 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + + patches = extract_patches_2d(face, (p_h, p_w), max_patches=4000) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_reconstruct_patches_perfect(downsampled_face): + face = downsampled_face + p_h, p_w = 16, 16 + + patches = extract_patches_2d(face, (p_h, p_w)) + face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) + np.testing.assert_array_almost_equal(face, face_reconstructed) + + +def test_reconstruct_patches_perfect_color(orange_face): + face = orange_face + p_h, p_w = 16, 16 + + patches = extract_patches_2d(face, (p_h, p_w)) + face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) + np.testing.assert_array_almost_equal(face, face_reconstructed) + + +def test_patch_extractor_fit(downsampled_face_collection): + faces = downsampled_face_collection + extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0) + assert extr == extr.fit(faces) + + +def test_patch_extractor_max_patches(downsampled_face_collection): + faces = downsampled_face_collection + i_h, i_w = faces.shape[1:3] + p_h, p_w = 8, 8 + + max_patches = 100 + expected_n_patches = len(faces) * max_patches + extr = PatchExtractor( + patch_size=(p_h, p_w), max_patches=max_patches, random_state=0 + ) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w) + + max_patches = 0.5 + expected_n_patches = len(faces) * int( + (i_h - p_h + 1) * (i_w - p_w + 1) * max_patches + ) + extr = PatchExtractor( + patch_size=(p_h, p_w), max_patches=max_patches, 
random_state=0 + ) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_patch_extractor_max_patches_default(downsampled_face_collection): + faces = downsampled_face_collection + extr = PatchExtractor(max_patches=100, random_state=0) + patches = extr.transform(faces) + assert patches.shape == (len(faces) * 100, 19, 25) + + +def test_patch_extractor_all_patches(downsampled_face_collection): + faces = downsampled_face_collection + i_h, i_w = faces.shape[1:3] + p_h, p_w = 8, 8 + expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) + extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_patch_extractor_color(orange_face): + faces = _make_images(orange_face) + i_h, i_w = faces.shape[1:3] + p_h, p_w = 8, 8 + expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) + extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w, 3) + + +def test_extract_patches_strided(): + image_shapes_1D = [(10,), (10,), (11,), (10,)] + patch_sizes_1D = [(1,), (2,), (3,), (8,)] + patch_steps_1D = [(1,), (1,), (4,), (2,)] + + expected_views_1D = [(10,), (9,), (3,), (2,)] + last_patch_1D = [(10,), (8,), (8,), (2,)] + + image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)] + patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)] + patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)] + + expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)] + last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)] + + image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)] + patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)] + patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)] + + expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)] + last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)] + + image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D + patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D + patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D + expected_views = expected_views_1D + expected_views_2D + expected_views_3D + last_patches = last_patch_1D + last_patch_2D + last_patch_3D + + for image_shape, patch_size, patch_step, expected_view, last_patch in zip( + image_shapes, patch_sizes, patch_steps, expected_views, last_patches + ): + image = np.arange(np.prod(image_shape)).reshape(image_shape) + patches = _extract_patches( + image, patch_shape=patch_size, extraction_step=patch_step + ) + + ndim = len(image_shape) + + assert patches.shape[:ndim] == expected_view + last_patch_slices = tuple( + slice(i, i + j, None) for i, j in zip(last_patch, patch_size) + ) + assert ( + patches[(-1, None, None) * ndim] == image[last_patch_slices].squeeze() + ).all() + + +def test_extract_patches_square(downsampled_face): + # test same patch size for all dimensions + face = downsampled_face + i_h, i_w = face.shape + p = 8 + expected_n_patches = ((i_h - p + 1), (i_w - p + 1)) + patches = _extract_patches(face, patch_shape=p) + assert patches.shape == (expected_n_patches[0], expected_n_patches[1], p, p) + + +def test_width_patch(): + # width and height of the patch should be less than the image + x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + with pytest.raises(ValueError): + extract_patches_2d(x, (4, 1)) + with pytest.raises(ValueError): + extract_patches_2d(x, (1, 4)) + + +def 
test_patch_extractor_wrong_input(orange_face): + """Check that an informative error is raised if the patch_size is not valid.""" + faces = _make_images(orange_face) + err_msg = "patch_size must be a tuple of two integers" + extractor = PatchExtractor(patch_size=(8, 8, 8)) + with pytest.raises(ValueError, match=err_msg): + extractor.transform(faces) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py new file mode 100644 index 0000000000000000000000000000000000000000..7c7cac85ccc6ba3deeec862246f2118b6131fcf2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py @@ -0,0 +1,1655 @@ +import pickle +import re +import warnings +from collections import defaultdict +from collections.abc import Mapping +from functools import partial +from io import StringIO +from itertools import product + +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from scipy import sparse + +from sklearn.base import clone +from sklearn.feature_extraction.text import ( + ENGLISH_STOP_WORDS, + CountVectorizer, + HashingVectorizer, + TfidfTransformer, + TfidfVectorizer, + strip_accents_ascii, + strip_accents_unicode, + strip_tags, +) +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.pipeline import Pipeline +from sklearn.svm import LinearSVC +from sklearn.utils import _IS_WASM, IS_PYPY +from sklearn.utils._testing import ( + assert_allclose_dense_sparse, + assert_almost_equal, + fails_if_pypy, + skip_if_32bit, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +JUNK_FOOD_DOCS = ( + "the pizza pizza beer copyright", + "the pizza burger beer copyright", + "the the pizza beer beer copyright", + "the burger beer beer copyright", + "the coke burger coke copyright", + "the coke burger burger", +) + +NOTJUNK_FOOD_DOCS = ( + "the salad celeri copyright", + "the salad salad sparkling water copyright", + "the the celeri celeri copyright", + "the tomato tomato salad water", + "the tomato salad water copyright", +) + +ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + +def uppercase(s): + return strip_accents_unicode(s).upper() + + +def strip_eacute(s): + return s.replace("é", "e") + + +def split_tokenize(s): + return s.split() + + +def lazy_analyze(s): + return ["the_ultimate_feature"] + + +def test_strip_accents(): + # check some classical latin accentuated symbols + a = "àáâãäåçèéêë" + expected = "aaaaaaceeee" + assert strip_accents_unicode(a) == expected + + a = "ìíîïñòóôõöùúûüý" + expected = "iiiinooooouuuuy" + assert strip_accents_unicode(a) == expected + + # check some arabic + a = "\u0625" # alef with a hamza below: إ + expected = "\u0627" # simple alef: ا + assert strip_accents_unicode(a) == expected + + # mix letters accentuated and not + a = "this is à test" + expected = "this is a test" + assert strip_accents_unicode(a) == expected + + # strings that are already decomposed + a = "o\u0308" # o with diaeresis + expected = "o" + assert strip_accents_unicode(a) == expected + + # combining marks by themselves + a = "\u0300\u0301\u0302\u0303" + expected = "" + assert strip_accents_unicode(a) == expected + + # Multiple combining marks on one character + a = "o\u0308\u0304" + expected = "o" + assert strip_accents_unicode(a) == expected + + +def test_to_ascii(): + # check some classical latin accentuated symbols + a = 
"àáâãäåçèéêë" + expected = "aaaaaaceeee" + assert strip_accents_ascii(a) == expected + + a = "ìíîïñòóôõöùúûüý" + expected = "iiiinooooouuuuy" + assert strip_accents_ascii(a) == expected + + # check some arabic + a = "\u0625" # halef with a hamza below + expected = "" # halef has no direct ascii match + assert strip_accents_ascii(a) == expected + + # mix letters accentuated and not + a = "this is à test" + expected = "this is a test" + assert strip_accents_ascii(a) == expected + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer)) +def test_word_analyzer_unigrams(Vectorizer): + wa = Vectorizer(strip_accents="ascii").build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "ai", + "mange", + "du", + "kangourou", + "ce", + "midi", + "etait", + "pas", + "tres", + "bon", + ] + assert wa(text) == expected + + text = "This is a test, really.\n\n I met Harry yesterday." + expected = ["this", "is", "test", "really", "met", "harry", "yesterday"] + assert wa(text) == expected + + wa = Vectorizer(input="file").build_analyzer() + text = StringIO("This is a test with a file-like object!") + expected = ["this", "is", "test", "with", "file", "like", "object"] + assert wa(text) == expected + + # with custom preprocessor + wa = Vectorizer(preprocessor=uppercase).build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "AI", + "MANGE", + "DU", + "KANGOUROU", + "CE", + "MIDI", + "ETAIT", + "PAS", + "TRES", + "BON", + ] + assert wa(text) == expected + + # with custom tokenizer + wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "j'ai", + "mange", + "du", + "kangourou", + "ce", + "midi,", + "c'etait", + "pas", + "tres", + "bon.", + ] + assert wa(text) == expected + + +def test_word_analyzer_unigrams_and_bigrams(): + wa = CountVectorizer( + analyzer="word", strip_accents="unicode", ngram_range=(1, 2) + ).build_analyzer() + + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "ai", + "mange", + "du", + "kangourou", + "ce", + "midi", + "etait", + "pas", + "tres", + "bon", + "ai mange", + "mange du", + "du kangourou", + "kangourou ce", + "ce midi", + "midi etait", + "etait pas", + "pas tres", + "tres bon", + ] + assert wa(text) == expected + + +def test_unicode_decode_error(): + # decode_error default to strict, so this should fail + # First, encode (as bytes) a unicode string. + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + text_bytes = text.encode("utf-8") + + # Then let the Analyzer try to decode it as ascii. It should fail, + # because we have given it an incorrect encoding. 
+ wa = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer() + with pytest.raises(UnicodeDecodeError): + wa(text_bytes) + + ca = CountVectorizer( + analyzer="char", ngram_range=(3, 6), encoding="ascii" + ).build_analyzer() + with pytest.raises(UnicodeDecodeError): + ca(text_bytes) + + +def test_char_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="char", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "J'ai mangé du kangourou ce midi, c'était pas très bon" + expected = ["j'a", "'ai", "ai ", "i m", " ma"] + assert cnga(text)[:5] == expected + expected = ["s tres", " tres ", "tres b", "res bo", "es bon"] + assert cnga(text)[-5:] == expected + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = ["thi", "his", "is ", "s i", " is"] + assert cnga(text)[:5] == expected + + expected = [" yeste", "yester", "esterd", "sterda", "terday"] + assert cnga(text)[-5:] == expected + + cnga = CountVectorizer( + input="file", analyzer="char", ngram_range=(3, 6) + ).build_analyzer() + text = StringIO("This is a test with a file-like object!") + expected = ["thi", "his", "is ", "s i", " is"] + assert cnga(text)[:5] == expected + + +def test_char_wb_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = [" th", "thi", "his", "is ", " thi"] + assert cnga(text)[:5] == expected + + expected = ["yester", "esterd", "sterda", "terday", "erday "] + assert cnga(text)[-5:] == expected + + cnga = CountVectorizer( + input="file", analyzer="char_wb", ngram_range=(3, 6) + ).build_analyzer() + text = StringIO("A test with a file-like object!") + expected = [" a ", " te", "tes", "est", "st ", " tes"] + assert cnga(text)[:6] == expected + + +def test_word_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="word", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = ["this is test", "is test really", "test really met"] + assert cnga(text)[:3] == expected + + expected = [ + "test really met harry yesterday", + "this is test really met harry", + "is test really met harry yesterday", + ] + assert cnga(text)[-3:] == expected + + cnga_file = CountVectorizer( + input="file", analyzer="word", ngram_range=(3, 6) + ).build_analyzer() + file = StringIO(text) + assert cnga_file(file) == cnga(text) + + +def test_countvectorizer_custom_vocabulary(): + vocab = {"pizza": 0, "beer": 1} + terms = set(vocab.keys()) + + # Try a few of the supported types. 
+ for typ in [dict, list, iter, partial(defaultdict, int)]: + v = typ(vocab) + vect = CountVectorizer(vocabulary=v) + vect.fit(JUNK_FOOD_DOCS) + if isinstance(v, Mapping): + assert vect.vocabulary_ == vocab + else: + assert set(vect.vocabulary_) == terms + X = vect.transform(JUNK_FOOD_DOCS) + assert X.shape[1] == len(terms) + v = typ(vocab) + vect = CountVectorizer(vocabulary=v) + inv = vect.inverse_transform(X) + assert len(inv) == X.shape[0] + + +def test_countvectorizer_custom_vocabulary_pipeline(): + what_we_like = ["pizza", "beer"] + pipe = Pipeline( + [ + ("count", CountVectorizer(vocabulary=what_we_like)), + ("tfidf", TfidfTransformer()), + ] + ) + X = pipe.fit_transform(ALL_FOOD_DOCS) + assert set(pipe.named_steps["count"].vocabulary_) == set(what_we_like) + assert X.shape[1] == len(what_we_like) + + +def test_countvectorizer_custom_vocabulary_repeated_indices(): + vocab = {"pizza": 0, "beer": 0} + msg = "Vocabulary contains repeated indices" + with pytest.raises(ValueError, match=msg): + vect = CountVectorizer(vocabulary=vocab) + vect.fit(["pasta_siziliana"]) + + +def test_countvectorizer_custom_vocabulary_gap_index(): + vocab = {"pizza": 1, "beer": 2} + with pytest.raises(ValueError, match="doesn't contain index"): + vect = CountVectorizer(vocabulary=vocab) + vect.fit(["pasta_verdura"]) + + +def test_countvectorizer_stop_words(): + cv = CountVectorizer() + cv.set_params(stop_words="english") + assert cv.get_stop_words() == ENGLISH_STOP_WORDS + cv.set_params(stop_words="_bad_str_stop_") + with pytest.raises(ValueError): + cv.get_stop_words() + cv.set_params(stop_words="_bad_unicode_stop_") + with pytest.raises(ValueError): + cv.get_stop_words() + stoplist = ["some", "other", "words"] + cv.set_params(stop_words=stoplist) + assert cv.get_stop_words() == set(stoplist) + + +def test_countvectorizer_empty_vocabulary(): + with pytest.raises(ValueError, match="empty vocabulary"): + vect = CountVectorizer(vocabulary=[]) + vect.fit(["foo"]) + + with pytest.raises(ValueError, match="empty vocabulary"): + v = CountVectorizer(max_df=1.0, stop_words="english") + # fit on stopwords only + v.fit(["to be or not to be", "and me too", "and so do you"]) + + +def test_fit_countvectorizer_twice(): + cv = CountVectorizer() + X1 = cv.fit_transform(ALL_FOOD_DOCS[:5]) + X2 = cv.fit_transform(ALL_FOOD_DOCS[5:]) + assert X1.shape[1] != X2.shape[1] + + +def test_countvectorizer_custom_token_pattern(): + """Check `get_feature_names_out()` when a custom token pattern is passed. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12971 + """ + corpus = [ + "This is the 1st document in my corpus.", + "This document is the 2nd sample.", + "And this is the 3rd one.", + "Is this the 4th document?", + ] + token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b" + vectorizer = CountVectorizer(token_pattern=token_pattern) + vectorizer.fit_transform(corpus) + expected = ["document", "one", "sample"] + feature_names_out = vectorizer.get_feature_names_out() + assert_array_equal(feature_names_out, expected) + + +def test_countvectorizer_custom_token_pattern_with_several_group(): + """Check that we raise an error if token pattern capture several groups. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12971 + """ + corpus = [ + "This is the 1st document in my corpus.", + "This document is the 2nd sample.", + "And this is the 3rd one.", + "Is this the 4th document?", + ] + + token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b" + err_msg = "More than 1 capturing group in token pattern" + vectorizer = CountVectorizer(token_pattern=token_pattern) + with pytest.raises(ValueError, match=err_msg): + vectorizer.fit(corpus) + + +def test_countvectorizer_uppercase_in_vocab(): + # Check that the check for uppercase in the provided vocabulary is only done at fit + # time and not at transform time (#21251) + vocabulary = ["Sample", "Upper", "Case", "Vocabulary"] + message = ( + "Upper case characters found in" + " vocabulary while 'lowercase'" + " is True. These entries will not" + " be matched with any documents" + ) + + vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary) + + with pytest.warns(UserWarning, match=message): + vectorizer.fit(vocabulary) + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + vectorizer.transform(vocabulary) + + +def test_tf_transformer_feature_names_out(): + """Check get_feature_names_out for TfidfTransformer""" + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X) + + feature_names_in = ["a", "c", "b"] + feature_names_out = tr.get_feature_names_out(feature_names_in) + assert_array_equal(feature_names_in, feature_names_out) + + +def test_tf_idf_smoothing(): + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + # check normalization + assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0]) + + # this is robust to features with only zeros + X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + +@pytest.mark.xfail( + _IS_WASM, + reason=( + "no floating point exceptions, see" + " https://github.com/numpy/numpy/pull/21895#issuecomment-1311525881" + ), +) +def test_tfidf_no_smoothing(): + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=False, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + # check normalization + assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0]) + + # the lack of smoothing make IDF fragile in the presence of feature with + # only zeros + X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=False, norm="l2") + + in_warning_message = "divide by zero" + with pytest.warns(RuntimeWarning, match=in_warning_message): + tr.fit_transform(X).toarray() + + +def test_sublinear_tf(): + X = [[1], [2], [3]] + tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None) + tfidf = tr.fit_transform(X).toarray() + assert tfidf[0] == 1 + assert tfidf[1] > tfidf[0] + assert tfidf[2] > tfidf[1] + assert tfidf[1] < 2 + assert tfidf[2] < 3 + + +def test_vectorizer(): + # raw documents as an iterator + train_data = iter(ALL_FOOD_DOCS[:-1]) + test_data = [ALL_FOOD_DOCS[-1]] + n_train = len(ALL_FOOD_DOCS) - 1 + + # test without vocabulary + v1 = CountVectorizer(max_df=0.5) + counts_train = v1.fit_transform(train_data) + if hasattr(counts_train, "tocsr"): + counts_train = counts_train.tocsr() + assert counts_train[0, v1.vocabulary_["pizza"]] == 2 + + # build a vectorizer v1 with the 
same vocabulary as the one fitted by v1 + v2 = CountVectorizer(vocabulary=v1.vocabulary_) + + # compare that the two vectorizer give the same output on the test sample + for v in (v1, v2): + counts_test = v.transform(test_data) + if hasattr(counts_test, "tocsr"): + counts_test = counts_test.tocsr() + + vocabulary = v.vocabulary_ + assert counts_test[0, vocabulary["salad"]] == 1 + assert counts_test[0, vocabulary["tomato"]] == 1 + assert counts_test[0, vocabulary["water"]] == 1 + + # stop word from the fixed list + assert "the" not in vocabulary + + # stop word found automatically by the vectorizer DF thresholding + # words that are high frequent across the complete corpus are likely + # to be not informative (either real stop words of extraction + # artifacts) + assert "copyright" not in vocabulary + + # not present in the sample + assert counts_test[0, vocabulary["coke"]] == 0 + assert counts_test[0, vocabulary["burger"]] == 0 + assert counts_test[0, vocabulary["beer"]] == 0 + assert counts_test[0, vocabulary["pizza"]] == 0 + + # test tf-idf + t1 = TfidfTransformer(norm="l1") + tfidf = t1.fit(counts_train).transform(counts_train).toarray() + assert len(t1.idf_) == len(v1.vocabulary_) + assert tfidf.shape == (n_train, len(v1.vocabulary_)) + + # test tf-idf with new data + tfidf_test = t1.transform(counts_test).toarray() + assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_)) + + # test tf alone + t2 = TfidfTransformer(norm="l1", use_idf=False) + tf = t2.fit(counts_train).transform(counts_train).toarray() + assert not hasattr(t2, "idf_") + + # test idf transform with unlearned idf vector + t3 = TfidfTransformer(use_idf=True) + with pytest.raises(ValueError): + t3.transform(counts_train) + + # L1-normalized term frequencies sum to one + assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train) + + # test the direct tfidf vectorizer + # (equivalent to term count vectorizer + tfidf transformer) + train_data = iter(ALL_FOOD_DOCS[:-1]) + tv = TfidfVectorizer(norm="l1") + + tv.max_df = v1.max_df + tfidf2 = tv.fit_transform(train_data).toarray() + assert not tv.fixed_vocabulary_ + assert_array_almost_equal(tfidf, tfidf2) + + # test the direct tfidf vectorizer with new data + tfidf_test2 = tv.transform(test_data).toarray() + assert_array_almost_equal(tfidf_test, tfidf_test2) + + # test transform on unfitted vectorizer with empty vocabulary + v3 = CountVectorizer(vocabulary=None) + with pytest.raises(ValueError): + v3.transform(train_data) + + # ascii preprocessor? + v3.set_params(strip_accents="ascii", lowercase=False) + processor = v3.build_preprocessor() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." 
+ expected = strip_accents_ascii(text) + result = processor(text) + assert expected == result + + # error on bad strip_accents param + v3.set_params(strip_accents="_gabbledegook_", preprocessor=None) + with pytest.raises(ValueError): + v3.build_preprocessor() + + # error with bad analyzer type + v3.set_params = "_invalid_analyzer_type_" + with pytest.raises(ValueError): + v3.build_analyzer() + + +def test_tfidf_vectorizer_setters(): + norm, use_idf, smooth_idf, sublinear_tf = "l2", False, False, False + tv = TfidfVectorizer( + norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf + ) + tv.fit(JUNK_FOOD_DOCS) + assert tv._tfidf.norm == norm + assert tv._tfidf.use_idf == use_idf + assert tv._tfidf.smooth_idf == smooth_idf + assert tv._tfidf.sublinear_tf == sublinear_tf + + # assigning value to `TfidfTransformer` should not have any effect until + # fitting + tv.norm = "l1" + tv.use_idf = True + tv.smooth_idf = True + tv.sublinear_tf = True + assert tv._tfidf.norm == norm + assert tv._tfidf.use_idf == use_idf + assert tv._tfidf.smooth_idf == smooth_idf + assert tv._tfidf.sublinear_tf == sublinear_tf + + tv.fit(JUNK_FOOD_DOCS) + assert tv._tfidf.norm == tv.norm + assert tv._tfidf.use_idf == tv.use_idf + assert tv._tfidf.smooth_idf == tv.smooth_idf + assert tv._tfidf.sublinear_tf == tv.sublinear_tf + + +@fails_if_pypy +def test_hashing_vectorizer(): + v = HashingVectorizer() + X = v.transform(ALL_FOOD_DOCS) + token_nnz = X.nnz + assert X.shape == (len(ALL_FOOD_DOCS), v.n_features) + assert X.dtype == v.dtype + + # By default the hashed values receive a random sign and l2 normalization + # makes the feature values bounded + assert np.min(X.data) > -1 + assert np.min(X.data) < 0 + assert np.max(X.data) > 0 + assert np.max(X.data) < 1 + + # Check that the rows are normalized + for i in range(X.shape[0]): + assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0) + + # Check vectorization with some non-default parameters + v = HashingVectorizer(ngram_range=(1, 2), norm="l1") + X = v.transform(ALL_FOOD_DOCS) + assert X.shape == (len(ALL_FOOD_DOCS), v.n_features) + assert X.dtype == v.dtype + + # ngrams generate more non zeros + ngrams_nnz = X.nnz + assert ngrams_nnz > token_nnz + assert ngrams_nnz < 2 * token_nnz + + # makes the feature values bounded + assert np.min(X.data) > -1 + assert np.max(X.data) < 1 + + # Check that the rows are normalized + for i in range(X.shape[0]): + assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0) + + +def test_feature_names(): + cv = CountVectorizer(max_df=0.5) + + # test for Value error on unfitted/empty vocabulary + with pytest.raises(ValueError): + cv.get_feature_names_out() + assert not cv.fixed_vocabulary_ + + # test for vocabulary learned from data + X = cv.fit_transform(ALL_FOOD_DOCS) + n_samples, n_features = X.shape + assert len(cv.vocabulary_) == n_features + + feature_names = cv.get_feature_names_out() + assert isinstance(feature_names, np.ndarray) + assert feature_names.dtype == object + + assert len(feature_names) == n_features + assert_array_equal( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ], + feature_names, + ) + + for idx, name in enumerate(feature_names): + assert idx == cv.vocabulary_.get(name) + + # test for custom vocabulary + vocab = [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + + cv = CountVectorizer(vocabulary=vocab) + feature_names = cv.get_feature_names_out() + assert_array_equal( + [ 
+ "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ], + feature_names, + ) + assert cv.fixed_vocabulary_ + + for idx, name in enumerate(feature_names): + assert idx == cv.vocabulary_.get(name) + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer)) +def test_vectorizer_max_features(Vectorizer): + expected_vocabulary = {"burger", "beer", "salad", "pizza"} + expected_stop_words = { + "celeri", + "tomato", + "copyright", + "coke", + "sparkling", + "water", + "the", + } + + # test bounded number of extracted features + vectorizer = Vectorizer(max_df=0.6, max_features=4) + vectorizer.fit(ALL_FOOD_DOCS) + assert set(vectorizer.vocabulary_) == expected_vocabulary + assert vectorizer.stop_words_ == expected_stop_words + + +def test_count_vectorizer_max_features(): + # Regression test: max_features didn't work correctly in 0.14. + + cv_1 = CountVectorizer(max_features=1) + cv_3 = CountVectorizer(max_features=3) + cv_None = CountVectorizer(max_features=None) + + counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + + features_1 = cv_1.get_feature_names_out() + features_3 = cv_3.get_feature_names_out() + features_None = cv_None.get_feature_names_out() + + # The most common feature is "the", with frequency 7. + assert 7 == counts_1.max() + assert 7 == counts_3.max() + assert 7 == counts_None.max() + + # The most common feature should be the same + assert "the" == features_1[np.argmax(counts_1)] + assert "the" == features_3[np.argmax(counts_3)] + assert "the" == features_None[np.argmax(counts_None)] + + +def test_vectorizer_max_df(): + test_data = ["abc", "dea", "eat"] + vect = CountVectorizer(analyzer="char", max_df=1.0) + vect.fit(test_data) + assert "a" in vect.vocabulary_.keys() + assert len(vect.vocabulary_.keys()) == 6 + assert len(vect.stop_words_) == 0 + + vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5 + vect.fit(test_data) + assert "a" not in vect.vocabulary_.keys() # {ae} ignored + assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain + assert "a" in vect.stop_words_ + assert len(vect.stop_words_) == 2 + + vect.max_df = 1 + vect.fit(test_data) + assert "a" not in vect.vocabulary_.keys() # {ae} ignored + assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain + assert "a" in vect.stop_words_ + assert len(vect.stop_words_) == 2 + + +def test_vectorizer_min_df(): + test_data = ["abc", "dea", "eat"] + vect = CountVectorizer(analyzer="char", min_df=1) + vect.fit(test_data) + assert "a" in vect.vocabulary_.keys() + assert len(vect.vocabulary_.keys()) == 6 + assert len(vect.stop_words_) == 0 + + vect.min_df = 2 + vect.fit(test_data) + assert "c" not in vect.vocabulary_.keys() # {bcdt} ignored + assert len(vect.vocabulary_.keys()) == 2 # {ae} remain + assert "c" in vect.stop_words_ + assert len(vect.stop_words_) == 4 + + vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4 + vect.fit(test_data) + assert "c" not in vect.vocabulary_.keys() # {bcdet} ignored + assert len(vect.vocabulary_.keys()) == 1 # {a} remains + assert "c" in vect.stop_words_ + assert len(vect.stop_words_) == 5 + + +def test_count_binary_occurrences(): + # by default multiple occurrences are counted as longs + test_data = ["aaabc", "abbde"] + vect = CountVectorizer(analyzer="char", max_df=1.0) + X = vect.fit_transform(test_data).toarray() + assert_array_equal(["a", "b", "c", "d", "e"], 
vect.get_feature_names_out()) + assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X) + + # using boolean features, we can fetch the binary occurrence info + # instead. + vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True) + X = vect.fit_transform(test_data).toarray() + assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X) + + # check the ability to change the dtype + vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32) + X_sparse = vect.fit_transform(test_data) + assert X_sparse.dtype == np.float32 + + +@fails_if_pypy +def test_hashed_binary_occurrences(): + # by default multiple occurrences are counted as longs + test_data = ["aaabc", "abbde"] + vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None) + X = vect.transform(test_data) + assert np.max(X[0:1].data) == 3 + assert np.max(X[1:2].data) == 2 + assert X.dtype == np.float64 + + # using boolean features, we can fetch the binary occurrence info + # instead. + vect = HashingVectorizer( + analyzer="char", alternate_sign=False, binary=True, norm=None + ) + X = vect.transform(test_data) + assert np.max(X.data) == 1 + assert X.dtype == np.float64 + + # check the ability to change the dtype + vect = HashingVectorizer( + analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64 + ) + X = vect.transform(test_data) + assert X.dtype == np.float64 + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer)) +def test_vectorizer_inverse_transform(Vectorizer): + # raw documents + data = ALL_FOOD_DOCS + vectorizer = Vectorizer() + transformed_data = vectorizer.fit_transform(data) + inversed_data = vectorizer.inverse_transform(transformed_data) + assert isinstance(inversed_data, list) + + analyze = vectorizer.build_analyzer() + for doc, inversed_terms in zip(data, inversed_data): + terms = np.sort(np.unique(analyze(doc))) + inversed_terms = np.sort(np.unique(inversed_terms)) + assert_array_equal(terms, inversed_terms) + + assert sparse.issparse(transformed_data) + assert transformed_data.format == "csr" + + # Test that inverse_transform also works with numpy arrays and + # scipy + transformed_data2 = transformed_data.toarray() + inversed_data2 = vectorizer.inverse_transform(transformed_data2) + for terms, terms2 in zip(inversed_data, inversed_data2): + assert_array_equal(np.sort(terms), np.sort(terms2)) + + # Check that inverse_transform also works on non CSR sparse data: + transformed_data3 = transformed_data.tocsc() + inversed_data3 = vectorizer.inverse_transform(transformed_data3) + for terms, terms3 in zip(inversed_data, inversed_data3): + assert_array_equal(np.sort(terms), np.sort(terms3)) + + +def test_count_vectorizer_pipeline_grid_selection(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + # split the dataset for model development and final evaluation + train_data, test_data, target_train, target_test = train_test_split( + data, target, test_size=0.2, random_state=0 + ) + + pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + parameters = { + "vect__ngram_range": [(1, 1), (1, 2)], + "svc__loss": ("hinge", "squared_hinge"), + } + + # find the best parameters for both the feature extraction and the + # classifier + grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3) + + # Check that the best model found by grid search is 100% correct on the + # held out 
evaluation set. + pred = grid_search.fit(train_data, target_train).predict(test_data) + assert_array_equal(pred, target_test) + + # on this toy dataset bigram representation which is used in the last of + # the grid_search is considered the best estimator since they all converge + # to 100% accuracy models + assert grid_search.best_score_ == 1.0 + best_vectorizer = grid_search.best_estimator_.named_steps["vect"] + assert best_vectorizer.ngram_range == (1, 1) + + +def test_vectorizer_pipeline_grid_selection(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + # split the dataset for model development and final evaluation + train_data, test_data, target_train, target_test = train_test_split( + data, target, test_size=0.1, random_state=0 + ) + + pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + parameters = { + "vect__ngram_range": [(1, 1), (1, 2)], + "vect__norm": ("l1", "l2"), + "svc__loss": ("hinge", "squared_hinge"), + } + + # find the best parameters for both the feature extraction and the + # classifier + grid_search = GridSearchCV(pipeline, parameters, n_jobs=1) + + # Check that the best model found by grid search is 100% correct on the + # held out evaluation set. + pred = grid_search.fit(train_data, target_train).predict(test_data) + assert_array_equal(pred, target_test) + + # on this toy dataset bigram representation which is used in the last of + # the grid_search is considered the best estimator since they all converge + # to 100% accuracy models + assert grid_search.best_score_ == 1.0 + best_vectorizer = grid_search.best_estimator_.named_steps["vect"] + assert best_vectorizer.ngram_range == (1, 1) + assert best_vectorizer.norm == "l2" + assert not best_vectorizer.fixed_vocabulary_ + + +def test_vectorizer_pipeline_cross_validation(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + cv_scores = cross_val_score(pipeline, data, target, cv=3) + assert_array_equal(cv_scores, [1.0, 1.0, 1.0]) + + +@fails_if_pypy +def test_vectorizer_unicode(): + # tests that the count vectorizer works with cyrillic. + document = ( + "Машинное обучение — обширный подраздел искусственного " + "интеллекта, изучающий методы построения алгоритмов, " + "способных обучаться." 
+ ) + + vect = CountVectorizer() + X_counted = vect.fit_transform([document]) + assert X_counted.shape == (1, 12) + + vect = HashingVectorizer(norm=None, alternate_sign=False) + X_hashed = vect.transform([document]) + assert X_hashed.shape == (1, 2**20) + + # No collisions on such a small dataset + assert X_counted.nnz == X_hashed.nnz + + # When norm is None and not alternate_sign, the tokens are counted up to + # collisions + assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data)) + + +def test_tfidf_vectorizer_with_fixed_vocabulary(): + # non regression smoke test for inheritance issues + vocabulary = ["pizza", "celeri"] + vect = TfidfVectorizer(vocabulary=vocabulary) + X_1 = vect.fit_transform(ALL_FOOD_DOCS) + X_2 = vect.transform(ALL_FOOD_DOCS) + assert_array_almost_equal(X_1.toarray(), X_2.toarray()) + assert vect.fixed_vocabulary_ + + +def test_pickling_vectorizer(): + instances = [ + HashingVectorizer(), + HashingVectorizer(norm="l1"), + HashingVectorizer(binary=True), + HashingVectorizer(ngram_range=(1, 2)), + CountVectorizer(), + CountVectorizer(preprocessor=strip_tags), + CountVectorizer(analyzer=lazy_analyze), + CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), + CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), + TfidfVectorizer(), + TfidfVectorizer(analyzer=lazy_analyze), + TfidfVectorizer().fit(JUNK_FOOD_DOCS), + ] + + for orig in instances: + s = pickle.dumps(orig) + copy = pickle.loads(s) + assert type(copy) == orig.__class__ + assert copy.get_params() == orig.get_params() + if IS_PYPY and isinstance(orig, HashingVectorizer): + continue + else: + assert_allclose_dense_sparse( + copy.fit_transform(JUNK_FOOD_DOCS), + orig.fit_transform(JUNK_FOOD_DOCS), + ) + + +@pytest.mark.parametrize( + "factory", + [ + CountVectorizer.build_analyzer, + CountVectorizer.build_preprocessor, + CountVectorizer.build_tokenizer, + ], +) +def test_pickling_built_processors(factory): + """Tokenizers cannot be pickled + https://github.com/scikit-learn/scikit-learn/issues/12833 + """ + vec = CountVectorizer() + function = factory(vec) + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." 
+ roundtripped_function = pickle.loads(pickle.dumps(function)) + expected = function(text) + result = roundtripped_function(text) + assert result == expected + + +def test_countvectorizer_vocab_sets_when_pickling(): + # ensure that vocabulary of type set is coerced to a list to + # preserve iteration ordering after deserialization + rng = np.random.RandomState(0) + vocab_words = np.array( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + ) + for x in range(0, 100): + vocab_set = set(rng.choice(vocab_words, size=5, replace=False)) + cv = CountVectorizer(vocabulary=vocab_set) + unpickled_cv = pickle.loads(pickle.dumps(cv)) + cv.fit(ALL_FOOD_DOCS) + unpickled_cv.fit(ALL_FOOD_DOCS) + assert_array_equal( + cv.get_feature_names_out(), unpickled_cv.get_feature_names_out() + ) + + +def test_countvectorizer_vocab_dicts_when_pickling(): + rng = np.random.RandomState(0) + vocab_words = np.array( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + ) + for x in range(0, 100): + vocab_dict = dict() + words = rng.choice(vocab_words, size=5, replace=False) + for y in range(0, 5): + vocab_dict[words[y]] = y + cv = CountVectorizer(vocabulary=vocab_dict) + unpickled_cv = pickle.loads(pickle.dumps(cv)) + cv.fit(ALL_FOOD_DOCS) + unpickled_cv.fit(ALL_FOOD_DOCS) + assert_array_equal( + cv.get_feature_names_out(), unpickled_cv.get_feature_names_out() + ) + + +def test_stop_words_removal(): + # Ensure that deleting the stop_words_ attribute doesn't affect transform + + fitted_vectorizers = ( + TfidfVectorizer().fit(JUNK_FOOD_DOCS), + CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), + CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), + ) + + for vect in fitted_vectorizers: + vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + vect.stop_words_ = None + stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + delattr(vect, "stop_words_") + stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + assert_array_equal(stop_None_transform, vect_transform) + assert_array_equal(stop_del_transform, vect_transform) + + +def test_pickling_transformer(): + X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) + orig = TfidfTransformer().fit(X) + s = pickle.dumps(orig) + copy = pickle.loads(s) + assert type(copy) == orig.__class__ + assert_array_equal(copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray()) + + +def test_transformer_idf_setter(): + X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) + orig = TfidfTransformer().fit(X) + copy = TfidfTransformer() + copy.idf_ = orig.idf_ + assert_array_equal(copy.transform(X).toarray(), orig.transform(X).toarray()) + + +def test_tfidf_vectorizer_setter(): + orig = TfidfVectorizer(use_idf=True) + orig.fit(JUNK_FOOD_DOCS) + copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True) + copy.idf_ = orig.idf_ + assert_array_equal( + copy.transform(JUNK_FOOD_DOCS).toarray(), + orig.transform(JUNK_FOOD_DOCS).toarray(), + ) + # `idf_` cannot be set with `use_idf=False` + copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=False) + err_msg = "`idf_` cannot be set when `user_idf=False`." 
+ with pytest.raises(ValueError, match=err_msg): + copy.idf_ = orig.idf_ + + +def test_tfidfvectorizer_invalid_idf_attr(): + vect = TfidfVectorizer(use_idf=True) + vect.fit(JUNK_FOOD_DOCS) + copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True) + expected_idf_len = len(vect.idf_) + invalid_idf = [1.0] * (expected_idf_len + 1) + with pytest.raises(ValueError): + setattr(copy, "idf_", invalid_idf) + + +def test_non_unique_vocab(): + vocab = ["a", "b", "c", "a", "a"] + vect = CountVectorizer(vocabulary=vocab) + with pytest.raises(ValueError): + vect.fit([]) + + +@fails_if_pypy +def test_hashingvectorizer_nan_in_docs(): + # np.nan can appear when using pandas to load text fields from a csv file + # with missing values. + message = "np.nan is an invalid document, expected byte or unicode string." + exception = ValueError + + def func(): + hv = HashingVectorizer() + hv.fit_transform(["hello world", np.nan, "hello hello"]) + + with pytest.raises(exception, match=message): + func() + + +def test_tfidfvectorizer_binary(): + # Non-regression test: TfidfVectorizer used to ignore its "binary" param. + v = TfidfVectorizer(binary=True, use_idf=False, norm=None) + assert v.binary + + X = v.fit_transform(["hello world", "hello hello"]).toarray() + assert_array_equal(X.ravel(), [1, 1, 1, 0]) + X2 = v.transform(["hello world", "hello hello"]).toarray() + assert_array_equal(X2.ravel(), [1, 1, 1, 0]) + + +def test_tfidfvectorizer_export_idf(): + vect = TfidfVectorizer(use_idf=True) + vect.fit(JUNK_FOOD_DOCS) + assert_array_almost_equal(vect.idf_, vect._tfidf.idf_) + + +def test_vectorizer_vocab_clone(): + vect_vocab = TfidfVectorizer(vocabulary=["the"]) + vect_vocab_clone = clone(vect_vocab) + vect_vocab.fit(ALL_FOOD_DOCS) + vect_vocab_clone.fit(ALL_FOOD_DOCS) + assert vect_vocab_clone.vocabulary_ == vect_vocab.vocabulary_ + + +@pytest.mark.parametrize( + "Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer) +) +def test_vectorizer_string_object_as_input(Vectorizer): + message = "Iterable over raw text documents expected, string object received." 
+ vec = Vectorizer() + + with pytest.raises(ValueError, match=message): + vec.fit_transform("hello world!") + + with pytest.raises(ValueError, match=message): + vec.fit("hello world!") + vec.fit(["some text", "some other text"]) + + with pytest.raises(ValueError, match=message): + vec.transform("hello world!") + + +@pytest.mark.parametrize("X_dtype", [np.float32, np.float64]) +def test_tfidf_transformer_type(X_dtype): + X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42) + X_trans = TfidfTransformer().fit_transform(X) + assert X_trans.dtype == X.dtype + + +@pytest.mark.parametrize( + "csc_container, csr_container", product(CSC_CONTAINERS, CSR_CONTAINERS) +) +def test_tfidf_transformer_sparse(csc_container, csr_container): + X = sparse.rand(10, 20000, dtype=np.float64, random_state=42) + X_csc = csc_container(X) + X_csr = csr_container(X) + + X_trans_csc = TfidfTransformer().fit_transform(X_csc) + X_trans_csr = TfidfTransformer().fit_transform(X_csr) + assert_allclose_dense_sparse(X_trans_csc, X_trans_csr) + assert X_trans_csc.format == X_trans_csr.format + + +@pytest.mark.parametrize( + "vectorizer_dtype, output_dtype, warning_expected", + [ + (np.int32, np.float64, True), + (np.int64, np.float64, True), + (np.float32, np.float32, False), + (np.float64, np.float64, False), + ], +) +def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype, warning_expected): + X = np.array(["numpy", "scipy", "sklearn"]) + vectorizer = TfidfVectorizer(dtype=vectorizer_dtype) + + warning_msg_match = "'dtype' should be used." + if warning_expected: + with pytest.warns(UserWarning, match=warning_msg_match): + X_idf = vectorizer.fit_transform(X) + else: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + X_idf = vectorizer.fit_transform(X) + assert X_idf.dtype == output_dtype + + +@pytest.mark.parametrize( + "vec", + [ + HashingVectorizer(ngram_range=(2, 1)), + CountVectorizer(ngram_range=(2, 1)), + TfidfVectorizer(ngram_range=(2, 1)), + ], +) +def test_vectorizers_invalid_ngram_range(vec): + # vectorizers could be initialized with invalid ngram range + # test for raising error message + invalid_range = vec.ngram_range + message = re.escape( + f"Invalid value for ngram_range={invalid_range} " + "lower boundary larger than the upper boundary." + ) + if isinstance(vec, HashingVectorizer) and IS_PYPY: + pytest.xfail(reason="HashingVectorizer is not supported on PyPy") + + with pytest.raises(ValueError, match=message): + vec.fit(["good news everyone"]) + + with pytest.raises(ValueError, match=message): + vec.fit_transform(["good news everyone"]) + + if isinstance(vec, HashingVectorizer): + with pytest.raises(ValueError, match=message): + vec.transform(["good news everyone"]) + + +def _check_stop_words_consistency(estimator): + stop_words = estimator.get_stop_words() + tokenize = estimator.build_tokenizer() + preprocess = estimator.build_preprocessor() + return estimator._check_stop_words_consistency(stop_words, preprocess, tokenize) + + +@fails_if_pypy +def test_vectorizer_stop_words_inconsistent(): + lstr = r"\['and', 'll', 've'\]" + message = ( + "Your stop_words may be inconsistent with your " + "preprocessing. Tokenizing the stop words generated " + "tokens %s not in stop_words." 
% lstr + ) + for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]: + vec.set_params(stop_words=["you've", "you", "you'll", "AND"]) + with pytest.warns(UserWarning, match=message): + vec.fit_transform(["hello world"]) + # reset stop word validation + del vec._stop_words_id + assert _check_stop_words_consistency(vec) is False + + # Only one warning per stop list + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + vec.fit_transform(["hello world"]) + assert _check_stop_words_consistency(vec) is None + + # Test caching of inconsistency assessment + vec.set_params(stop_words=["you've", "you", "you'll", "blah", "AND"]) + with pytest.warns(UserWarning, match=message): + vec.fit_transform(["hello world"]) + + +@skip_if_32bit +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_countvectorizer_sort_features_64bit_sparse_indices(csr_container): + """ + Check that CountVectorizer._sort_features preserves the dtype of its sparse + feature matrix. + + This test is skipped on 32bit platforms, see: + https://github.com/scikit-learn/scikit-learn/pull/11295 + for more details. + """ + + X = csr_container((5, 5), dtype=np.int64) + + # force indices and indptr to int64. + INDICES_DTYPE = np.int64 + X.indices = X.indices.astype(INDICES_DTYPE) + X.indptr = X.indptr.astype(INDICES_DTYPE) + + vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2} + + Xs = CountVectorizer()._sort_features(X, vocabulary) + + assert INDICES_DTYPE == Xs.indices.dtype + + +@fails_if_pypy +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +def test_stop_word_validation_custom_preprocessor(Estimator): + data = [{"text": "some text"}] + + vec = Estimator() + assert _check_stop_words_consistency(vec) is True + + vec = Estimator(preprocessor=lambda x: x["text"], stop_words=["and"]) + assert _check_stop_words_consistency(vec) == "error" + # checks are cached + assert _check_stop_words_consistency(vec) is None + vec.fit_transform(data) + + class CustomEstimator(Estimator): + def build_preprocessor(self): + return lambda x: x["text"] + + vec = CustomEstimator(stop_words=["and"]) + assert _check_stop_words_consistency(vec) == "error" + + vec = Estimator( + tokenizer=lambda doc: re.compile(r"\w{1,}").findall(doc), stop_words=["and"] + ) + assert _check_stop_words_consistency(vec) is True + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +@pytest.mark.parametrize( + "input_type, err_type, err_msg", + [ + ("filename", FileNotFoundError, ""), + ("file", AttributeError, "'str' object has no attribute 'read'"), + ], +) +def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg): + if issubclass(Estimator, HashingVectorizer) and IS_PYPY: + pytest.xfail("HashingVectorizer is not supported on PyPy") + data = ["this is text, not file or filename"] + with pytest.raises(err_type, match=err_msg): + Estimator(analyzer=lambda x: x.split(), input=input_type).fit_transform(data) + + +@pytest.mark.parametrize( + "Estimator", + [ + CountVectorizer, + TfidfVectorizer, + pytest.param(HashingVectorizer, marks=fails_if_pypy), + ], +) +@pytest.mark.parametrize( + "analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()] +) +@pytest.mark.parametrize("input_type", ["file", "filename"]) +def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type): + data = ["this is text, not file or filename"] + with pytest.raises((FileNotFoundError, AttributeError)): + 
Estimator(analyzer=analyzer, input=input_type).fit_transform(data) + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +def test_callable_analyzer_reraise_error(tmpdir, Estimator): + # check if a custom exception from the analyzer is shown to the user + def analyzer(doc): + raise Exception("testing") + + if issubclass(Estimator, HashingVectorizer) and IS_PYPY: + pytest.xfail("HashingVectorizer is not supported on PyPy") + + f = tmpdir.join("file.txt") + f.write("sample content\n") + + with pytest.raises(Exception, match="testing"): + Estimator(analyzer=analyzer, input="file").fit_transform([f]) + + +@pytest.mark.parametrize( + "Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer] +) +@pytest.mark.parametrize( + ( + "stop_words, tokenizer, preprocessor, ngram_range, token_pattern," + "analyzer, unused_name, ovrd_name, ovrd_msg" + ), + [ + ( + ["you've", "you'll"], + None, + None, + (1, 1), + None, + "char", + "'stop_words'", + "'analyzer'", + "!= 'word'", + ), + ( + None, + lambda s: s.split(), + None, + (1, 1), + None, + "char", + "'tokenizer'", + "'analyzer'", + "!= 'word'", + ), + ( + None, + lambda s: s.split(), + None, + (1, 1), + r"\w+", + "word", + "'token_pattern'", + "'tokenizer'", + "is not None", + ), + ( + None, + None, + lambda s: s.upper(), + (1, 1), + r"\w+", + lambda s: s.upper(), + "'preprocessor'", + "'analyzer'", + "is callable", + ), + ( + None, + None, + None, + (1, 2), + None, + lambda s: s.upper(), + "'ngram_range'", + "'analyzer'", + "is callable", + ), + ( + None, + None, + None, + (1, 1), + r"\w+", + "char", + "'token_pattern'", + "'analyzer'", + "!= 'word'", + ), + ], +) +def test_unused_parameters_warn( + Vectorizer, + stop_words, + tokenizer, + preprocessor, + ngram_range, + token_pattern, + analyzer, + unused_name, + ovrd_name, + ovrd_msg, +): + train_data = JUNK_FOOD_DOCS + # setting parameter and checking for corresponding warning messages + vect = Vectorizer() + vect.set_params( + stop_words=stop_words, + tokenizer=tokenizer, + preprocessor=preprocessor, + ngram_range=ngram_range, + token_pattern=token_pattern, + analyzer=analyzer, + ) + msg = "The parameter %s will not be used since %s %s" % ( + unused_name, + ovrd_name, + ovrd_msg, + ) + with pytest.warns(UserWarning, match=msg): + vect.fit(train_data) + + +@pytest.mark.parametrize( + "Vectorizer, X", + ( + (HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]), + (CountVectorizer, JUNK_FOOD_DOCS), + ), +) +def test_n_features_in(Vectorizer, X): + # For vectorizers, n_features_in_ does not make sense + vectorizer = Vectorizer() + assert not hasattr(vectorizer, "n_features_in_") + vectorizer.fit(X) + assert not hasattr(vectorizer, "n_features_in_") + + +def test_tie_breaking_sample_order_invariance(): + # Checks the sample order invariance when setting max_features + # non-regression test for #17939 + vec = CountVectorizer(max_features=1) + vocab1 = vec.fit(["hello", "world"]).vocabulary_ + vocab2 = vec.fit(["world", "hello"]).vocabulary_ + assert vocab1 == vocab2 + + +@fails_if_pypy +def test_nonnegative_hashing_vectorizer_result_indices(): + # add test for pr 19035 + hashing = HashingVectorizer(n_features=1000000, ngram_range=(2, 3)) + indices = hashing.transform(["22pcs efuture"]).indices + assert indices[0] >= 0 + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, TfidfTransformer, HashingVectorizer] +) +def test_vectorizers_do_not_have_set_output(Estimator): + """Check that vectorizers do not define 
set_output.""" + est = Estimator() + assert not hasattr(est, "set_output") diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/text.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/text.py new file mode 100644 index 0000000000000000000000000000000000000000..29104c29e74acef6bcad1f1cc71c24757ea951a1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/text.py @@ -0,0 +1,2166 @@ +# Authors: Olivier Grisel +# Mathieu Blondel +# Lars Buitinck +# Robert Layton +# Jochen Wersdörfer +# Roman Sinayev +# +# License: BSD 3 clause +""" +The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to +build feature vectors from text documents. +""" + +import array +import re +import unicodedata +import warnings +from collections import defaultdict +from collections.abc import Mapping +from functools import partial +from numbers import Integral +from operator import itemgetter + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context +from ..exceptions import NotFittedError +from ..preprocessing import normalize +from ..utils import _IS_32BIT +from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions +from ..utils.validation import FLOAT_DTYPES, check_array, check_is_fitted +from ._hash import FeatureHasher +from ._stop_words import ENGLISH_STOP_WORDS + +__all__ = [ + "HashingVectorizer", + "CountVectorizer", + "ENGLISH_STOP_WORDS", + "TfidfTransformer", + "TfidfVectorizer", + "strip_accents_ascii", + "strip_accents_unicode", + "strip_tags", +] + + +def _preprocess(doc, accent_function=None, lower=False): + """Chain together an optional series of text preprocessing steps to + apply to a document. + + Parameters + ---------- + doc: str + The string to preprocess + accent_function: callable, default=None + Function for handling accented characters. Common strategies include + normalizing and removing. + lower: bool, default=False + Whether to use str.lower to lowercase all of the text + + Returns + ------- + doc: str + preprocessed string + """ + if lower: + doc = doc.lower() + if accent_function is not None: + doc = accent_function(doc) + return doc + + +def _analyze( + doc, + analyzer=None, + tokenizer=None, + ngrams=None, + preprocessor=None, + decoder=None, + stop_words=None, +): + """Chain together an optional series of text processing steps to go from + a single document to ngrams, with or without tokenizing or preprocessing. + + If analyzer is used, only the decoder argument is used, as the analyzer is + intended to replace the preprocessor, tokenizer, and ngrams steps. + + Parameters + ---------- + analyzer: callable, default=None + tokenizer: callable, default=None + ngrams: callable, default=None + preprocessor: callable, default=None + decoder: callable, default=None + stop_words: list, default=None + + Returns + ------- + ngrams: list + A sequence of tokens, possibly with pairs, triples, etc. + """ + + if decoder is not None: + doc = decoder(doc) + if analyzer is not None: + doc = analyzer(doc) + else: + if preprocessor is not None: + doc = preprocessor(doc) + if tokenizer is not None: + doc = tokenizer(doc) + if ngrams is not None: + if stop_words is not None: + doc = ngrams(doc, stop_words) + else: + doc = ngrams(doc) + return doc + + +def strip_accents_unicode(s): + """Transform accentuated unicode symbols into their simple counterpart. 
+ + Warning: the python-level loop and join operations make this + implementation 20 times slower than the strip_accents_ascii basic + normalization. + + Parameters + ---------- + s : str + The string to strip. + + Returns + ------- + s : str + The stripped string. + + See Also + -------- + strip_accents_ascii : Remove accentuated char for any unicode symbol that + has a direct ASCII equivalent. + """ + try: + # If `s` is ASCII-compatible, then it does not contain any accented + # characters and we can avoid an expensive list comprehension + s.encode("ASCII", errors="strict") + return s + except UnicodeEncodeError: + normalized = unicodedata.normalize("NFKD", s) + return "".join([c for c in normalized if not unicodedata.combining(c)]) + + +def strip_accents_ascii(s): + """Transform accentuated unicode symbols into ascii or nothing. + + Warning: this solution is only suited for languages that have a direct + transliteration to ASCII symbols. + + Parameters + ---------- + s : str + The string to strip. + + Returns + ------- + s : str + The stripped string. + + See Also + -------- + strip_accents_unicode : Remove accentuated char for any unicode symbol. + """ + nkfd_form = unicodedata.normalize("NFKD", s) + return nkfd_form.encode("ASCII", "ignore").decode("ASCII") + + +def strip_tags(s): + """Basic regexp based HTML / XML tag stripper function. + + For serious HTML/XML preprocessing you should rather use an external + library such as lxml or BeautifulSoup. + + Parameters + ---------- + s : str + The string to strip. + + Returns + ------- + s : str + The stripped string. + """ + return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) + + +def _check_stop_list(stop): + if stop == "english": + return ENGLISH_STOP_WORDS + elif isinstance(stop, str): + raise ValueError("not a built-in stop list: %s" % stop) + elif stop is None: + return None + else: # assume it's a collection + return frozenset(stop) + + +class _VectorizerMixin: + """Provides common code for text vectorizers (tokenization logic).""" + + _white_spaces = re.compile(r"\s\s+") + + def decode(self, doc): + """Decode the input into a string of unicode symbols. + + The decoding strategy depends on the vectorizer parameters. + + Parameters + ---------- + doc : bytes or str + The string to decode. + + Returns + ------- + doc: str + A string of unicode symbols. + """ + if self.input == "filename": + with open(doc, "rb") as fh: + doc = fh.read() + + elif self.input == "file": + doc = doc.read() + + if isinstance(doc, bytes): + doc = doc.decode(self.encoding, self.decode_error) + + if doc is np.nan: + raise ValueError( + "np.nan is an invalid document, expected byte or unicode string." 
+ ) + + return doc + + def _word_ngrams(self, tokens, stop_words=None): + """Turn tokens into a sequence of n-grams after stop words filtering""" + # handle stop words + if stop_words is not None: + tokens = [w for w in tokens if w not in stop_words] + + # handle token n-grams + min_n, max_n = self.ngram_range + if max_n != 1: + original_tokens = tokens + if min_n == 1: + # no need to do any slicing for unigrams + # just iterate through the original tokens + tokens = list(original_tokens) + min_n += 1 + else: + tokens = [] + + n_original_tokens = len(original_tokens) + + # bind method outside of loop to reduce overhead + tokens_append = tokens.append + space_join = " ".join + + for n in range(min_n, min(max_n + 1, n_original_tokens + 1)): + for i in range(n_original_tokens - n + 1): + tokens_append(space_join(original_tokens[i : i + n])) + + return tokens + + def _char_ngrams(self, text_document): + """Tokenize text_document into a sequence of character n-grams""" + # normalize white spaces + text_document = self._white_spaces.sub(" ", text_document) + + text_len = len(text_document) + min_n, max_n = self.ngram_range + if min_n == 1: + # no need to do any slicing for unigrams + # iterate through the string + ngrams = list(text_document) + min_n += 1 + else: + ngrams = [] + + # bind method outside of loop to reduce overhead + ngrams_append = ngrams.append + + for n in range(min_n, min(max_n + 1, text_len + 1)): + for i in range(text_len - n + 1): + ngrams_append(text_document[i : i + n]) + return ngrams + + def _char_wb_ngrams(self, text_document): + """Whitespace sensitive char-n-gram tokenization. + + Tokenize text_document into a sequence of character n-grams + operating only inside word boundaries. n-grams at the edges + of words are padded with space.""" + # normalize white spaces + text_document = self._white_spaces.sub(" ", text_document) + + min_n, max_n = self.ngram_range + ngrams = [] + + # bind method outside of loop to reduce overhead + ngrams_append = ngrams.append + + for w in text_document.split(): + w = " " + w + " " + w_len = len(w) + for n in range(min_n, max_n + 1): + offset = 0 + ngrams_append(w[offset : offset + n]) + while offset + n < w_len: + offset += 1 + ngrams_append(w[offset : offset + n]) + if offset == 0: # count a short word (w_len < n) only once + break + return ngrams + + def build_preprocessor(self): + """Return a function to preprocess the text before tokenization. + + Returns + ------- + preprocessor: callable + A function to preprocess the text before tokenization. + """ + if self.preprocessor is not None: + return self.preprocessor + + # accent stripping + if not self.strip_accents: + strip_accents = None + elif callable(self.strip_accents): + strip_accents = self.strip_accents + elif self.strip_accents == "ascii": + strip_accents = strip_accents_ascii + elif self.strip_accents == "unicode": + strip_accents = strip_accents_unicode + else: + raise ValueError( + 'Invalid value for "strip_accents": %s' % self.strip_accents + ) + + return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase) + + def build_tokenizer(self): + """Return a function that splits a string into a sequence of tokens. + + Returns + ------- + tokenizer: callable + A function to split a string into a sequence of tokens. + """ + if self.tokenizer is not None: + return self.tokenizer + token_pattern = re.compile(self.token_pattern) + + if token_pattern.groups > 1: + raise ValueError( + "More than 1 capturing group in token pattern. 
Only a single " + "group should be captured." + ) + + return token_pattern.findall + + def get_stop_words(self): + """Build or fetch the effective stop words list. + + Returns + ------- + stop_words: list or None + A list of stop words. + """ + return _check_stop_list(self.stop_words) + + def _check_stop_words_consistency(self, stop_words, preprocess, tokenize): + """Check if stop words are consistent + + Returns + ------- + is_consistent : True if stop words are consistent with the preprocessor + and tokenizer, False if they are not, None if the check + was previously performed, "error" if it could not be + performed (e.g. because of the use of a custom + preprocessor / tokenizer) + """ + if id(self.stop_words) == getattr(self, "_stop_words_id", None): + # Stop words are were previously validated + return None + + # NB: stop_words is validated, unlike self.stop_words + try: + inconsistent = set() + for w in stop_words or (): + tokens = list(tokenize(preprocess(w))) + for token in tokens: + if token not in stop_words: + inconsistent.add(token) + self._stop_words_id = id(self.stop_words) + + if inconsistent: + warnings.warn( + "Your stop_words may be inconsistent with " + "your preprocessing. Tokenizing the stop " + "words generated tokens %r not in " + "stop_words." + % sorted(inconsistent) + ) + return not inconsistent + except Exception: + # Failed to check stop words consistency (e.g. because a custom + # preprocessor or tokenizer was used) + self._stop_words_id = id(self.stop_words) + return "error" + + def build_analyzer(self): + """Return a callable to process input data. + + The callable handles preprocessing, tokenization, and n-grams generation. + + Returns + ------- + analyzer: callable + A function to handle preprocessing, tokenization + and n-grams generation. + """ + + if callable(self.analyzer): + return partial(_analyze, analyzer=self.analyzer, decoder=self.decode) + + preprocess = self.build_preprocessor() + + if self.analyzer == "char": + return partial( + _analyze, + ngrams=self._char_ngrams, + preprocessor=preprocess, + decoder=self.decode, + ) + + elif self.analyzer == "char_wb": + return partial( + _analyze, + ngrams=self._char_wb_ngrams, + preprocessor=preprocess, + decoder=self.decode, + ) + + elif self.analyzer == "word": + stop_words = self.get_stop_words() + tokenize = self.build_tokenizer() + self._check_stop_words_consistency(stop_words, preprocess, tokenize) + return partial( + _analyze, + ngrams=self._word_ngrams, + tokenizer=tokenize, + preprocessor=preprocess, + decoder=self.decode, + stop_words=stop_words, + ) + + else: + raise ValueError( + "%s is not a valid tokenization scheme/analyzer" % self.analyzer + ) + + def _validate_vocabulary(self): + vocabulary = self.vocabulary + if vocabulary is not None: + if isinstance(vocabulary, set): + vocabulary = sorted(vocabulary) + if not isinstance(vocabulary, Mapping): + vocab = {} + for i, t in enumerate(vocabulary): + if vocab.setdefault(t, i) != i: + msg = "Duplicate term in vocabulary: %r" % t + raise ValueError(msg) + vocabulary = vocab + else: + indices = set(vocabulary.values()) + if len(indices) != len(vocabulary): + raise ValueError("Vocabulary contains repeated indices.") + for i in range(len(vocabulary)): + if i not in indices: + msg = "Vocabulary of size %d doesn't contain index %d." 
% ( + len(vocabulary), + i, + ) + raise ValueError(msg) + if not vocabulary: + raise ValueError("empty vocabulary passed to fit") + self.fixed_vocabulary_ = True + self.vocabulary_ = dict(vocabulary) + else: + self.fixed_vocabulary_ = False + + def _check_vocabulary(self): + """Check if vocabulary is empty or missing (not fitted)""" + if not hasattr(self, "vocabulary_"): + self._validate_vocabulary() + if not self.fixed_vocabulary_: + raise NotFittedError("Vocabulary not fitted or provided") + + if len(self.vocabulary_) == 0: + raise ValueError("Vocabulary is empty") + + def _validate_ngram_range(self): + """Check validity of ngram_range parameter""" + min_n, max_m = self.ngram_range + if min_n > max_m: + raise ValueError( + "Invalid value for ngram_range=%s " + "lower boundary larger than the upper boundary." + % str(self.ngram_range) + ) + + def _warn_for_unused_params(self): + if self.tokenizer is not None and self.token_pattern is not None: + warnings.warn( + "The parameter 'token_pattern' will not be used" + " since 'tokenizer' is not None'" + ) + + if self.preprocessor is not None and callable(self.analyzer): + warnings.warn( + "The parameter 'preprocessor' will not be used" + " since 'analyzer' is callable'" + ) + + if ( + self.ngram_range != (1, 1) + and self.ngram_range is not None + and callable(self.analyzer) + ): + warnings.warn( + "The parameter 'ngram_range' will not be used" + " since 'analyzer' is callable'" + ) + if self.analyzer != "word" or callable(self.analyzer): + if self.stop_words is not None: + warnings.warn( + "The parameter 'stop_words' will not be used" + " since 'analyzer' != 'word'" + ) + if ( + self.token_pattern is not None + and self.token_pattern != r"(?u)\b\w\w+\b" + ): + warnings.warn( + "The parameter 'token_pattern' will not be used" + " since 'analyzer' != 'word'" + ) + if self.tokenizer is not None: + warnings.warn( + "The parameter 'tokenizer' will not be used" + " since 'analyzer' != 'word'" + ) + + +class HashingVectorizer( + TransformerMixin, _VectorizerMixin, BaseEstimator, auto_wrap_output_keys=None +): + r"""Convert a collection of text documents to a matrix of token occurrences. + + It turns a collection of text documents into a scipy.sparse matrix holding + token occurrence counts (or binary occurrence information), possibly + normalized as token frequencies if norm='l1' or projected on the euclidean + unit sphere if norm='l2'. + + This text vectorizer implementation uses the hashing trick to find the + token string name to feature integer index mapping. + + This strategy has several advantages: + + - it is very low memory scalable to large datasets as there is no need to + store a vocabulary dictionary in memory. + + - it is fast to pickle and un-pickle as it holds no state besides the + constructor parameters. + + - it can be used in a streaming (partial fit) or parallel pipeline as there + is no state computed during fit. + + There are also a couple of cons (vs using a CountVectorizer with an + in-memory vocabulary): + + - there is no way to compute the inverse transform (from feature indices to + string feature names) which can be a problem when trying to introspect + which features are most important to a model. + + - there can be collisions: distinct tokens can be mapped to the same + feature index. However in practice this is rarely an issue if n_features + is large enough (e.g. 2 ** 18 for text classification problems). + + - no IDF weighting as this would render the transformer stateful. 
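A minimal usage sketch of the trade-offs listed above, assuming only that scikit-learn is importable; this snippet is an editorial illustration and not part of the patched module. It shows that an in-memory vocabulary keeps feature indices interpretable, while the stateless hasher does not, and that a deliberately tiny n_features makes collisions likely.

# Illustration only (not part of the sklearn source being diffed above).
from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer

docs = ["the cat sat", "the dog sat", "the cat and the dog"]

# CountVectorizer stores a vocabulary_, so columns map back to token strings.
count_vec = CountVectorizer()
X_count = count_vec.fit_transform(docs)
print(count_vec.get_feature_names_out())  # token strings are recoverable

# HashingVectorizer is stateless: no vocabulary_, no inverse mapping.
# With a very small n_features, distinct tokens may share a column (collision).
hash_vec = HashingVectorizer(n_features=8, norm=None, alternate_sign=False)
X_hash = hash_vec.transform(docs)  # no fit needed, the hasher holds no state
print(X_hash.toarray())            # columns are anonymous hash buckets

In practice n_features is left large (the default 2 ** 20, or 2 ** 18 for typical text classification) precisely so that such collisions stay rare.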
+ + The hash function employed is the signed 32-bit version of Murmurhash3. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + input : {'filename', 'file', 'content'}, default='content' + - If `'filename'`, the sequence passed as an argument to fit is + expected to be a list of filenames that need reading to fetch + the raw content to analyze. + + - If `'file'`, the sequence items must have a 'read' method (file-like + object) that is called to fetch the bytes in memory. + + - If `'content'`, the input is expected to be a sequence of items that + can be of type string or byte. + + encoding : str, default='utf-8' + If bytes or files are given to analyze, this encoding is used to + decode. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. By default, it is + 'strict', meaning that a UnicodeDecodeError will be raised. Other + values are 'ignore' and 'replace'. + + strip_accents : {'ascii', 'unicode'} or callable, default=None + Remove accents and perform other character normalization + during the preprocessing step. + 'ascii' is a fast method that only works on characters that have + a direct ASCII mapping. + 'unicode' is a slightly slower method that works on any character. + None (default) means no character normalization is performed. + + Both 'ascii' and 'unicode' use NFKD normalization from + :func:`unicodedata.normalize`. + + lowercase : bool, default=True + Convert all characters to lowercase before tokenizing. + + preprocessor : callable, default=None + Override the preprocessing (string transformation) stage while + preserving the tokenizing and n-grams generation steps. + Only applies if ``analyzer`` is not callable. + + tokenizer : callable, default=None + Override the string tokenization step while preserving the + preprocessing and n-grams generation steps. + Only applies if ``analyzer == 'word'``. + + stop_words : {'english'}, list, default=None + If 'english', a built-in stop word list for English is used. + There are several known issues with 'english' and you should + consider an alternative (see :ref:`stop_words`). + + If a list, that list is assumed to contain stop words, all of which + will be removed from the resulting tokens. + Only applies if ``analyzer == 'word'``. + + token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b" + Regular expression denoting what constitutes a "token", only used + if ``analyzer == 'word'``. The default regexp selects tokens of 2 + or more alphanumeric characters (punctuation is completely ignored + and always treated as a token separator). + + If there is a capturing group in token_pattern then the + captured group content, not the entire match, becomes the token. + At most one capturing group is permitted. + + ngram_range : tuple (min_n, max_n), default=(1, 1) + The lower and upper boundary of the range of n-values for different + n-grams to be extracted. All values of n such that min_n <= n <= max_n + will be used. For example an ``ngram_range`` of ``(1, 1)`` means only + unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means + only bigrams. + Only applies if ``analyzer`` is not callable. 
+ + analyzer : {'word', 'char', 'char_wb'} or callable, default='word' + Whether the feature should be made of word or character n-grams. + Option 'char_wb' creates character n-grams only from text inside + word boundaries; n-grams at the edges of words are padded with space. + + If a callable is passed it is used to extract the sequence of features + out of the raw, unprocessed input. + + .. versionchanged:: 0.21 + Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data + is first read from the file and then passed to the given callable + analyzer. + + n_features : int, default=(2 ** 20) + The number of features (columns) in the output matrices. Small numbers + of features are likely to cause hash collisions, but large numbers + will cause larger coefficient dimensions in linear learners. + + binary : bool, default=False + If True, all non zero counts are set to 1. This is useful for discrete + probabilistic models that model binary events rather than integer + counts. + + norm : {'l1', 'l2'}, default='l2' + Norm used to normalize term vectors. None for no normalization. + + alternate_sign : bool, default=True + When True, an alternating sign is added to the features as to + approximately conserve the inner product in the hashed space even for + small n_features. This approach is similar to sparse random projection. + + .. versionadded:: 0.19 + + dtype : type, default=np.float64 + Type of the matrix returned by fit_transform() or transform(). + + See Also + -------- + CountVectorizer : Convert a collection of text documents to a matrix of + token counts. + TfidfVectorizer : Convert a collection of raw documents to a matrix of + TF-IDF features. + + Notes + ----- + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.feature_extraction.text import HashingVectorizer + >>> corpus = [ + ... 'This is the first document.', + ... 'This document is the second document.', + ... 'And this is the third one.', + ... 'Is this the first document?', + ... 
] + >>> vectorizer = HashingVectorizer(n_features=2**4) + >>> X = vectorizer.fit_transform(corpus) + >>> print(X.shape) + (4, 16) + """ + + _parameter_constraints: dict = { + "input": [StrOptions({"filename", "file", "content"})], + "encoding": [str], + "decode_error": [StrOptions({"strict", "ignore", "replace"})], + "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable], + "lowercase": ["boolean"], + "preprocessor": [callable, None], + "tokenizer": [callable, None], + "stop_words": [StrOptions({"english"}), list, None], + "token_pattern": [str, None], + "ngram_range": [tuple], + "analyzer": [StrOptions({"word", "char", "char_wb"}), callable], + "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="left")], + "binary": ["boolean"], + "norm": [StrOptions({"l1", "l2"}), None], + "alternate_sign": ["boolean"], + "dtype": "no_validation", # delegate to numpy + } + + def __init__( + self, + *, + input="content", + encoding="utf-8", + decode_error="strict", + strip_accents=None, + lowercase=True, + preprocessor=None, + tokenizer=None, + stop_words=None, + token_pattern=r"(?u)\b\w\w+\b", + ngram_range=(1, 1), + analyzer="word", + n_features=(2**20), + binary=False, + norm="l2", + alternate_sign=True, + dtype=np.float64, + ): + self.input = input + self.encoding = encoding + self.decode_error = decode_error + self.strip_accents = strip_accents + self.preprocessor = preprocessor + self.tokenizer = tokenizer + self.analyzer = analyzer + self.lowercase = lowercase + self.token_pattern = token_pattern + self.stop_words = stop_words + self.n_features = n_features + self.ngram_range = ngram_range + self.binary = binary + self.norm = norm + self.alternate_sign = alternate_sign + self.dtype = dtype + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : ndarray of shape [n_samples, n_features] + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + HashingVectorizer instance. + """ + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : ndarray of shape [n_samples, n_features] + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + HashingVectorizer instance. + """ + # triggers a parameter validation + if isinstance(X, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + + self._warn_for_unused_params() + self._validate_ngram_range() + + self._get_hasher().fit(X, y=y) + return self + + def transform(self, X): + """Transform a sequence of documents to a document-term matrix. + + Parameters + ---------- + X : iterable over raw text documents, length = n_samples + Samples. Each sample must be a text document (either bytes or + unicode strings, file name or file object depending on the + constructor argument) which will be tokenized and hashed. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Document-term matrix. 
+ """ + if isinstance(X, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + + self._validate_ngram_range() + + analyzer = self.build_analyzer() + X = self._get_hasher().transform(analyzer(doc) for doc in X) + if self.binary: + X.data.fill(1) + if self.norm is not None: + X = normalize(X, norm=self.norm, copy=False) + return X + + def fit_transform(self, X, y=None): + """Transform a sequence of documents to a document-term matrix. + + Parameters + ---------- + X : iterable over raw text documents, length = n_samples + Samples. Each sample must be a text document (either bytes or + unicode strings, file name or file object depending on the + constructor argument) which will be tokenized and hashed. + y : any + Ignored. This parameter exists only for compatibility with + sklearn.pipeline.Pipeline. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Document-term matrix. + """ + return self.fit(X, y).transform(X) + + def _get_hasher(self): + return FeatureHasher( + n_features=self.n_features, + input_type="string", + dtype=self.dtype, + alternate_sign=self.alternate_sign, + ) + + def _more_tags(self): + return {"X_types": ["string"]} + + +def _document_frequency(X): + """Count the number of non-zero values for each feature in sparse X.""" + if sp.issparse(X) and X.format == "csr": + return np.bincount(X.indices, minlength=X.shape[1]) + else: + return np.diff(X.indptr) + + +class CountVectorizer(_VectorizerMixin, BaseEstimator): + r"""Convert a collection of text documents to a matrix of token counts. + + This implementation produces a sparse representation of the counts using + scipy.sparse.csr_matrix. + + If you do not provide an a-priori dictionary and you do not use an analyzer + that does some kind of feature selection then the number of features will + be equal to the vocabulary size found by analyzing the data. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + input : {'filename', 'file', 'content'}, default='content' + - If `'filename'`, the sequence passed as an argument to fit is + expected to be a list of filenames that need reading to fetch + the raw content to analyze. + + - If `'file'`, the sequence items must have a 'read' method (file-like + object) that is called to fetch the bytes in memory. + + - If `'content'`, the input is expected to be a sequence of items that + can be of type string or byte. + + encoding : str, default='utf-8' + If bytes or files are given to analyze, this encoding is used to + decode. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. By default, it is + 'strict', meaning that a UnicodeDecodeError will be raised. Other + values are 'ignore' and 'replace'. + + strip_accents : {'ascii', 'unicode'} or callable, default=None + Remove accents and perform other character normalization + during the preprocessing step. + 'ascii' is a fast method that only works on characters that have + a direct ASCII mapping. + 'unicode' is a slightly slower method that works on any characters. + None (default) means no character normalization is performed. + + Both 'ascii' and 'unicode' use NFKD normalization from + :func:`unicodedata.normalize`. 
+ + lowercase : bool, default=True + Convert all characters to lowercase before tokenizing. + + preprocessor : callable, default=None + Override the preprocessing (strip_accents and lowercase) stage while + preserving the tokenizing and n-grams generation steps. + Only applies if ``analyzer`` is not callable. + + tokenizer : callable, default=None + Override the string tokenization step while preserving the + preprocessing and n-grams generation steps. + Only applies if ``analyzer == 'word'``. + + stop_words : {'english'}, list, default=None + If 'english', a built-in stop word list for English is used. + There are several known issues with 'english' and you should + consider an alternative (see :ref:`stop_words`). + + If a list, that list is assumed to contain stop words, all of which + will be removed from the resulting tokens. + Only applies if ``analyzer == 'word'``. + + If None, no stop words will be used. In this case, setting `max_df` + to a higher value, such as in the range (0.7, 1.0), can automatically detect + and filter stop words based on intra corpus document frequency of terms. + + token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b" + Regular expression denoting what constitutes a "token", only used + if ``analyzer == 'word'``. The default regexp select tokens of 2 + or more alphanumeric characters (punctuation is completely ignored + and always treated as a token separator). + + If there is a capturing group in token_pattern then the + captured group content, not the entire match, becomes the token. + At most one capturing group is permitted. + + ngram_range : tuple (min_n, max_n), default=(1, 1) + The lower and upper boundary of the range of n-values for different + word n-grams or char n-grams to be extracted. All values of n such + such that min_n <= n <= max_n will be used. For example an + ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means + unigrams and bigrams, and ``(2, 2)`` means only bigrams. + Only applies if ``analyzer`` is not callable. + + analyzer : {'word', 'char', 'char_wb'} or callable, default='word' + Whether the feature should be made of word n-gram or character + n-grams. + Option 'char_wb' creates character n-grams only from text inside + word boundaries; n-grams at the edges of words are padded with space. + + If a callable is passed it is used to extract the sequence of features + out of the raw, unprocessed input. + + .. versionchanged:: 0.21 + + Since v0.21, if ``input`` is ``filename`` or ``file``, the data is + first read from the file and then passed to the given callable + analyzer. + + max_df : float in range [0.0, 1.0] or int, default=1.0 + When building the vocabulary ignore terms that have a document + frequency strictly higher than the given threshold (corpus-specific + stop words). + If float, the parameter represents a proportion of documents, integer + absolute counts. + This parameter is ignored if vocabulary is not None. + + min_df : float in range [0.0, 1.0] or int, default=1 + When building the vocabulary ignore terms that have a document + frequency strictly lower than the given threshold. This value is also + called cut-off in the literature. + If float, the parameter represents a proportion of documents, integer + absolute counts. + This parameter is ignored if vocabulary is not None. + + max_features : int, default=None + If not None, build a vocabulary that only consider the top + `max_features` ordered by term frequency across the corpus. + Otherwise, all features are used. 
+ + This parameter is ignored if vocabulary is not None. + + vocabulary : Mapping or iterable, default=None + Either a Mapping (e.g., a dict) where keys are terms and values are + indices in the feature matrix, or an iterable over terms. If not + given, a vocabulary is determined from the input documents. Indices + in the mapping should not be repeated and should not have any gap + between 0 and the largest index. + + binary : bool, default=False + If True, all non zero counts are set to 1. This is useful for discrete + probabilistic models that model binary events rather than integer + counts. + + dtype : dtype, default=np.int64 + Type of the matrix returned by fit_transform() or transform(). + + Attributes + ---------- + vocabulary_ : dict + A mapping of terms to feature indices. + + fixed_vocabulary_ : bool + True if a fixed vocabulary of term to indices mapping + is provided by the user. + + stop_words_ : set + Terms that were ignored because they either: + + - occurred in too many documents (`max_df`) + - occurred in too few documents (`min_df`) + - were cut off by feature selection (`max_features`). + + This is only available if no vocabulary was given. + + See Also + -------- + HashingVectorizer : Convert a collection of text documents to a + matrix of token counts. + + TfidfVectorizer : Convert a collection of raw documents to a matrix + of TF-IDF features. + + Notes + ----- + The ``stop_words_`` attribute can get large and increase the model size + when pickling. This attribute is provided only for introspection and can + be safely removed using delattr or set to None before pickling. + + Examples + -------- + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> corpus = [ + ... 'This is the first document.', + ... 'This document is the second document.', + ... 'And this is the third one.', + ... 'Is this the first document?', + ... ] + >>> vectorizer = CountVectorizer() + >>> X = vectorizer.fit_transform(corpus) + >>> vectorizer.get_feature_names_out() + array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', + 'this'], ...) + >>> print(X.toarray()) + [[0 1 1 1 0 0 1 0 1] + [0 2 0 1 0 1 1 0 1] + [1 0 0 1 1 0 1 1 1] + [0 1 1 1 0 0 1 0 1]] + >>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2)) + >>> X2 = vectorizer2.fit_transform(corpus) + >>> vectorizer2.get_feature_names_out() + array(['and this', 'document is', 'first document', 'is the', 'is this', + 'second document', 'the first', 'the second', 'the third', 'third one', + 'this document', 'this is', 'this the'], ...) 
+ >>> print(X2.toarray()) + [[0 0 1 1 0 0 1 0 0 0 0 1 0] + [0 1 0 1 0 1 0 1 0 0 1 0 0] + [1 0 0 1 0 0 0 0 1 1 0 1 0] + [0 0 1 0 1 0 1 0 0 0 0 0 1]] + """ + + _parameter_constraints: dict = { + "input": [StrOptions({"filename", "file", "content"})], + "encoding": [str], + "decode_error": [StrOptions({"strict", "ignore", "replace"})], + "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable], + "lowercase": ["boolean"], + "preprocessor": [callable, None], + "tokenizer": [callable, None], + "stop_words": [StrOptions({"english"}), list, None], + "token_pattern": [str, None], + "ngram_range": [tuple], + "analyzer": [StrOptions({"word", "char", "char_wb"}), callable], + "max_df": [ + Interval(RealNotInt, 0, 1, closed="both"), + Interval(Integral, 1, None, closed="left"), + ], + "min_df": [ + Interval(RealNotInt, 0, 1, closed="both"), + Interval(Integral, 1, None, closed="left"), + ], + "max_features": [Interval(Integral, 1, None, closed="left"), None], + "vocabulary": [Mapping, HasMethods("__iter__"), None], + "binary": ["boolean"], + "dtype": "no_validation", # delegate to numpy + } + + def __init__( + self, + *, + input="content", + encoding="utf-8", + decode_error="strict", + strip_accents=None, + lowercase=True, + preprocessor=None, + tokenizer=None, + stop_words=None, + token_pattern=r"(?u)\b\w\w+\b", + ngram_range=(1, 1), + analyzer="word", + max_df=1.0, + min_df=1, + max_features=None, + vocabulary=None, + binary=False, + dtype=np.int64, + ): + self.input = input + self.encoding = encoding + self.decode_error = decode_error + self.strip_accents = strip_accents + self.preprocessor = preprocessor + self.tokenizer = tokenizer + self.analyzer = analyzer + self.lowercase = lowercase + self.token_pattern = token_pattern + self.stop_words = stop_words + self.max_df = max_df + self.min_df = min_df + self.max_features = max_features + self.ngram_range = ngram_range + self.vocabulary = vocabulary + self.binary = binary + self.dtype = dtype + + def _sort_features(self, X, vocabulary): + """Sort features by name + + Returns a reordered matrix and modifies the vocabulary in place + """ + sorted_features = sorted(vocabulary.items()) + map_index = np.empty(len(sorted_features), dtype=X.indices.dtype) + for new_val, (term, old_val) in enumerate(sorted_features): + vocabulary[term] = new_val + map_index[old_val] = new_val + + X.indices = map_index.take(X.indices, mode="clip") + return X + + def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): + """Remove too rare or too common features. + + Prune features that are non zero in more samples than high or less + documents than low, modifying the vocabulary, and restricting it to + at most the limit most frequent. + + This does not prune samples with zero features. 
+ """ + if high is None and low is None and limit is None: + return X, set() + + # Calculate a mask based on document frequencies + dfs = _document_frequency(X) + mask = np.ones(len(dfs), dtype=bool) + if high is not None: + mask &= dfs <= high + if low is not None: + mask &= dfs >= low + if limit is not None and mask.sum() > limit: + tfs = np.asarray(X.sum(axis=0)).ravel() + mask_inds = (-tfs[mask]).argsort()[:limit] + new_mask = np.zeros(len(dfs), dtype=bool) + new_mask[np.where(mask)[0][mask_inds]] = True + mask = new_mask + + new_indices = np.cumsum(mask) - 1 # maps old indices to new + removed_terms = set() + for term, old_index in list(vocabulary.items()): + if mask[old_index]: + vocabulary[term] = new_indices[old_index] + else: + del vocabulary[term] + removed_terms.add(term) + kept_indices = np.where(mask)[0] + if len(kept_indices) == 0: + raise ValueError( + "After pruning, no terms remain. Try a lower min_df or a higher max_df." + ) + return X[:, kept_indices], removed_terms + + def _count_vocab(self, raw_documents, fixed_vocab): + """Create sparse feature matrix, and vocabulary where fixed_vocab=False""" + if fixed_vocab: + vocabulary = self.vocabulary_ + else: + # Add a new value when a new vocabulary item is seen + vocabulary = defaultdict() + vocabulary.default_factory = vocabulary.__len__ + + analyze = self.build_analyzer() + j_indices = [] + indptr = [] + + values = _make_int_array() + indptr.append(0) + for doc in raw_documents: + feature_counter = {} + for feature in analyze(doc): + try: + feature_idx = vocabulary[feature] + if feature_idx not in feature_counter: + feature_counter[feature_idx] = 1 + else: + feature_counter[feature_idx] += 1 + except KeyError: + # Ignore out-of-vocabulary items for fixed_vocab=True + continue + + j_indices.extend(feature_counter.keys()) + values.extend(feature_counter.values()) + indptr.append(len(j_indices)) + + if not fixed_vocab: + # disable defaultdict behaviour + vocabulary = dict(vocabulary) + if not vocabulary: + raise ValueError( + "empty vocabulary; perhaps the documents only contain stop words" + ) + + if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1 + if _IS_32BIT: + raise ValueError( + ( + "sparse CSR array has {} non-zero " + "elements and requires 64 bit indexing, " + "which is unsupported with 32 bit Python." + ).format(indptr[-1]) + ) + indices_dtype = np.int64 + + else: + indices_dtype = np.int32 + j_indices = np.asarray(j_indices, dtype=indices_dtype) + indptr = np.asarray(indptr, dtype=indices_dtype) + values = np.frombuffer(values, dtype=np.intc) + + X = sp.csr_matrix( + (values, j_indices, indptr), + shape=(len(indptr) - 1, len(vocabulary)), + dtype=self.dtype, + ) + X.sort_indices() + return vocabulary, X + + def fit(self, raw_documents, y=None): + """Learn a vocabulary dictionary of all tokens in the raw documents. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is ignored. + + Returns + ------- + self : object + Fitted vectorizer. + """ + self.fit_transform(raw_documents) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, raw_documents, y=None): + """Learn the vocabulary dictionary and return document-term matrix. + + This is equivalent to fit followed by transform, but more efficiently + implemented. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is ignored. 
+ + Returns + ------- + X : array of shape (n_samples, n_features) + Document-term matrix. + """ + # We intentionally don't call the transform method to make + # fit_transform overridable without unwanted side effects in + # TfidfVectorizer. + if isinstance(raw_documents, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + + self._validate_ngram_range() + self._warn_for_unused_params() + self._validate_vocabulary() + max_df = self.max_df + min_df = self.min_df + max_features = self.max_features + + if self.fixed_vocabulary_ and self.lowercase: + for term in self.vocabulary: + if any(map(str.isupper, term)): + warnings.warn( + "Upper case characters found in" + " vocabulary while 'lowercase'" + " is True. These entries will not" + " be matched with any documents" + ) + break + + vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) + + if self.binary: + X.data.fill(1) + + if not self.fixed_vocabulary_: + n_doc = X.shape[0] + max_doc_count = max_df if isinstance(max_df, Integral) else max_df * n_doc + min_doc_count = min_df if isinstance(min_df, Integral) else min_df * n_doc + if max_doc_count < min_doc_count: + raise ValueError("max_df corresponds to < documents than min_df") + if max_features is not None: + X = self._sort_features(X, vocabulary) + X, self.stop_words_ = self._limit_features( + X, vocabulary, max_doc_count, min_doc_count, max_features + ) + if max_features is None: + X = self._sort_features(X, vocabulary) + self.vocabulary_ = vocabulary + + return X + + def transform(self, raw_documents): + """Transform documents to document-term matrix. + + Extract token counts out of raw text documents using the vocabulary + fitted with fit or the one provided to the constructor. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Document-term matrix. + """ + if isinstance(raw_documents, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + self._check_vocabulary() + + # use the same matrix-building strategy as fit_transform + _, X = self._count_vocab(raw_documents, fixed_vocab=True) + if self.binary: + X.data.fill(1) + return X + + def inverse_transform(self, X): + """Return terms per document with nonzero entries in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document-term matrix. + + Returns + ------- + X_inv : list of arrays of shape (n_samples,) + List of arrays of terms. + """ + self._check_vocabulary() + # We need CSR format for fast row manipulations. + X = check_array(X, accept_sparse="csr") + n_samples = X.shape[0] + + terms = np.array(list(self.vocabulary_.keys())) + indices = np.array(list(self.vocabulary_.values())) + inverse_vocabulary = terms[np.argsort(indices)] + + if sp.issparse(X): + return [ + inverse_vocabulary[X[i, :].nonzero()[1]].ravel() + for i in range(n_samples) + ] + else: + return [ + inverse_vocabulary[np.flatnonzero(X[i, :])].ravel() + for i in range(n_samples) + ] + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
+ """ + self._check_vocabulary() + return np.asarray( + [t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))], + dtype=object, + ) + + def _more_tags(self): + return {"X_types": ["string"]} + + +def _make_int_array(): + """Construct an array.array of a type suitable for scipy.sparse indices.""" + return array.array(str("i")) + + +class TfidfTransformer( + OneToOneFeatureMixin, TransformerMixin, BaseEstimator, auto_wrap_output_keys=None +): + """Transform a count matrix to a normalized tf or tf-idf representation. + + Tf means term-frequency while tf-idf means term-frequency times inverse + document-frequency. This is a common term weighting scheme in information + retrieval, that has also found good use in document classification. + + The goal of using tf-idf instead of the raw frequencies of occurrence of a + token in a given document is to scale down the impact of tokens that occur + very frequently in a given corpus and that are hence empirically less + informative than features that occur in a small fraction of the training + corpus. + + The formula that is used to compute the tf-idf for a term t of a document d + in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is + computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where + n is the total number of documents in the document set and df(t) is the + document frequency of t; the document frequency is the number of documents + in the document set that contain the term t. The effect of adding "1" to + the idf in the equation above is that terms with zero idf, i.e., terms + that occur in all documents in a training set, will not be entirely + ignored. + (Note that the idf formula above differs from the standard textbook + notation that defines the idf as + idf(t) = log [ n / (df(t) + 1) ]). + + If ``smooth_idf=True`` (the default), the constant "1" is added to the + numerator and denominator of the idf as if an extra document was seen + containing every term in the collection exactly once, which prevents + zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1. + + Furthermore, the formulas used to compute tf and idf depend + on parameter settings that correspond to the SMART notation used in IR + as follows: + + Tf is "n" (natural) by default, "l" (logarithmic) when + ``sublinear_tf=True``. + Idf is "t" when use_idf is given, "n" (none) otherwise. + Normalization is "c" (cosine) when ``norm='l2'``, "n" (none) + when ``norm=None``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + norm : {'l1', 'l2'} or None, default='l2' + Each output row will have unit norm, either: + + - 'l2': Sum of squares of vector elements is 1. The cosine + similarity between two vectors is their dot product when l2 norm has + been applied. + - 'l1': Sum of absolute values of vector elements is 1. + See :func:`~sklearn.preprocessing.normalize`. + - None: No normalization. + + use_idf : bool, default=True + Enable inverse-document-frequency reweighting. If False, idf(t) = 1. + + smooth_idf : bool, default=True + Smooth idf weights by adding one to document frequencies, as if an + extra document was seen containing every term in the collection + exactly once. Prevents zero divisions. + + sublinear_tf : bool, default=False + Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). + + Attributes + ---------- + idf_ : array of shape (n_features) + The inverse document frequency (IDF) vector; only defined + if ``use_idf`` is True. + + .. 
versionadded:: 0.20 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 1.0 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + CountVectorizer : Transforms text into a sparse matrix of n-gram counts. + + TfidfVectorizer : Convert a collection of raw documents to a matrix of + TF-IDF features. + + HashingVectorizer : Convert a collection of text documents to a matrix + of token occurrences. + + References + ---------- + .. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern + Information Retrieval. Addison Wesley, pp. 68-74. + + .. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008). + Introduction to Information Retrieval. Cambridge University + Press, pp. 118-120. + + Examples + -------- + >>> from sklearn.feature_extraction.text import TfidfTransformer + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> from sklearn.pipeline import Pipeline + >>> corpus = ['this is the first document', + ... 'this document is the second document', + ... 'and this is the third one', + ... 'is this the first document'] + >>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the', + ... 'and', 'one'] + >>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)), + ... ('tfid', TfidfTransformer())]).fit(corpus) + >>> pipe['count'].transform(corpus).toarray() + array([[1, 1, 1, 1, 0, 1, 0, 0], + [1, 2, 0, 1, 1, 1, 0, 0], + [1, 0, 0, 1, 0, 1, 1, 1], + [1, 1, 1, 1, 0, 1, 0, 0]]) + >>> pipe['tfid'].idf_ + array([1. , 1.22314355, 1.51082562, 1. , 1.91629073, + 1. , 1.91629073, 1.91629073]) + >>> pipe.transform(corpus).shape + (4, 8) + """ + + _parameter_constraints: dict = { + "norm": [StrOptions({"l1", "l2"}), None], + "use_idf": ["boolean"], + "smooth_idf": ["boolean"], + "sublinear_tf": ["boolean"], + } + + def __init__(self, *, norm="l2", use_idf=True, smooth_idf=True, sublinear_tf=False): + self.norm = norm + self.use_idf = use_idf + self.smooth_idf = smooth_idf + self.sublinear_tf = sublinear_tf + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn the idf vector (global term weights). + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + A matrix of term/token counts. + + y : None + This parameter is not needed to compute tf-idf. + + Returns + ------- + self : object + Fitted transformer. + """ + # large sparse data is not supported for 32bit platforms because + # _document_frequency uses np.bincount which works on arrays of + # dtype NPY_INTP which is int32 for 32bit platforms. See #20923 + X = self._validate_data( + X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT + ) + if not sp.issparse(X): + X = sp.csr_matrix(X) + dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64 + + if self.use_idf: + n_samples, n_features = X.shape + df = _document_frequency(X) + df = df.astype(dtype, copy=False) + + # perform idf smoothing if required + df += int(self.smooth_idf) + n_samples += int(self.smooth_idf) + + # log+1 instead of log makes sure terms with zero idf don't get + # suppressed entirely. + idf = np.log(n_samples / df) + 1 + self._idf_diag = sp.diags( + idf, + offsets=0, + shape=(n_features, n_features), + format="csr", + dtype=dtype, + ) + + return self + + def transform(self, X, copy=True): + """Transform a count matrix to a tf or tf-idf representation. 
+ + Parameters + ---------- + X : sparse matrix of (n_samples, n_features) + A matrix of term/token counts. + + copy : bool, default=True + Whether to copy X and operate on the copy or perform in-place + operations. + + Returns + ------- + vectors : sparse matrix of shape (n_samples, n_features) + Tf-idf-weighted document-term matrix. + """ + X = self._validate_data( + X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy, reset=False + ) + if not sp.issparse(X): + X = sp.csr_matrix(X, dtype=np.float64) + + if self.sublinear_tf: + np.log(X.data, X.data) + X.data += 1 + + if self.use_idf: + # idf_ being a property, the automatic attributes detection + # does not work as usual and we need to specify the attribute + # name: + check_is_fitted(self, attributes=["idf_"], msg="idf vector is not fitted") + + X = X @ self._idf_diag + + if self.norm is not None: + X = normalize(X, norm=self.norm, copy=False) + + return X + + @property + def idf_(self): + """Inverse document frequency vector, only defined if `use_idf=True`. + + Returns + ------- + ndarray of shape (n_features,) + """ + # if _idf_diag is not set, this will raise an attribute error, + # which means hasattr(self, "idf_") is False + return np.ravel(self._idf_diag.sum(axis=0)) + + @idf_.setter + def idf_(self, value): + value = np.asarray(value, dtype=np.float64) + n_features = value.shape[0] + self._idf_diag = sp.spdiags( + value, diags=0, m=n_features, n=n_features, format="csr" + ) + + def _more_tags(self): + return {"X_types": ["2darray", "sparse"]} + + +class TfidfVectorizer(CountVectorizer): + r"""Convert a collection of raw documents to a matrix of TF-IDF features. + + Equivalent to :class:`CountVectorizer` followed by + :class:`TfidfTransformer`. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + input : {'filename', 'file', 'content'}, default='content' + - If `'filename'`, the sequence passed as an argument to fit is + expected to be a list of filenames that need reading to fetch + the raw content to analyze. + + - If `'file'`, the sequence items must have a 'read' method (file-like + object) that is called to fetch the bytes in memory. + + - If `'content'`, the input is expected to be a sequence of items that + can be of type string or byte. + + encoding : str, default='utf-8' + If bytes or files are given to analyze, this encoding is used to + decode. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. By default, it is + 'strict', meaning that a UnicodeDecodeError will be raised. Other + values are 'ignore' and 'replace'. + + strip_accents : {'ascii', 'unicode'} or callable, default=None + Remove accents and perform other character normalization + during the preprocessing step. + 'ascii' is a fast method that only works on characters that have + a direct ASCII mapping. + 'unicode' is a slightly slower method that works on any characters. + None (default) means no character normalization is performed. + + Both 'ascii' and 'unicode' use NFKD normalization from + :func:`unicodedata.normalize`. + + lowercase : bool, default=True + Convert all characters to lowercase before tokenizing. 
+ + preprocessor : callable, default=None + Override the preprocessing (string transformation) stage while + preserving the tokenizing and n-grams generation steps. + Only applies if ``analyzer`` is not callable. + + tokenizer : callable, default=None + Override the string tokenization step while preserving the + preprocessing and n-grams generation steps. + Only applies if ``analyzer == 'word'``. + + analyzer : {'word', 'char', 'char_wb'} or callable, default='word' + Whether the feature should be made of word or character n-grams. + Option 'char_wb' creates character n-grams only from text inside + word boundaries; n-grams at the edges of words are padded with space. + + If a callable is passed it is used to extract the sequence of features + out of the raw, unprocessed input. + + .. versionchanged:: 0.21 + Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data + is first read from the file and then passed to the given callable + analyzer. + + stop_words : {'english'}, list, default=None + If a string, it is passed to _check_stop_list and the appropriate stop + list is returned. 'english' is currently the only supported string + value. + There are several known issues with 'english' and you should + consider an alternative (see :ref:`stop_words`). + + If a list, that list is assumed to contain stop words, all of which + will be removed from the resulting tokens. + Only applies if ``analyzer == 'word'``. + + If None, no stop words will be used. In this case, setting `max_df` + to a higher value, such as in the range (0.7, 1.0), can automatically detect + and filter stop words based on intra corpus document frequency of terms. + + token_pattern : str, default=r"(?u)\\b\\w\\w+\\b" + Regular expression denoting what constitutes a "token", only used + if ``analyzer == 'word'``. The default regexp selects tokens of 2 + or more alphanumeric characters (punctuation is completely ignored + and always treated as a token separator). + + If there is a capturing group in token_pattern then the + captured group content, not the entire match, becomes the token. + At most one capturing group is permitted. + + ngram_range : tuple (min_n, max_n), default=(1, 1) + The lower and upper boundary of the range of n-values for different + n-grams to be extracted. All values of n such that min_n <= n <= max_n + will be used. For example an ``ngram_range`` of ``(1, 1)`` means only + unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means + only bigrams. + Only applies if ``analyzer`` is not callable. + + max_df : float or int, default=1.0 + When building the vocabulary ignore terms that have a document + frequency strictly higher than the given threshold (corpus-specific + stop words). + If float in range [0.0, 1.0], the parameter represents a proportion of + documents, integer absolute counts. + This parameter is ignored if vocabulary is not None. + + min_df : float or int, default=1 + When building the vocabulary ignore terms that have a document + frequency strictly lower than the given threshold. This value is also + called cut-off in the literature. + If float in range of [0.0, 1.0], the parameter represents a proportion + of documents, integer absolute counts. + This parameter is ignored if vocabulary is not None. + + max_features : int, default=None + If not None, build a vocabulary that only consider the top + `max_features` ordered by term frequency across the corpus. + Otherwise, all features are used. + + This parameter is ignored if vocabulary is not None. 
+ + vocabulary : Mapping or iterable, default=None + Either a Mapping (e.g., a dict) where keys are terms and values are + indices in the feature matrix, or an iterable over terms. If not + given, a vocabulary is determined from the input documents. + + binary : bool, default=False + If True, all non-zero term counts are set to 1. This does not mean + outputs will have only 0/1 values, only that the tf term in tf-idf + is binary. (Set `binary` to True, `use_idf` to False and + `norm` to None to get 0/1 outputs). + + dtype : dtype, default=float64 + Type of the matrix returned by fit_transform() or transform(). + + norm : {'l1', 'l2'} or None, default='l2' + Each output row will have unit norm, either: + + - 'l2': Sum of squares of vector elements is 1. The cosine + similarity between two vectors is their dot product when l2 norm has + been applied. + - 'l1': Sum of absolute values of vector elements is 1. + See :func:`~sklearn.preprocessing.normalize`. + - None: No normalization. + + use_idf : bool, default=True + Enable inverse-document-frequency reweighting. If False, idf(t) = 1. + + smooth_idf : bool, default=True + Smooth idf weights by adding one to document frequencies, as if an + extra document was seen containing every term in the collection + exactly once. Prevents zero divisions. + + sublinear_tf : bool, default=False + Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). + + Attributes + ---------- + vocabulary_ : dict + A mapping of terms to feature indices. + + fixed_vocabulary_ : bool + True if a fixed vocabulary of term to indices mapping + is provided by the user. + + idf_ : array of shape (n_features,) + The inverse document frequency (IDF) vector; only defined + if ``use_idf`` is True. + + stop_words_ : set + Terms that were ignored because they either: + + - occurred in too many documents (`max_df`) + - occurred in too few documents (`min_df`) + - were cut off by feature selection (`max_features`). + + This is only available if no vocabulary was given. + + See Also + -------- + CountVectorizer : Transforms text into a sparse matrix of n-gram counts. + + TfidfTransformer : Performs the TF-IDF transformation from a provided + matrix of counts. + + Notes + ----- + The ``stop_words_`` attribute can get large and increase the model size + when pickling. This attribute is provided only for introspection and can + be safely removed using delattr or set to None before pickling. + + Examples + -------- + >>> from sklearn.feature_extraction.text import TfidfVectorizer + >>> corpus = [ + ... 'This is the first document.', + ... 'This document is the second document.', + ... 'And this is the third one.', + ... 'Is this the first document?', + ... ] + >>> vectorizer = TfidfVectorizer() + >>> X = vectorizer.fit_transform(corpus) + >>> vectorizer.get_feature_names_out() + array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', + 'this'], ...) 
+ >>> print(X.shape) + (4, 9) + """ + + _parameter_constraints: dict = {**CountVectorizer._parameter_constraints} + _parameter_constraints.update( + { + "norm": [StrOptions({"l1", "l2"}), None], + "use_idf": ["boolean"], + "smooth_idf": ["boolean"], + "sublinear_tf": ["boolean"], + } + ) + + def __init__( + self, + *, + input="content", + encoding="utf-8", + decode_error="strict", + strip_accents=None, + lowercase=True, + preprocessor=None, + tokenizer=None, + analyzer="word", + stop_words=None, + token_pattern=r"(?u)\b\w\w+\b", + ngram_range=(1, 1), + max_df=1.0, + min_df=1, + max_features=None, + vocabulary=None, + binary=False, + dtype=np.float64, + norm="l2", + use_idf=True, + smooth_idf=True, + sublinear_tf=False, + ): + super().__init__( + input=input, + encoding=encoding, + decode_error=decode_error, + strip_accents=strip_accents, + lowercase=lowercase, + preprocessor=preprocessor, + tokenizer=tokenizer, + analyzer=analyzer, + stop_words=stop_words, + token_pattern=token_pattern, + ngram_range=ngram_range, + max_df=max_df, + min_df=min_df, + max_features=max_features, + vocabulary=vocabulary, + binary=binary, + dtype=dtype, + ) + self.norm = norm + self.use_idf = use_idf + self.smooth_idf = smooth_idf + self.sublinear_tf = sublinear_tf + + # Broadcast the TF-IDF parameters to the underlying transformer instance + # for easy grid search and repr + + @property + def idf_(self): + """Inverse document frequency vector, only defined if `use_idf=True`. + + Returns + ------- + ndarray of shape (n_features,) + """ + if not hasattr(self, "_tfidf"): + raise NotFittedError( + f"{self.__class__.__name__} is not fitted yet. Call 'fit' with " + "appropriate arguments before using this attribute." + ) + return self._tfidf.idf_ + + @idf_.setter + def idf_(self, value): + if not self.use_idf: + raise ValueError("`idf_` cannot be set when `user_idf=False`.") + if not hasattr(self, "_tfidf"): + # We should support transferring `idf_` from another `TfidfTransformer` + # and therefore, we need to create the transformer instance it does not + # exist yet. + self._tfidf = TfidfTransformer( + norm=self.norm, + use_idf=self.use_idf, + smooth_idf=self.smooth_idf, + sublinear_tf=self.sublinear_tf, + ) + self._validate_vocabulary() + if hasattr(self, "vocabulary_"): + if len(self.vocabulary_) != len(value): + raise ValueError( + "idf length = %d must be equal to vocabulary size = %d" + % (len(value), len(self.vocabulary)) + ) + self._tfidf.idf_ = value + + def _check_params(self): + if self.dtype not in FLOAT_DTYPES: + warnings.warn( + "Only {} 'dtype' should be used. {} 'dtype' will " + "be converted to np.float64.".format(FLOAT_DTYPES, self.dtype), + UserWarning, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, raw_documents, y=None): + """Learn vocabulary and idf from training set. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is not needed to compute tfidf. + + Returns + ------- + self : object + Fitted vectorizer. + """ + self._check_params() + self._warn_for_unused_params() + self._tfidf = TfidfTransformer( + norm=self.norm, + use_idf=self.use_idf, + smooth_idf=self.smooth_idf, + sublinear_tf=self.sublinear_tf, + ) + X = super().fit_transform(raw_documents) + self._tfidf.fit(X) + return self + + def fit_transform(self, raw_documents, y=None): + """Learn vocabulary and idf, return document-term matrix. 
+ + This is equivalent to fit followed by transform, but more efficiently + implemented. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is ignored. + + Returns + ------- + X : sparse matrix of (n_samples, n_features) + Tf-idf-weighted document-term matrix. + """ + self._check_params() + self._tfidf = TfidfTransformer( + norm=self.norm, + use_idf=self.use_idf, + smooth_idf=self.smooth_idf, + sublinear_tf=self.sublinear_tf, + ) + X = super().fit_transform(raw_documents) + self._tfidf.fit(X) + # X is already a transformed view of raw_documents so + # we set copy to False + return self._tfidf.transform(X, copy=False) + + def transform(self, raw_documents): + """Transform documents to document-term matrix. + + Uses the vocabulary and document frequencies (df) learned by fit (or + fit_transform). + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + Returns + ------- + X : sparse matrix of (n_samples, n_features) + Tf-idf-weighted document-term matrix. + """ + check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted") + + X = super().transform(raw_documents) + return self._tfidf.transform(X, copy=False) + + def _more_tags(self): + return {"X_types": ["string"], "_skip_test": True} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38ce990578aa2d2225ebd8f39d04a037d0a91b16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_label_propagation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_label_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e5386ebe3242bbfb77d3009b7bdbbd063506a58 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_label_propagation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_self_training.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_self_training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bfcad03b05b9f4298f291b18a428d2939eb6fb9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_self_training.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7244529dfdcb40f23600248422e3e13cfb27530 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_label_propagation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_label_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..010b804f79e4509f92518c135b5f6a6d285dfeaf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_label_propagation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_self_training.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_self_training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6544bc3ba05ccb8c95302cb1fe7cfe4da4febe36 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_self_training.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_label_propagation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..8812c3c352a0378f2d24e336cf8b4f0f29fd42a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_label_propagation.py @@ -0,0 +1,238 @@ +""" test the label propagation module """ + +import warnings + +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn.datasets import make_classification +from sklearn.exceptions import ConvergenceWarning +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import train_test_split +from sklearn.neighbors import NearestNeighbors +from sklearn.semi_supervised import _label_propagation as label_propagation +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_array_equal, +) + +CONSTRUCTOR_TYPES = ("array", "sparse_csr", "sparse_csc") + +ESTIMATORS = [ + (label_propagation.LabelPropagation, {"kernel": "rbf"}), + (label_propagation.LabelPropagation, {"kernel": "knn", "n_neighbors": 2}), + ( + label_propagation.LabelPropagation, + {"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)}, + ), + (label_propagation.LabelSpreading, {"kernel": "rbf"}), + (label_propagation.LabelSpreading, {"kernel": "knn", "n_neighbors": 2}), + ( + label_propagation.LabelSpreading, + {"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)}, + ), +] + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_fit_transduction(global_dtype, Estimator, parameters): + samples = np.asarray([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert clf.transduction_[2] == 1 + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_distribution(global_dtype, Estimator, parameters): + if parameters["kernel"] == "knn": + pytest.skip( + "Unstable test for this configuration: changes in k-NN ordering break it." 
+ ) + samples = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert_allclose(clf.label_distributions_[2], [0.5, 0.5], atol=1e-2) + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_predict(global_dtype, Estimator, parameters): + samples = np.asarray([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1])) + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_predict_proba(global_dtype, Estimator, parameters): + samples = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert_allclose(clf.predict_proba([[1.0, 1.0]]), np.array([[0.5, 0.5]])) + + +@pytest.mark.parametrize("alpha", [0.1, 0.3, 0.5, 0.7, 0.9]) +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_label_spreading_closed_form(global_dtype, Estimator, parameters, alpha): + n_classes = 2 + X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0) + X = X.astype(global_dtype, copy=False) + y[::3] = -1 + + gamma = 0.1 + clf = label_propagation.LabelSpreading(gamma=gamma).fit(X, y) + # adopting notation from Zhou et al (2004): + S = clf._build_graph() + Y = np.zeros((len(y), n_classes + 1), dtype=X.dtype) + Y[np.arange(len(y)), y] = 1 + Y = Y[:, :-1] + + expected = np.dot(np.linalg.inv(np.eye(len(S), dtype=S.dtype) - alpha * S), Y) + expected /= expected.sum(axis=1)[:, np.newaxis] + + clf = label_propagation.LabelSpreading( + max_iter=100, alpha=alpha, tol=1e-10, gamma=gamma + ) + clf.fit(X, y) + + assert_allclose(expected, clf.label_distributions_) + + +def test_label_propagation_closed_form(global_dtype): + n_classes = 2 + X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0) + X = X.astype(global_dtype, copy=False) + y[::3] = -1 + Y = np.zeros((len(y), n_classes + 1)) + Y[np.arange(len(y)), y] = 1 + unlabelled_idx = Y[:, (-1,)].nonzero()[0] + labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0] + + clf = label_propagation.LabelPropagation(max_iter=100, tol=1e-10, gamma=0.1) + clf.fit(X, y) + # adopting notation from Zhu et al 2002 + T_bar = clf._build_graph() + Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))] + Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))] + Y = Y[:, :-1] + Y_l = Y[labelled_idx, :] + Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l) + + expected = Y.copy() + expected[unlabelled_idx, :] = Y_u + expected /= expected.sum(axis=1)[:, np.newaxis] + + assert_allclose(expected, clf.label_distributions_, atol=1e-4) + + +@pytest.mark.parametrize("accepted_sparse_type", ["sparse_csr", "sparse_csc"]) +@pytest.mark.parametrize("index_dtype", [np.int32, np.int64]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_sparse_input_types( + accepted_sparse_type, index_dtype, dtype, Estimator, parameters +): + # This is non-regression test for #17085 + X = _convert_container([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], accepted_sparse_type) + X.data = X.data.astype(dtype, copy=False) + X.indices = X.indices.astype(index_dtype, copy=False) + X.indptr = X.indptr.astype(index_dtype, copy=False) + labels = [0, 1, -1] + clf = 
Estimator(**parameters).fit(X, labels) + assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1])) + + +@pytest.mark.parametrize("constructor_type", CONSTRUCTOR_TYPES) +def test_convergence_speed(constructor_type): + # This is a non-regression test for #5774 + X = _convert_container([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]], constructor_type) + y = np.array([0, 1, -1]) + mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=5000) + mdl.fit(X, y) + + # this should converge quickly: + assert mdl.n_iter_ < 10 + assert_array_equal(mdl.predict(X), [0, 1, 1]) + + +def test_convergence_warning(): + # This is a non-regression test for #5774 + X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]]) + y = np.array([0, 1, -1]) + mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=1) + warn_msg = "max_iter=1 was reached without convergence." + with pytest.warns(ConvergenceWarning, match=warn_msg): + mdl.fit(X, y) + assert mdl.n_iter_ == mdl.max_iter + + mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=1) + with pytest.warns(ConvergenceWarning, match=warn_msg): + mdl.fit(X, y) + assert mdl.n_iter_ == mdl.max_iter + + mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=500) + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + mdl.fit(X, y) + + mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=500) + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + mdl.fit(X, y) + + +@pytest.mark.parametrize( + "LabelPropagationCls", + [label_propagation.LabelSpreading, label_propagation.LabelPropagation], +) +def test_label_propagation_non_zero_normalizer(LabelPropagationCls): + # check that we don't divide by zero in case of null normalizer + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/pull/15946 + # https://github.com/scikit-learn/scikit-learn/issues/9292 + X = np.array([[100.0, 100.0], [100.0, 100.0], [0.0, 0.0], [0.0, 0.0]]) + y = np.array([0, 1, -1, -1]) + mdl = LabelPropagationCls(kernel="knn", max_iter=100, n_neighbors=1) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + mdl.fit(X, y) + + +def test_predict_sparse_callable_kernel(global_dtype): + # This is a non-regression test for #15866 + + # Custom sparse kernel (top-K RBF) + def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5): + nn = NearestNeighbors(n_neighbors=10, metric="euclidean", n_jobs=2) + nn.fit(X) + W = -1 * nn.kneighbors_graph(Y, mode="distance").power(2) * gamma + np.exp(W.data, out=W.data) + assert issparse(W) + return W.T + + n_classes = 4 + n_samples = 500 + n_test = 10 + X, y = make_classification( + n_classes=n_classes, + n_samples=n_samples, + n_features=20, + n_informative=20, + n_redundant=0, + n_repeated=0, + random_state=0, + ) + X = X.astype(global_dtype) + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=0 + ) + + model = label_propagation.LabelSpreading(kernel=topk_rbf) + model.fit(X_train, y_train) + assert model.score(X_test, y_test) >= 0.9 + + model = label_propagation.LabelPropagation(kernel=topk_rbf) + model.fit(X_train, y_train) + assert model.score(X_test, y_test) >= 0.9 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_self_training.py b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_self_training.py new file mode 100644 index 0000000000000000000000000000000000000000..2efeb32446f8927071d873dd6e586945fb73f6d8 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_self_training.py @@ -0,0 +1,345 @@ +from math import ceil + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.datasets import load_iris, make_blobs +from sklearn.ensemble import StackingClassifier +from sklearn.exceptions import NotFittedError +from sklearn.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.neighbors import KNeighborsClassifier +from sklearn.semi_supervised import SelfTrainingClassifier +from sklearn.svm import SVC +from sklearn.tree import DecisionTreeClassifier + +# Author: Oliver Rausch +# License: BSD 3 clause + +# load the iris dataset and randomly permute it +iris = load_iris() +X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=0 +) + +n_labeled_samples = 50 + +y_train_missing_labels = y_train.copy() +y_train_missing_labels[n_labeled_samples:] = -1 +mapping = {0: "A", 1: "B", 2: "C", -1: "-1"} +y_train_missing_strings = np.vectorize(mapping.get)(y_train_missing_labels).astype( + object +) +y_train_missing_strings[y_train_missing_labels == -1] = -1 + + +def test_warns_k_best(): + st = SelfTrainingClassifier(KNeighborsClassifier(), criterion="k_best", k_best=1000) + with pytest.warns(UserWarning, match="k_best is larger than"): + st.fit(X_train, y_train_missing_labels) + + assert st.termination_condition_ == "all_labeled" + + +@pytest.mark.parametrize( + "base_estimator", + [KNeighborsClassifier(), SVC(gamma="scale", probability=True, random_state=0)], +) +@pytest.mark.parametrize("selection_crit", ["threshold", "k_best"]) +def test_classification(base_estimator, selection_crit): + # Check classification for various parameter settings. + # Also assert that predictions for strings and numerical labels are equal. 
+ # Also test for multioutput classification + threshold = 0.75 + max_iter = 10 + st = SelfTrainingClassifier( + base_estimator, max_iter=max_iter, threshold=threshold, criterion=selection_crit + ) + st.fit(X_train, y_train_missing_labels) + pred = st.predict(X_test) + proba = st.predict_proba(X_test) + + st_string = SelfTrainingClassifier( + base_estimator, max_iter=max_iter, criterion=selection_crit, threshold=threshold + ) + st_string.fit(X_train, y_train_missing_strings) + pred_string = st_string.predict(X_test) + proba_string = st_string.predict_proba(X_test) + + assert_array_equal(np.vectorize(mapping.get)(pred), pred_string) + assert_array_equal(proba, proba_string) + + assert st.termination_condition_ == st_string.termination_condition_ + # Check consistency between labeled_iter, n_iter and max_iter + labeled = y_train_missing_labels != -1 + # assert that labeled samples have labeled_iter = 0 + assert_array_equal(st.labeled_iter_ == 0, labeled) + # assert that labeled samples do not change label during training + assert_array_equal(y_train_missing_labels[labeled], st.transduction_[labeled]) + + # assert that the max of the iterations is less than the total amount of + # iterations + assert np.max(st.labeled_iter_) <= st.n_iter_ <= max_iter + assert np.max(st_string.labeled_iter_) <= st_string.n_iter_ <= max_iter + + # check shapes + assert st.labeled_iter_.shape == st.transduction_.shape + assert st_string.labeled_iter_.shape == st_string.transduction_.shape + + +def test_k_best(): + st = SelfTrainingClassifier( + KNeighborsClassifier(n_neighbors=1), + criterion="k_best", + k_best=10, + max_iter=None, + ) + y_train_only_one_label = np.copy(y_train) + y_train_only_one_label[1:] = -1 + n_samples = y_train.shape[0] + + n_expected_iter = ceil((n_samples - 1) / 10) + st.fit(X_train, y_train_only_one_label) + assert st.n_iter_ == n_expected_iter + + # Check labeled_iter_ + assert np.sum(st.labeled_iter_ == 0) == 1 + for i in range(1, n_expected_iter): + assert np.sum(st.labeled_iter_ == i) == 10 + assert np.sum(st.labeled_iter_ == n_expected_iter) == (n_samples - 1) % 10 + assert st.termination_condition_ == "all_labeled" + + +def test_sanity_classification(): + base_estimator = SVC(gamma="scale", probability=True) + base_estimator.fit(X_train[n_labeled_samples:], y_train[n_labeled_samples:]) + + st = SelfTrainingClassifier(base_estimator) + st.fit(X_train, y_train_missing_labels) + + pred1, pred2 = base_estimator.predict(X_test), st.predict(X_test) + assert not np.array_equal(pred1, pred2) + score_supervised = accuracy_score(base_estimator.predict(X_test), y_test) + score_self_training = accuracy_score(st.predict(X_test), y_test) + + assert score_self_training > score_supervised + + +def test_none_iter(): + # Check that the all samples were labeled after a 'reasonable' number of + # iterations. + st = SelfTrainingClassifier(KNeighborsClassifier(), threshold=0.55, max_iter=None) + st.fit(X_train, y_train_missing_labels) + + assert st.n_iter_ < 10 + assert st.termination_condition_ == "all_labeled" + + +@pytest.mark.parametrize( + "base_estimator", + [KNeighborsClassifier(), SVC(gamma="scale", probability=True, random_state=0)], +) +@pytest.mark.parametrize("y", [y_train_missing_labels, y_train_missing_strings]) +def test_zero_iterations(base_estimator, y): + # Check classification for zero iterations. + # Fitting a SelfTrainingClassifier with zero iterations should give the + # same results as fitting a supervised classifier. + # This also asserts that string arrays work as expected. 
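For orientation, a compact illustrative sketch of the unlabeled-sample convention these tests rely on (the -1 sentinel) and of the `max_iter=0` behaviour exercised below; the dataset and the labeled/unlabeled split are arbitrary:

from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.semi_supervised import SelfTrainingClassifier

X, y = load_iris(return_X_y=True)
y_partial = y.copy()
y_partial[::2] = -1                     # every other sample is marked as unlabeled

# With zero self-training iterations the classifier reduces to fitting the
# base estimator on the labeled subset only.
clf = SelfTrainingClassifier(KNeighborsClassifier(), max_iter=0)
clf.fit(X, y_partial)
print(clf.termination_condition_)       # expected: "max_iter"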
+ + clf1 = SelfTrainingClassifier(base_estimator, max_iter=0) + + clf1.fit(X_train, y) + + clf2 = base_estimator.fit(X_train[:n_labeled_samples], y[:n_labeled_samples]) + + assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) + assert clf1.termination_condition_ == "max_iter" + + +def test_prefitted_throws_error(): + # Test that passing a pre-fitted classifier and calling predict throws an + # error + knn = KNeighborsClassifier() + knn.fit(X_train, y_train) + st = SelfTrainingClassifier(knn) + with pytest.raises( + NotFittedError, + match="This SelfTrainingClassifier instance is not fitted yet", + ): + st.predict(X_train) + + +@pytest.mark.parametrize("max_iter", range(1, 5)) +def test_labeled_iter(max_iter): + # Check that the amount of datapoints labeled in iteration 0 is equal to + # the amount of labeled datapoints we passed. + st = SelfTrainingClassifier(KNeighborsClassifier(), max_iter=max_iter) + + st.fit(X_train, y_train_missing_labels) + amount_iter_0 = len(st.labeled_iter_[st.labeled_iter_ == 0]) + assert amount_iter_0 == n_labeled_samples + # Check that the max of the iterations is less than the total amount of + # iterations + assert np.max(st.labeled_iter_) <= st.n_iter_ <= max_iter + + +def test_no_unlabeled(): + # Test that training on a fully labeled dataset produces the same results + # as training the classifier by itself. + knn = KNeighborsClassifier() + knn.fit(X_train, y_train) + st = SelfTrainingClassifier(knn) + with pytest.warns(UserWarning, match="y contains no unlabeled samples"): + st.fit(X_train, y_train) + assert_array_equal(knn.predict(X_test), st.predict(X_test)) + # Assert that all samples were labeled in iteration 0 (since there were no + # unlabeled samples). + assert np.all(st.labeled_iter_ == 0) + assert st.termination_condition_ == "all_labeled" + + +def test_early_stopping(): + svc = SVC(gamma="scale", probability=True) + st = SelfTrainingClassifier(svc) + X_train_easy = [[1], [0], [1], [0.5]] + y_train_easy = [1, 0, -1, -1] + # X = [[0.5]] cannot be predicted on with a high confidence, so training + # stops early + st.fit(X_train_easy, y_train_easy) + assert st.n_iter_ == 1 + assert st.termination_condition_ == "no_change" + + +def test_strings_dtype(): + clf = SelfTrainingClassifier(KNeighborsClassifier()) + X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) + labels_multiclass = ["one", "two", "three"] + + y_strings = np.take(labels_multiclass, y) + + with pytest.raises(ValueError, match="dtype"): + clf.fit(X, y_strings) + + +@pytest.mark.parametrize("verbose", [True, False]) +def test_verbose(capsys, verbose): + clf = SelfTrainingClassifier(KNeighborsClassifier(), verbose=verbose) + clf.fit(X_train, y_train_missing_labels) + + captured = capsys.readouterr() + + if verbose: + assert "iteration" in captured.out + else: + assert "iteration" not in captured.out + + +def test_verbose_k_best(capsys): + st = SelfTrainingClassifier( + KNeighborsClassifier(n_neighbors=1), + criterion="k_best", + k_best=10, + verbose=True, + max_iter=None, + ) + + y_train_only_one_label = np.copy(y_train) + y_train_only_one_label[1:] = -1 + n_samples = y_train.shape[0] + + n_expected_iter = ceil((n_samples - 1) / 10) + st.fit(X_train, y_train_only_one_label) + + captured = capsys.readouterr() + + msg = "End of iteration {}, added {} new labels." 
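+    # When `verbose=True`, SelfTrainingClassifier prints one such progress line
+    # per self-training iteration; the loop below reconstructs the expected
+    # messages from the known batch size (k_best=10) and checks them against
+    # the captured stdout.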
+ for i in range(1, n_expected_iter): + assert msg.format(i, 10) in captured.out + + assert msg.format(n_expected_iter, (n_samples - 1) % 10) in captured.out + + +def test_k_best_selects_best(): + # Tests that the labels added by st really are the 10 best labels. + svc = SVC(gamma="scale", probability=True, random_state=0) + st = SelfTrainingClassifier(svc, criterion="k_best", max_iter=1, k_best=10) + has_label = y_train_missing_labels != -1 + st.fit(X_train, y_train_missing_labels) + + got_label = ~has_label & (st.transduction_ != -1) + + svc.fit(X_train[has_label], y_train_missing_labels[has_label]) + pred = svc.predict_proba(X_train[~has_label]) + max_proba = np.max(pred, axis=1) + + most_confident_svc = X_train[~has_label][np.argsort(max_proba)[-10:]] + added_by_st = X_train[np.where(got_label)].tolist() + + for row in most_confident_svc.tolist(): + assert row in added_by_st + + +def test_base_estimator_meta_estimator(): + # Check that a meta-estimator relying on an estimator implementing + # `predict_proba` will work even if it does not expose this method before being + # fitted. + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/19119 + + base_estimator = StackingClassifier( + estimators=[ + ("svc_1", SVC(probability=True)), + ("svc_2", SVC(probability=True)), + ], + final_estimator=SVC(probability=True), + cv=2, + ) + + assert hasattr(base_estimator, "predict_proba") + clf = SelfTrainingClassifier(base_estimator=base_estimator) + clf.fit(X_train, y_train_missing_labels) + clf.predict_proba(X_test) + + base_estimator = StackingClassifier( + estimators=[ + ("svc_1", SVC(probability=False)), + ("svc_2", SVC(probability=False)), + ], + final_estimator=SVC(probability=False), + cv=2, + ) + + assert not hasattr(base_estimator, "predict_proba") + clf = SelfTrainingClassifier(base_estimator=base_estimator) + with pytest.raises(AttributeError): + clf.fit(X_train, y_train_missing_labels) + + +def test_self_training_estimator_attribute_error(): + """Check that we raise the proper AttributeErrors when the `base_estimator` + does not implement the `predict_proba` method, which is called from within + `fit`, or `decision_function`, which is decorated with `available_if`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + # `SVC` with `probability=False` does not implement 'predict_proba' that + # is required internally in `fit` of `SelfTrainingClassifier`. We expect + # an AttributeError to be raised. 
+ base_estimator = SVC(probability=False, gamma="scale") + self_training = SelfTrainingClassifier(base_estimator) + + with pytest.raises(AttributeError, match="has no attribute 'predict_proba'"): + self_training.fit(X_train, y_train_missing_labels) + + # `DecisionTreeClassifier` does not implement 'decision_function' and + # should raise an AttributeError + self_training = SelfTrainingClassifier(base_estimator=DecisionTreeClassifier()) + + outer_msg = "This 'SelfTrainingClassifier' has no attribute 'decision_function'" + inner_msg = "'DecisionTreeClassifier' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + self_training.fit(X_train, y_train_missing_labels).decision_function(X_train) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8cfb42c73e11818e0f1d08190d49457531242a85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__init__.py @@ -0,0 +1,24 @@ +""" +The :mod:`sklearn.tree` module includes decision tree-based models for +classification and regression. +""" + +from ._classes import ( + BaseDecisionTree, + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) +from ._export import export_graphviz, export_text, plot_tree + +__all__ = [ + "BaseDecisionTree", + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", + "export_graphviz", + "plot_tree", + "export_text", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21bd5d3945e70c603815fdc219e82d82203ced04 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_classes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_classes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e79a40832f34276901e3cee769fa8e9d1c19f3e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_classes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_export.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_export.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08886f7e5cd81630ae825053a312f670f5ad3e1b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_export.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_reingold_tilford.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_reingold_tilford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5548424800f8a54ff90d0053e0260568b7cf69f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/__pycache__/_reingold_tilford.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_classes.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..00d61f184731dbb21e8146c8500ba43743fc576d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_classes.py @@ -0,0 +1,1938 @@ +""" +This module gathers tree-based methods, including decision, regression and +randomized trees. Single and multi-output problems are both handled. +""" + +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Noel Dawe +# Satrajit Gosh +# Joly Arnaud +# Fares Hedayati +# Nelson Liu +# +# License: BSD 3 clause + +import copy +import numbers +from abc import ABCMeta, abstractmethod +from math import ceil +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import issparse + +from ..base import ( + BaseEstimator, + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, + clone, + is_classifier, +) +from ..utils import Bunch, check_random_state, compute_sample_weight +from ..utils._param_validation import Hidden, Interval, RealNotInt, StrOptions +from ..utils.multiclass import check_classification_targets +from ..utils.validation import ( + _assert_all_finite_element_wise, + _check_sample_weight, + assert_all_finite, + check_is_fitted, +) +from . import _criterion, _splitter, _tree +from ._criterion import Criterion +from ._splitter import Splitter +from ._tree import ( + BestFirstTreeBuilder, + DepthFirstTreeBuilder, + Tree, + _build_pruned_tree_ccp, + ccp_pruning_path, +) +from ._utils import _any_isnan_axis0 + +__all__ = [ + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", +] + + +# ============================================================================= +# Types and constants +# ============================================================================= + +DTYPE = _tree.DTYPE +DOUBLE = _tree.DOUBLE + +CRITERIA_CLF = { + "gini": _criterion.Gini, + "log_loss": _criterion.Entropy, + "entropy": _criterion.Entropy, +} +CRITERIA_REG = { + "squared_error": _criterion.MSE, + "friedman_mse": _criterion.FriedmanMSE, + "absolute_error": _criterion.MAE, + "poisson": _criterion.Poisson, +} + +DENSE_SPLITTERS = {"best": _splitter.BestSplitter, "random": _splitter.RandomSplitter} + +SPARSE_SPLITTERS = { + "best": _splitter.BestSparseSplitter, + "random": _splitter.RandomSparseSplitter, +} + +# ============================================================================= +# Base decision tree +# ============================================================================= + + +class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for decision trees. + + Warning: This class should not be used directly. + Use derived classes instead. 
+ """ + + _parameter_constraints: dict = { + "splitter": [StrOptions({"best", "random"})], + "max_depth": [Interval(Integral, 1, None, closed="left"), None], + "min_samples_split": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0.0, 1.0, closed="right"), + ], + "min_samples_leaf": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0.0, 1.0, closed="neither"), + ], + "min_weight_fraction_leaf": [Interval(Real, 0.0, 0.5, closed="both")], + "max_features": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0.0, 1.0, closed="right"), + StrOptions({"sqrt", "log2"}), + None, + ], + "random_state": ["random_state"], + "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None], + "min_impurity_decrease": [Interval(Real, 0.0, None, closed="left")], + "ccp_alpha": [Interval(Real, 0.0, None, closed="left")], + "monotonic_cst": ["array-like", None], + } + + @abstractmethod + def __init__( + self, + *, + criterion, + splitter, + max_depth, + min_samples_split, + min_samples_leaf, + min_weight_fraction_leaf, + max_features, + max_leaf_nodes, + random_state, + min_impurity_decrease, + class_weight=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + self.criterion = criterion + self.splitter = splitter + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.max_leaf_nodes = max_leaf_nodes + self.random_state = random_state + self.min_impurity_decrease = min_impurity_decrease + self.class_weight = class_weight + self.ccp_alpha = ccp_alpha + self.monotonic_cst = monotonic_cst + + def get_depth(self): + """Return the depth of the decision tree. + + The depth of a tree is the maximum distance between the root + and any leaf. + + Returns + ------- + self.tree_.max_depth : int + The maximum depth of the tree. + """ + check_is_fitted(self) + return self.tree_.max_depth + + def get_n_leaves(self): + """Return the number of leaves of the decision tree. + + Returns + ------- + self.tree_.n_leaves : int + Number of leaves. + """ + check_is_fitted(self) + return self.tree_.n_leaves + + def _support_missing_values(self, X): + return ( + not issparse(X) + and self._get_tags()["allow_nan"] + and self.monotonic_cst is None + ) + + def _compute_missing_values_in_feature_mask(self, X, estimator_name=None): + """Return boolean mask denoting if there are missing values for each feature. + + This method also ensures that X is finite. + + Parameter + --------- + X : array-like of shape (n_samples, n_features), dtype=DOUBLE + Input data. + + estimator_name : str or None, default=None + Name to use when raising an error. Defaults to the class name. + + Returns + ------- + missing_values_in_feature_mask : ndarray of shape (n_features,), or None + Missing value mask. If missing values are not supported or there + are no missing values, return None. + """ + estimator_name = estimator_name or self.__class__.__name__ + common_kwargs = dict(estimator_name=estimator_name, input_name="X") + + if not self._support_missing_values(X): + assert_all_finite(X, **common_kwargs) + return None + + with np.errstate(over="ignore"): + overall_sum = np.sum(X) + + if not np.isfinite(overall_sum): + # Raise a ValueError in case of the presence of an infinite element. 
+ _assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs) + + # If the sum is not nan, then there are no missing values + if not np.isnan(overall_sum): + return None + + missing_values_in_feature_mask = _any_isnan_axis0(X) + return missing_values_in_feature_mask + + def _fit( + self, + X, + y, + sample_weight=None, + check_input=True, + missing_values_in_feature_mask=None, + ): + random_state = check_random_state(self.random_state) + + if check_input: + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. + + # _compute_missing_values_in_feature_mask will check for finite values and + # compute the missing mask if the tree supports missing values + check_X_params = dict( + dtype=DTYPE, accept_sparse="csc", force_all_finite=False + ) + check_y_params = dict(ensure_2d=False, dtype=None) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + + missing_values_in_feature_mask = ( + self._compute_missing_values_in_feature_mask(X) + ) + if issparse(X): + X.sort_indices() + + if X.indices.dtype != np.intc or X.indptr.dtype != np.intc: + raise ValueError( + "No support for np.int64 index based sparse matrices" + ) + + if self.criterion == "poisson": + if np.any(y < 0): + raise ValueError( + "Some value(s) of y are negative which is" + " not allowed for Poisson regression." + ) + if np.sum(y) <= 0: + raise ValueError( + "Sum of y is not positive which is " + "necessary for Poisson regression." + ) + + # Determine output settings + n_samples, self.n_features_in_ = X.shape + is_classification = is_classifier(self) + + y = np.atleast_1d(y) + expanded_class_weight = None + + if y.ndim == 1: + # reshape is necessary to preserve the data contiguity against vs + # [:, np.newaxis] that does not. 
+ y = np.reshape(y, (-1, 1)) + + self.n_outputs_ = y.shape[1] + + if is_classification: + check_classification_targets(y) + y = np.copy(y) + + self.classes_ = [] + self.n_classes_ = [] + + if self.class_weight is not None: + y_original = np.copy(y) + + y_encoded = np.zeros(y.shape, dtype=int) + for k in range(self.n_outputs_): + classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True) + self.classes_.append(classes_k) + self.n_classes_.append(classes_k.shape[0]) + y = y_encoded + + if self.class_weight is not None: + expanded_class_weight = compute_sample_weight( + self.class_weight, y_original + ) + + self.n_classes_ = np.array(self.n_classes_, dtype=np.intp) + + if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: + y = np.ascontiguousarray(y, dtype=DOUBLE) + + max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth + + if isinstance(self.min_samples_leaf, numbers.Integral): + min_samples_leaf = self.min_samples_leaf + else: # float + min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples)) + + if isinstance(self.min_samples_split, numbers.Integral): + min_samples_split = self.min_samples_split + else: # float + min_samples_split = int(ceil(self.min_samples_split * n_samples)) + min_samples_split = max(2, min_samples_split) + + min_samples_split = max(min_samples_split, 2 * min_samples_leaf) + + if isinstance(self.max_features, str): + if self.max_features == "sqrt": + max_features = max(1, int(np.sqrt(self.n_features_in_))) + elif self.max_features == "log2": + max_features = max(1, int(np.log2(self.n_features_in_))) + elif self.max_features is None: + max_features = self.n_features_in_ + elif isinstance(self.max_features, numbers.Integral): + max_features = self.max_features + else: # float + if self.max_features > 0.0: + max_features = max(1, int(self.max_features * self.n_features_in_)) + else: + max_features = 0 + + self.max_features_ = max_features + + max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes + + if len(y) != n_samples: + raise ValueError( + "Number of labels=%d does not match number of samples=%d" + % (len(y), n_samples) + ) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, DOUBLE) + + if expanded_class_weight is not None: + if sample_weight is not None: + sample_weight = sample_weight * expanded_class_weight + else: + sample_weight = expanded_class_weight + + # Set min_weight_leaf from min_weight_fraction_leaf + if sample_weight is None: + min_weight_leaf = self.min_weight_fraction_leaf * n_samples + else: + min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight) + + # Build tree + criterion = self.criterion + if not isinstance(criterion, Criterion): + if is_classification: + criterion = CRITERIA_CLF[self.criterion]( + self.n_outputs_, self.n_classes_ + ) + else: + criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples) + else: + # Make a deepcopy in case the criterion has mutable attributes that + # might be shared and modified concurrently during parallel fitting + criterion = copy.deepcopy(criterion) + + SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS + + splitter = self.splitter + if self.monotonic_cst is None: + monotonic_cst = None + else: + if self.n_outputs_ > 1: + raise ValueError( + "Monotonicity constraints are not supported with multiple outputs." 
+ ) + # Check to correct monotonicity constraint' specification, + # by applying element-wise logical conjunction + # Note: we do not cast `np.asarray(self.monotonic_cst, dtype=np.int8)` + # straight away here so as to generate error messages for invalid + # values using the original values prior to any dtype related conversion. + monotonic_cst = np.asarray(self.monotonic_cst) + if monotonic_cst.shape[0] != X.shape[1]: + raise ValueError( + "monotonic_cst has shape {} but the input data " + "X has {} features.".format(monotonic_cst.shape[0], X.shape[1]) + ) + valid_constraints = np.isin(monotonic_cst, (-1, 0, 1)) + if not np.all(valid_constraints): + unique_constaints_value = np.unique(monotonic_cst) + raise ValueError( + "monotonic_cst must be None or an array-like of -1, 0 or 1, but" + f" got {unique_constaints_value}" + ) + monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8) + if is_classifier(self): + if self.n_classes_[0] > 2: + raise ValueError( + "Monotonicity constraints are not supported with multiclass " + "classification" + ) + # Binary classification trees are built by constraining probabilities + # of the *negative class* in order to make the implementation similar + # to regression trees. + # Since self.monotonic_cst encodes constraints on probabilities of the + # *positive class*, all signs must be flipped. + monotonic_cst *= -1 + + if not isinstance(self.splitter, Splitter): + splitter = SPLITTERS[self.splitter]( + criterion, + self.max_features_, + min_samples_leaf, + min_weight_leaf, + random_state, + monotonic_cst, + ) + + if is_classifier(self): + self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_) + else: + self.tree_ = Tree( + self.n_features_in_, + # TODO: tree shouldn't need this in this case + np.array([1] * self.n_outputs_, dtype=np.intp), + self.n_outputs_, + ) + + # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise + if max_leaf_nodes < 0: + builder = DepthFirstTreeBuilder( + splitter, + min_samples_split, + min_samples_leaf, + min_weight_leaf, + max_depth, + self.min_impurity_decrease, + ) + else: + builder = BestFirstTreeBuilder( + splitter, + min_samples_split, + min_samples_leaf, + min_weight_leaf, + max_depth, + max_leaf_nodes, + self.min_impurity_decrease, + ) + + builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask) + + if self.n_outputs_ == 1 and is_classifier(self): + self.n_classes_ = self.n_classes_[0] + self.classes_ = self.classes_[0] + + self._prune_tree() + + return self + + def _validate_X_predict(self, X, check_input): + """Validate the training data on predict (probabilities).""" + if check_input: + if self._support_missing_values(X): + force_all_finite = "allow-nan" + else: + force_all_finite = True + X = self._validate_data( + X, + dtype=DTYPE, + accept_sparse="csr", + reset=False, + force_all_finite=force_all_finite, + ) + if issparse(X) and ( + X.indices.dtype != np.intc or X.indptr.dtype != np.intc + ): + raise ValueError("No support for np.int64 index based sparse matrices") + else: + # The number of features is checked regardless of `check_input` + self._check_n_features(X, reset=False) + return X + + def predict(self, X, check_input=True): + """Predict class or regression value for X. + + For a classification model, the predicted class for each sample in X is + returned. For a regression model, the predicted value based on X is + returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. 
Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The predicted classes, or the predict values. + """ + check_is_fitted(self) + X = self._validate_X_predict(X, check_input) + proba = self.tree_.predict(X) + n_samples = X.shape[0] + + # Classification + if is_classifier(self): + if self.n_outputs_ == 1: + return self.classes_.take(np.argmax(proba, axis=1), axis=0) + + else: + class_type = self.classes_[0].dtype + predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type) + for k in range(self.n_outputs_): + predictions[:, k] = self.classes_[k].take( + np.argmax(proba[:, k], axis=1), axis=0 + ) + + return predictions + + # Regression + else: + if self.n_outputs_ == 1: + return proba[:, 0] + + else: + return proba[:, :, 0] + + def apply(self, X, check_input=True): + """Return the index of the leaf that each sample is predicted as. + + .. versionadded:: 0.17 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + X_leaves : array-like of shape (n_samples,) + For each datapoint x in X, return the index of the leaf x + ends up in. Leaves are numbered within + ``[0; self.tree_.node_count)``, possibly with gaps in the + numbering. + """ + check_is_fitted(self) + X = self._validate_X_predict(X, check_input) + return self.tree_.apply(X) + + def decision_path(self, X, check_input=True): + """Return the decision path in the tree. + + .. versionadded:: 0.18 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + indicator : sparse matrix of shape (n_samples, n_nodes) + Return a node indicator CSR matrix where non zero elements + indicates that the samples goes through the nodes. + """ + X = self._validate_X_predict(X, check_input) + return self.tree_.decision_path(X) + + def _prune_tree(self): + """Prune tree using Minimal Cost-Complexity Pruning.""" + check_is_fitted(self) + + if self.ccp_alpha == 0.0: + return + + # build pruned tree + if is_classifier(self): + n_classes = np.atleast_1d(self.n_classes_) + pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_) + else: + pruned_tree = Tree( + self.n_features_in_, + # TODO: the tree shouldn't need this param + np.array([1] * self.n_outputs_, dtype=np.intp), + self.n_outputs_, + ) + _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha) + + self.tree_ = pruned_tree + + def cost_complexity_pruning_path(self, X, y, sample_weight=None): + """Compute the pruning path during Minimal Cost-Complexity Pruning. + + See :ref:`minimal_cost_complexity_pruning` for details on the pruning + process. 
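An illustrative sketch of how the pruning path returned by this method is typically consumed; the dataset and the chosen alpha are arbitrary:

from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)

tree = DecisionTreeClassifier(random_state=0)
path = tree.cost_complexity_pruning_path(X, y)   # Bunch with ccp_alphas and impurities

# Refit with a non-trivial alpha taken from the path to obtain a pruned tree.
alpha = path.ccp_alphas[len(path.ccp_alphas) // 2]
pruned = DecisionTreeClassifier(random_state=0, ccp_alpha=alpha).fit(X, y)
print(pruned.tree_.node_count)                   # smaller than the unpruned tree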
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csc_matrix``. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The target values (class labels) as integers or strings. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. Splits are also + ignored if they would result in any single class carrying a + negative weight in either child node. + + Returns + ------- + ccp_path : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + ccp_alphas : ndarray + Effective alphas of subtree during pruning. + + impurities : ndarray + Sum of the impurities of the subtree leaves for the + corresponding alpha value in ``ccp_alphas``. + """ + est = clone(self).set_params(ccp_alpha=0.0) + est.fit(X, y, sample_weight=sample_weight) + return Bunch(**ccp_pruning_path(est.tree_)) + + @property + def feature_importances_(self): + """Return the feature importances. + + The importance of a feature is computed as the (normalized) total + reduction of the criterion brought by that feature. + It is also known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + Returns + ------- + feature_importances_ : ndarray of shape (n_features,) + Normalized total reduction of criteria by feature + (Gini importance). + """ + check_is_fitted(self) + + return self.tree_.compute_feature_importances() + + +# ============================================================================= +# Public estimators +# ============================================================================= + + +class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree): + """A decision tree classifier. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"gini", "entropy", "log_loss"}, default="gini" + The function to measure the quality of a split. Supported criteria are + "gini" for the Gini impurity and "log_loss" and "entropy" both for the + Shannon information gain, see :ref:`tree_mathematical_formulation`. + + splitter : {"best", "random"}, default="best" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. 
+ A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float or {"sqrt", "log2"}, default=None + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at + each split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the estimator. The features are always + randomly permuted at each split, even if ``splitter`` is set to + ``"best"``. When ``max_features < n_features``, the algorithm will + select ``max_features`` at random at each split before finding the best + split among them. But the best found split may vary across different + runs, even if ``max_features=n_features``. That is the case, if the + improvement of the criterion is identical for several splits and one + split has to be selected at random. To obtain a deterministic behaviour + during fitting, ``random_state`` has to be fixed to an integer. + See :term:`Glossary ` for details. + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + class_weight : dict, list of dict or "balanced", default=None + Weights associated with classes in the form ``{class_label: weight}``. + If None, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. + + Note that for multioutput (including multilabel) weights should be + defined for each class of every column in its own dict. 
For example, + for four-class multilabel classification weights should be + [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of + [{1:1}, {2:5}, {3:1}, {4:1}]. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + For multi-output, the weights of each column of y will be multiplied. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multiclass classifications (i.e. when `n_classes > 2`), + - multioutput classifications (i.e. when `n_outputs_ > 1`), + - classifications trained on data with missing values. + + The constraints hold over the probability of the positive class. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) or list of ndarray + The classes labels (single output problem), + or a list of arrays of class labels (multi-output problem). + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance [4]_. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + max_features_ : int + The inferred value of max_features. + + n_classes_ : int or list of int + The number of classes (for single output problems), + or a list containing the number of classes for each + output (for multi-output problems). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + DecisionTreeRegressor : A decision tree regressor. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. 
To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + The :meth:`predict` method operates using the :func:`numpy.argmax` + function on the outputs of :meth:`predict_proba`. This means that in + case the highest predicted probabilities are tied, the classifier will + predict the tied class with the lowest index in :term:`classes_`. + + References + ---------- + + .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning + + .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification + and Regression Trees", Wadsworth, Belmont, CA, 1984. + + .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical + Learning", Springer, 2009. + + .. [4] L. Breiman, and A. Cutler, "Random Forests", + https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import cross_val_score + >>> from sklearn.tree import DecisionTreeClassifier + >>> clf = DecisionTreeClassifier(random_state=0) + >>> iris = load_iris() + >>> cross_val_score(clf, iris.data, iris.target, cv=10) + ... # doctest: +SKIP + ... + array([ 1. , 0.93..., 0.86..., 0.93..., 0.93..., + 0.93..., 0.93..., 1. , 0.93..., 1. ]) + """ + + _parameter_constraints: dict = { + **BaseDecisionTree._parameter_constraints, + "criterion": [StrOptions({"gini", "entropy", "log_loss"}), Hidden(Criterion)], + "class_weight": [dict, list, StrOptions({"balanced"}), None], + } + + def __init__( + self, + *, + criterion="gini", + splitter="best", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=None, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + class_weight=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + class_weight=class_weight, + random_state=random_state, + min_impurity_decrease=min_impurity_decrease, + monotonic_cst=monotonic_cst, + ccp_alpha=ccp_alpha, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, check_input=True): + """Build a decision tree classifier from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csc_matrix``. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The target values (class labels) as integers or strings. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. Splits are also + ignored if they would result in any single class carrying a + negative weight in either child node. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + self : DecisionTreeClassifier + Fitted estimator. 
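Because `monotonic_cst` is a relatively recent addition, a short illustrative sketch of passing it for a binary problem follows; it assumes a scikit-learn version that ships this parameter, and the feature roles are arbitrary:

from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(
    n_samples=200, n_features=4, n_informative=4, n_redundant=0, random_state=0
)

# Constrain the predicted probability of the positive class to be
# non-decreasing in feature 0, non-increasing in feature 1, and
# unconstrained in the remaining features.
clf = DecisionTreeClassifier(monotonic_cst=[1, -1, 0, 0], random_state=0)
clf.fit(X, y)
proba = clf.predict_proba(X[:5])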
+ """ + + super()._fit( + X, + y, + sample_weight=sample_weight, + check_input=check_input, + ) + return self + + def predict_proba(self, X, check_input=True): + """Predict class probabilities of the input samples X. + + The predicted class probability is the fraction of samples of the same + class in a leaf. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \ + such arrays if n_outputs > 1 + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + X = self._validate_X_predict(X, check_input) + proba = self.tree_.predict(X) + + if self.n_outputs_ == 1: + return proba[:, : self.n_classes_] + else: + all_proba = [] + for k in range(self.n_outputs_): + proba_k = proba[:, k, : self.n_classes_[k]] + all_proba.append(proba_k) + return all_proba + + def predict_log_proba(self, X): + """Predict class log-probabilities of the input samples X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \ + such arrays if n_outputs > 1 + The class log-probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + proba = self.predict_proba(X) + + if self.n_outputs_ == 1: + return np.log(proba) + + else: + for k in range(self.n_outputs_): + proba[k] = np.log(proba[k]) + + return proba + + def _more_tags(self): + # XXX: nan is only support for dense arrays, but we set this for common test to + # pass, specifically: check_estimators_nan_inf + allow_nan = self.splitter == "best" and self.criterion in { + "gini", + "log_loss", + "entropy", + } + return {"multilabel": True, "allow_nan": allow_nan} + + +class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree): + """A decision tree regressor. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"squared_error", "friedman_mse", "absolute_error", \ + "poisson"}, default="squared_error" + The function to measure the quality of a split. Supported criteria + are "squared_error" for the mean squared error, which is equal to + variance reduction as feature selection criterion and minimizes the L2 + loss using the mean of each terminal node, "friedman_mse", which uses + mean squared error with Friedman's improvement score for potential + splits, "absolute_error" for the mean absolute error, which minimizes + the L1 loss using the median of each terminal node, and "poisson" which + uses reduction in Poisson deviance to find splits. + + .. versionadded:: 0.18 + Mean Absolute Error (MAE) criterion. + + .. versionadded:: 0.24 + Poisson deviance criterion. + + splitter : {"best", "random"}, default="best" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. 
+ + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float or {"sqrt", "log2"}, default=None + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the estimator. The features are always + randomly permuted at each split, even if ``splitter`` is set to + ``"best"``. When ``max_features < n_features``, the algorithm will + select ``max_features`` at random at each split before finding the best + split among them. But the best found split may vary across different + runs, even if ``max_features=n_features``. That is the case, if the + improvement of the criterion is identical for several splits and one + split has to be selected at random. To obtain a deterministic behaviour + during fitting, ``random_state`` has to be fixed to an integer. + See :term:`Glossary ` for details. + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. 
+ + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multioutput regressions (i.e. when `n_outputs_ > 1`), + - regressions trained on data with missing values. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + feature_importances_ : ndarray of shape (n_features,) + The feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the + (normalized) total reduction of the criterion brought + by that feature. It is also known as the Gini importance [4]_. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + max_features_ : int + The inferred value of max_features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + DecisionTreeClassifier : A decision tree classifier. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + + .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning + + .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification + and Regression Trees", Wadsworth, Belmont, CA, 1984. + + .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical + Learning", Springer, 2009. + + .. [4] L. Breiman, and A. 
Cutler, "Random Forests", + https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.model_selection import cross_val_score + >>> from sklearn.tree import DecisionTreeRegressor + >>> X, y = load_diabetes(return_X_y=True) + >>> regressor = DecisionTreeRegressor(random_state=0) + >>> cross_val_score(regressor, X, y, cv=10) + ... # doctest: +SKIP + ... + array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50..., + 0.16..., 0.11..., -0.73..., -0.30..., -0.00...]) + """ + + _parameter_constraints: dict = { + **BaseDecisionTree._parameter_constraints, + "criterion": [ + StrOptions({"squared_error", "friedman_mse", "absolute_error", "poisson"}), + Hidden(Criterion), + ], + } + + def __init__( + self, + *, + criterion="squared_error", + splitter="best", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=None, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + random_state=random_state, + min_impurity_decrease=min_impurity_decrease, + ccp_alpha=ccp_alpha, + monotonic_cst=monotonic_cst, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, check_input=True): + """Build a decision tree regressor from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csc_matrix``. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The target values (real numbers). Use ``dtype=np.float64`` and + ``order='C'`` for maximum efficiency. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + self : DecisionTreeRegressor + Fitted estimator. + """ + + super()._fit( + X, + y, + sample_weight=sample_weight, + check_input=check_input, + ) + return self + + def _compute_partial_dependence_recursion(self, grid, target_features): + """Fast partial dependence computation. + + Parameters + ---------- + grid : ndarray of shape (n_samples, n_target_features) + The grid points on which the partial dependence should be + evaluated. + target_features : ndarray of shape (n_target_features) + The set of target features for which the partial dependence + should be evaluated. + + Returns + ------- + averaged_predictions : ndarray of shape (n_samples,) + The value of the partial dependence function on each grid point. 
+ """ + grid = np.asarray(grid, dtype=DTYPE, order="C") + averaged_predictions = np.zeros( + shape=grid.shape[0], dtype=np.float64, order="C" + ) + + self.tree_.compute_partial_dependence( + grid, target_features, averaged_predictions + ) + return averaged_predictions + + def _more_tags(self): + # XXX: nan is only support for dense arrays, but we set this for common test to + # pass, specifically: check_estimators_nan_inf + allow_nan = self.splitter == "best" and self.criterion in { + "squared_error", + "friedman_mse", + "poisson", + } + return {"allow_nan": allow_nan} + + +class ExtraTreeClassifier(DecisionTreeClassifier): + """An extremely randomized tree classifier. + + Extra-trees differ from classic decision trees in the way they are built. + When looking for the best split to separate the samples of a node into two + groups, random splits are drawn for each of the `max_features` randomly + selected features and the best split among those is chosen. When + `max_features` is set 1, this amounts to building a totally random + decision tree. + + Warning: Extra-trees should only be used within ensemble methods. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"gini", "entropy", "log_loss"}, default="gini" + The function to measure the quality of a split. Supported criteria are + "gini" for the Gini impurity and "log_loss" and "entropy" both for the + Shannon information gain, see :ref:`tree_mathematical_formulation`. + + splitter : {"random", "best"}, default="random" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float, {"sqrt", "log2"} or None, default="sqrt" + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at + each split. + - If "sqrt", then `max_features=sqrt(n_features)`. 
+ - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to `"sqrt"`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Used to pick randomly the `max_features` used at each split. + See :term:`Glossary ` for details. + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + class_weight : dict, list of dict or "balanced", default=None + Weights associated with classes in the form ``{class_label: weight}``. + If None, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. + + Note that for multioutput (including multilabel) weights should be + defined for each class of every column in its own dict. For example, + for four-class multilabel classification weights should be + [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of + [{1:1}, {2:5}, {3:1}, {4:1}]. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + For multi-output, the weights of each column of y will be multiplied. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multiclass classifications (i.e. when `n_classes > 2`), + - multioutput classifications (i.e. when `n_outputs_ > 1`), + - classifications trained on data with missing values. + + The constraints hold over the probability of the positive class. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 1.4 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) or list of ndarray + The classes labels (single output problem), + or a list of arrays of class labels (multi-output problem). + + max_features_ : int + The inferred value of max_features. + + n_classes_ : int or list of int + The number of classes (for single output problems), + or a list containing the number of classes for each + output (for multi-output problems). + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + ExtraTreeRegressor : An extremely randomized tree regressor. + sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier. + sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor. + sklearn.ensemble.RandomForestClassifier : A random forest classifier. + sklearn.ensemble.RandomForestRegressor : A random forest regressor. + sklearn.ensemble.RandomTreesEmbedding : An ensemble of + totally random trees. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + + .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", + Machine Learning, 63(1), 3-42, 2006. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.ensemble import BaggingClassifier + >>> from sklearn.tree import ExtraTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> extra_tree = ExtraTreeClassifier(random_state=0) + >>> cls = BaggingClassifier(extra_tree, random_state=0).fit( + ... X_train, y_train) + >>> cls.score(X_test, y_test) + 0.8947... 
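# --- Editor's illustrative sketch, not part of the scikit-learn source --------
# Per the parameter docs above, ExtraTreeClassifier is essentially
# DecisionTreeClassifier with different defaults (splitter="random",
# max_features="sqrt"), which is where the extra randomization comes from.
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier

dt = DecisionTreeClassifier().get_params()
et = ExtraTreeClassifier().get_params()
print({k: (dt[k], et[k]) for k in dt if dt[k] != et[k]})
# -> differs only in 'splitter' and 'max_features'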
+ """ + + def __init__( + self, + *, + criterion="gini", + splitter="random", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="sqrt", + random_state=None, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + class_weight=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + class_weight=class_weight, + min_impurity_decrease=min_impurity_decrease, + random_state=random_state, + ccp_alpha=ccp_alpha, + monotonic_cst=monotonic_cst, + ) + + +class ExtraTreeRegressor(DecisionTreeRegressor): + """An extremely randomized tree regressor. + + Extra-trees differ from classic decision trees in the way they are built. + When looking for the best split to separate the samples of a node into two + groups, random splits are drawn for each of the `max_features` randomly + selected features and the best split among those is chosen. When + `max_features` is set 1, this amounts to building a totally random + decision tree. + + Warning: Extra-trees should only be used within ensemble methods. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"squared_error", "friedman_mse", "absolute_error", "poisson"}, \ + default="squared_error" + The function to measure the quality of a split. Supported criteria + are "squared_error" for the mean squared error, which is equal to + variance reduction as feature selection criterion and minimizes the L2 + loss using the mean of each terminal node, "friedman_mse", which uses + mean squared error with Friedman's improvement score for potential + splits, "absolute_error" for the mean absolute error, which minimizes + the L1 loss using the median of each terminal node, and "poisson" which + uses reduction in Poisson deviance to find splits. + + .. versionadded:: 0.18 + Mean Absolute Error (MAE) criterion. + + .. versionadded:: 0.24 + Poisson deviance criterion. + + splitter : {"random", "best"}, default="random" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. 
versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float, {"sqrt", "log2"} or None, default=1.0 + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to `1.0`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Used to pick randomly the `max_features` used at each split. + See :term:`Glossary ` for details. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multioutput regressions (i.e. when `n_outputs_ > 1`), + - regressions trained on data with missing values. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + max_features_ : int + The inferred value of max_features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + feature_importances_ : ndarray of shape (n_features,) + Return impurity-based feature importances (the higher, the more + important the feature). 
+ + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + ExtraTreeClassifier : An extremely randomized tree classifier. + sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier. + sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + + .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", + Machine Learning, 63(1), 3-42, 2006. + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.ensemble import BaggingRegressor + >>> from sklearn.tree import ExtraTreeRegressor + >>> X, y = load_diabetes(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> extra_tree = ExtraTreeRegressor(random_state=0) + >>> reg = BaggingRegressor(extra_tree, random_state=0).fit( + ... X_train, y_train) + >>> reg.score(X_test, y_test) + 0.33... + """ + + def __init__( + self, + *, + criterion="squared_error", + splitter="random", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=1.0, + random_state=None, + min_impurity_decrease=0.0, + max_leaf_nodes=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + min_impurity_decrease=min_impurity_decrease, + random_state=random_state, + ccp_alpha=ccp_alpha, + monotonic_cst=monotonic_cst, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_criterion.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_criterion.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..73f2a4422d58fc8b58e4caa6d51ccd6bc4031d4b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_criterion.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_criterion.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_criterion.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6538b9b824a79ee00b955415c516df56fcfe2797 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_criterion.pxd @@ -0,0 +1,117 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Joel Nothman +# Arnaud Joly +# Jacob Schreiber +# +# License: BSD 3 clause + +# See _criterion.pyx for implementation details. 
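# --- Editor's illustrative sketch, not part of the scikit-learn source --------
# Criterion.impurity_improvement (declared just below) computes the weighted
# impurity decrease that min_impurity_decrease is compared against in the
# estimator docstrings above:
#     N_t / N * (impurity - N_t_R / N_t * right_impurity
#                         - N_t_L / N_t * left_impurity)
# A plain-Python transcription with made-up node statistics, for reference:
def impurity_improvement(N, N_t, N_t_L, N_t_R,
                         impurity_parent, impurity_left, impurity_right):
    return (N_t / N) * (
        impurity_parent
        - (N_t_R / N_t) * impurity_right
        - (N_t_L / N_t) * impurity_left
    )

# A node holding 40 of 100 (weighted) samples with impurity 0.5, split into
# children of 25 and 15 samples with impurities 0.32 and 0.18:
print(impurity_improvement(100, 40, 25, 15, 0.5, 0.32, 0.18))   # ~0.093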
+cimport numpy as cnp + +from ..utils._typedefs cimport float64_t, intp_t + + +cdef class Criterion: + # The criterion computes the impurity of a node and the reduction of + # impurity of a split on that node. It also computes the output statistics + # such as the mean in regression and class probabilities in classification. + + # Internal structures + cdef const float64_t[:, ::1] y # Values of y + cdef const float64_t[:] sample_weight # Sample weights + + cdef const intp_t[:] sample_indices # Sample indices in X, y + cdef intp_t start # samples[start:pos] are the samples in the left node + cdef intp_t pos # samples[pos:end] are the samples in the right node + cdef intp_t end + cdef intp_t n_missing # Number of missing values for the feature being evaluated + cdef bint missing_go_to_left # Whether missing values go to the left node + + cdef intp_t n_outputs # Number of outputs + cdef intp_t n_samples # Number of samples + cdef intp_t n_node_samples # Number of samples in the node (end-start) + cdef float64_t weighted_n_samples # Weighted number of samples (in total) + cdef float64_t weighted_n_node_samples # Weighted number of samples in the node + cdef float64_t weighted_n_left # Weighted number of samples in the left node + cdef float64_t weighted_n_right # Weighted number of samples in the right node + cdef float64_t weighted_n_missing # Weighted number of samples that are missing + + # The criterion object is maintained such that left and right collected + # statistics correspond to samples[start:pos] and samples[pos:end]. + + # Methods + cdef int init( + self, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight, + float64_t weighted_n_samples, + const intp_t[:] sample_indices, + intp_t start, + intp_t end + ) except -1 nogil + cdef void init_sum_missing(self) + cdef void init_missing(self, intp_t n_missing) noexcept nogil + cdef int reset(self) except -1 nogil + cdef int reverse_reset(self) except -1 nogil + cdef int update(self, intp_t new_pos) except -1 nogil + cdef float64_t node_impurity(self) noexcept nogil + cdef void children_impurity( + self, + float64_t* impurity_left, + float64_t* impurity_right + ) noexcept nogil + cdef void node_value( + self, + float64_t* dest + ) noexcept nogil + cdef void clip_node_value( + self, + float64_t* dest, + float64_t lower_bound, + float64_t upper_bound + ) noexcept nogil + cdef float64_t middle_value(self) noexcept nogil + cdef float64_t impurity_improvement( + self, + float64_t impurity_parent, + float64_t impurity_left, + float64_t impurity_right + ) noexcept nogil + cdef float64_t proxy_impurity_improvement(self) noexcept nogil + cdef bint check_monotonicity( + self, + cnp.int8_t monotonic_cst, + float64_t lower_bound, + float64_t upper_bound, + ) noexcept nogil + cdef inline bint _check_monotonicity( + self, + cnp.int8_t monotonic_cst, + float64_t lower_bound, + float64_t upper_bound, + float64_t sum_left, + float64_t sum_right, + ) noexcept nogil + +cdef class ClassificationCriterion(Criterion): + """Abstract criterion for classification.""" + + cdef intp_t[::1] n_classes + cdef intp_t max_n_classes + + cdef float64_t[:, ::1] sum_total # The sum of the weighted count of each label. 
+ cdef float64_t[:, ::1] sum_left # Same as above, but for the left side of the split + cdef float64_t[:, ::1] sum_right # Same as above, but for the right side of the split + cdef float64_t[:, ::1] sum_missing # Same as above, but for missing values in X + +cdef class RegressionCriterion(Criterion): + """Abstract regression criterion.""" + + cdef float64_t sq_sum_total + + cdef float64_t[::1] sum_total # The sum of w*y. + cdef float64_t[::1] sum_left # Same as above, but for the left side of the split + cdef float64_t[::1] sum_right # Same as above, but for the right side of the split + cdef float64_t[::1] sum_missing # Same as above, but for missing values in X diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_export.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_export.py new file mode 100644 index 0000000000000000000000000000000000000000..f6492cf6a821f60fff8cb76da2e53ebe45ff1faa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_export.py @@ -0,0 +1,1135 @@ +""" +This module defines export functions for decision trees. +""" + +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Noel Dawe +# Satrajit Gosh +# Trevor Stephens +# Li Li +# Giuseppe Vettigli +# License: BSD 3 clause +from collections.abc import Iterable +from io import StringIO +from numbers import Integral + +import numpy as np + +from ..base import is_classifier +from ..utils._param_validation import HasMethods, Interval, StrOptions, validate_params +from ..utils.validation import check_array, check_is_fitted +from . import DecisionTreeClassifier, DecisionTreeRegressor, _criterion, _tree +from ._reingold_tilford import Tree, buchheim + + +def _color_brew(n): + """Generate n colors with equally spaced hues. + + Parameters + ---------- + n : int + The number of colors required. + + Returns + ------- + color_list : list, length n + List of n tuples of form (R, G, B) being the components of each color. 
+ """ + color_list = [] + + # Initialize saturation & value; calculate chroma & value shift + s, v = 0.75, 0.9 + c = s * v + m = v - c + + for h in np.arange(25, 385, 360.0 / n).astype(int): + # Calculate some intermediate values + h_bar = h / 60.0 + x = c * (1 - abs((h_bar % 2) - 1)) + # Initialize RGB with same hue & chroma as our color + rgb = [ + (c, x, 0), + (x, c, 0), + (0, c, x), + (0, x, c), + (x, 0, c), + (c, 0, x), + (c, x, 0), + ] + r, g, b = rgb[int(h_bar)] + # Shift the initial RGB values to match value and store + rgb = [(int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))] + color_list.append(rgb) + + return color_list + + +class Sentinel: + def __repr__(self): + return '"tree.dot"' + + +SENTINEL = Sentinel() + + +@validate_params( + { + "decision_tree": [DecisionTreeClassifier, DecisionTreeRegressor], + "max_depth": [Interval(Integral, 0, None, closed="left"), None], + "feature_names": ["array-like", None], + "class_names": ["array-like", "boolean", None], + "label": [StrOptions({"all", "root", "none"})], + "filled": ["boolean"], + "impurity": ["boolean"], + "node_ids": ["boolean"], + "proportion": ["boolean"], + "rounded": ["boolean"], + "precision": [Interval(Integral, 0, None, closed="left"), None], + "ax": "no_validation", # delegate validation to matplotlib + "fontsize": [Interval(Integral, 0, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def plot_tree( + decision_tree, + *, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + impurity=True, + node_ids=False, + proportion=False, + rounded=False, + precision=3, + ax=None, + fontsize=None, +): + """Plot a decision tree. + + The sample counts that are shown are weighted with any sample_weights that + might be present. + + The visualization is fit automatically to the size of the axis. + Use the ``figsize`` or ``dpi`` arguments of ``plt.figure`` to control + the size of the rendering. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.21 + + Parameters + ---------- + decision_tree : decision tree regressor or classifier + The decision tree to be plotted. + + max_depth : int, default=None + The maximum depth of the representation. If None, the tree is fully + generated. + + feature_names : array-like of str, default=None + Names of each of the features. + If None, generic names will be used ("x[0]", "x[1]", ...). + + class_names : array-like of str or True, default=None + Names of each of the target classes in ascending numerical order. + Only relevant for classification and not supported for multi-output. + If ``True``, shows a symbolic representation of the class name. + + label : {'all', 'root', 'none'}, default='all' + Whether to show informative labels for impurity, etc. + Options include 'all' to show at every node, 'root' to show only at + the top root node, or 'none' to not show at any node. + + filled : bool, default=False + When set to ``True``, paint nodes to indicate majority class for + classification, extremity of values for regression, or purity of node + for multi-output. + + impurity : bool, default=True + When set to ``True``, show the impurity at each node. + + node_ids : bool, default=False + When set to ``True``, show the ID number on each node. + + proportion : bool, default=False + When set to ``True``, change the display of 'values' and/or 'samples' + to be proportions and percentages respectively. 
+ + rounded : bool, default=False + When set to ``True``, draw node boxes with rounded corners and use + Helvetica fonts instead of Times-Roman. + + precision : int, default=3 + Number of digits of precision for floating point in the values of + impurity, threshold and value attributes of each node. + + ax : matplotlib axis, default=None + Axes to plot to. If None, use current axis. Any previous content + is cleared. + + fontsize : int, default=None + Size of text font. If None, determined automatically to fit figure. + + Returns + ------- + annotations : list of artists + List containing the artists for the annotation boxes making up the + tree. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn import tree + + >>> clf = tree.DecisionTreeClassifier(random_state=0) + >>> iris = load_iris() + + >>> clf = clf.fit(iris.data, iris.target) + >>> tree.plot_tree(clf) + [...] + """ + + check_is_fitted(decision_tree) + + exporter = _MPLTreeExporter( + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rounded=rounded, + precision=precision, + fontsize=fontsize, + ) + return exporter.export(decision_tree, ax=ax) + + +class _BaseTreeExporter: + def __init__( + self, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + impurity=True, + node_ids=False, + proportion=False, + rounded=False, + precision=3, + fontsize=None, + ): + self.max_depth = max_depth + self.feature_names = feature_names + self.class_names = class_names + self.label = label + self.filled = filled + self.impurity = impurity + self.node_ids = node_ids + self.proportion = proportion + self.rounded = rounded + self.precision = precision + self.fontsize = fontsize + + def get_color(self, value): + # Find the appropriate color & intensity for a node + if self.colors["bounds"] is None: + # Classification tree + color = list(self.colors["rgb"][np.argmax(value)]) + sorted_values = sorted(value, reverse=True) + if len(sorted_values) == 1: + alpha = 0.0 + else: + alpha = (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1]) + else: + # Regression tree or multi-output + color = list(self.colors["rgb"][0]) + alpha = (value - self.colors["bounds"][0]) / ( + self.colors["bounds"][1] - self.colors["bounds"][0] + ) + # compute the color as alpha against white + color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color] + # Return html color code in #RRGGBB format + return "#%2x%2x%2x" % tuple(color) + + def get_fill_color(self, tree, node_id): + # Fetch appropriate color for node + if "rgb" not in self.colors: + # Initialize colors and bounds if required + self.colors["rgb"] = _color_brew(tree.n_classes[0]) + if tree.n_outputs != 1: + # Find max and min impurities for multi-output + self.colors["bounds"] = (np.min(-tree.impurity), np.max(-tree.impurity)) + elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1: + # Find max and min values in leaf nodes for regression + self.colors["bounds"] = (np.min(tree.value), np.max(tree.value)) + if tree.n_outputs == 1: + node_val = tree.value[node_id][0, :] + if ( + tree.n_classes[0] == 1 + and isinstance(node_val, Iterable) + and self.colors["bounds"] is not None + ): + # Unpack the float only for the regression tree case. + # Classification tree requires an Iterable in `get_color`. 
+ node_val = node_val.item() + else: + # If multi-output color node by impurity + node_val = -tree.impurity[node_id] + return self.get_color(node_val) + + def node_to_str(self, tree, node_id, criterion): + # Generate the node content string + if tree.n_outputs == 1: + value = tree.value[node_id][0, :] + else: + value = tree.value[node_id] + + # Should labels be shown? + labels = (self.label == "root" and node_id == 0) or self.label == "all" + + characters = self.characters + node_string = characters[-1] + + # Write node ID + if self.node_ids: + if labels: + node_string += "node " + node_string += characters[0] + str(node_id) + characters[4] + + # Write decision criteria + if tree.children_left[node_id] != _tree.TREE_LEAF: + # Always write node decision criteria, except for leaves + if self.feature_names is not None: + feature = self.feature_names[tree.feature[node_id]] + else: + feature = "x%s%s%s" % ( + characters[1], + tree.feature[node_id], + characters[2], + ) + node_string += "%s %s %s%s" % ( + feature, + characters[3], + round(tree.threshold[node_id], self.precision), + characters[4], + ) + + # Write impurity + if self.impurity: + if isinstance(criterion, _criterion.FriedmanMSE): + criterion = "friedman_mse" + elif isinstance(criterion, _criterion.MSE) or criterion == "squared_error": + criterion = "squared_error" + elif not isinstance(criterion, str): + criterion = "impurity" + if labels: + node_string += "%s = " % criterion + node_string += ( + str(round(tree.impurity[node_id], self.precision)) + characters[4] + ) + + # Write node sample count + if labels: + node_string += "samples = " + if self.proportion: + percent = ( + 100.0 * tree.n_node_samples[node_id] / float(tree.n_node_samples[0]) + ) + node_string += str(round(percent, 1)) + "%" + characters[4] + else: + node_string += str(tree.n_node_samples[node_id]) + characters[4] + + # Write node class distribution / regression value + if not self.proportion and tree.n_classes[0] != 1: + # For classification this will show the proportion of samples + value = value * tree.weighted_n_node_samples[node_id] + if labels: + node_string += "value = " + if tree.n_classes[0] == 1: + # Regression + value_text = np.around(value, self.precision) + elif self.proportion: + # Classification + value_text = np.around(value, self.precision) + elif np.all(np.equal(np.mod(value, 1), 0)): + # Classification without floating-point weights + value_text = value.astype(int) + else: + # Classification with floating-point weights + value_text = np.around(value, self.precision) + # Strip whitespace + value_text = str(value_text.astype("S32")).replace("b'", "'") + value_text = value_text.replace("' '", ", ").replace("'", "") + if tree.n_classes[0] == 1 and tree.n_outputs == 1: + value_text = value_text.replace("[", "").replace("]", "") + value_text = value_text.replace("\n ", characters[4]) + node_string += value_text + characters[4] + + # Write node majority class + if ( + self.class_names is not None + and tree.n_classes[0] != 1 + and tree.n_outputs == 1 + ): + # Only done for single-output classification trees + if labels: + node_string += "class = " + if self.class_names is not True: + class_name = self.class_names[np.argmax(value)] + else: + class_name = "y%s%s%s" % ( + characters[1], + np.argmax(value), + characters[2], + ) + node_string += class_name + + # Clean up any trailing newlines + if node_string.endswith(characters[4]): + node_string = node_string[: -len(characters[4])] + + return node_string + characters[5] + + +class 
_DOTTreeExporter(_BaseTreeExporter): + def __init__( + self, + out_file=SENTINEL, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + leaves_parallel=False, + impurity=True, + node_ids=False, + proportion=False, + rotate=False, + rounded=False, + special_characters=False, + precision=3, + fontname="helvetica", + ): + super().__init__( + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rounded=rounded, + precision=precision, + ) + self.leaves_parallel = leaves_parallel + self.out_file = out_file + self.special_characters = special_characters + self.fontname = fontname + self.rotate = rotate + + # PostScript compatibility for special characters + if special_characters: + self.characters = ["#", "", "", "≤", "
", ">", "<"] + else: + self.characters = ["#", "[", "]", "<=", "\\n", '"', '"'] + + # The depth of each node for plotting with 'leaf' option + self.ranks = {"leaves": []} + # The colors to render each node with + self.colors = {"bounds": None} + + def export(self, decision_tree): + # Check length of feature_names before getting into the tree node + # Raise error if length of feature_names does not match + # n_features_in_ in the decision_tree + if self.feature_names is not None: + if len(self.feature_names) != decision_tree.n_features_in_: + raise ValueError( + "Length of feature_names, %d does not match number of features, %d" + % (len(self.feature_names), decision_tree.n_features_in_) + ) + # each part writes to out_file + self.head() + # Now recurse the tree and add node & edge attributes + if isinstance(decision_tree, _tree.Tree): + self.recurse(decision_tree, 0, criterion="impurity") + else: + self.recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion) + + self.tail() + + def tail(self): + # If required, draw leaf nodes at same depth as each other + if self.leaves_parallel: + for rank in sorted(self.ranks): + self.out_file.write( + "{rank=same ; " + "; ".join(r for r in self.ranks[rank]) + "} ;\n" + ) + self.out_file.write("}") + + def head(self): + self.out_file.write("digraph Tree {\n") + + # Specify node aesthetics + self.out_file.write("node [shape=box") + rounded_filled = [] + if self.filled: + rounded_filled.append("filled") + if self.rounded: + rounded_filled.append("rounded") + if len(rounded_filled) > 0: + self.out_file.write( + ', style="%s", color="black"' % ", ".join(rounded_filled) + ) + + self.out_file.write(', fontname="%s"' % self.fontname) + self.out_file.write("] ;\n") + + # Specify graph & edge aesthetics + if self.leaves_parallel: + self.out_file.write("graph [ranksep=equally, splines=polyline] ;\n") + + self.out_file.write('edge [fontname="%s"] ;\n' % self.fontname) + + if self.rotate: + self.out_file.write("rankdir=LR ;\n") + + def recurse(self, tree, node_id, criterion, parent=None, depth=0): + if node_id == _tree.TREE_LEAF: + raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) + + left_child = tree.children_left[node_id] + right_child = tree.children_right[node_id] + + # Add node with description + if self.max_depth is None or depth <= self.max_depth: + # Collect ranks for 'leaf' option in plot_options + if left_child == _tree.TREE_LEAF: + self.ranks["leaves"].append(str(node_id)) + elif str(depth) not in self.ranks: + self.ranks[str(depth)] = [str(node_id)] + else: + self.ranks[str(depth)].append(str(node_id)) + + self.out_file.write( + "%d [label=%s" % (node_id, self.node_to_str(tree, node_id, criterion)) + ) + + if self.filled: + self.out_file.write( + ', fillcolor="%s"' % self.get_fill_color(tree, node_id) + ) + self.out_file.write("] ;\n") + + if parent is not None: + # Add edge to parent + self.out_file.write("%d -> %d" % (parent, node_id)) + if parent == 0: + # Draw True/False labels if parent is root node + angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2) + self.out_file.write(" [labeldistance=2.5, labelangle=") + if node_id == 1: + self.out_file.write('%d, headlabel="True"]' % angles[0]) + else: + self.out_file.write('%d, headlabel="False"]' % angles[1]) + self.out_file.write(" ;\n") + + if left_child != _tree.TREE_LEAF: + self.recurse( + tree, + left_child, + criterion=criterion, + parent=node_id, + depth=depth + 1, + ) + self.recurse( + tree, + right_child, + criterion=criterion, + parent=node_id, + depth=depth + 1, + ) + + 
else: + self.ranks["leaves"].append(str(node_id)) + + self.out_file.write('%d [label="(...)"' % node_id) + if self.filled: + # color cropped nodes grey + self.out_file.write(', fillcolor="#C0C0C0"') + self.out_file.write("] ;\n" % node_id) + + if parent is not None: + # Add edge to parent + self.out_file.write("%d -> %d ;\n" % (parent, node_id)) + + +class _MPLTreeExporter(_BaseTreeExporter): + def __init__( + self, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + impurity=True, + node_ids=False, + proportion=False, + rounded=False, + precision=3, + fontsize=None, + ): + super().__init__( + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rounded=rounded, + precision=precision, + ) + self.fontsize = fontsize + + # The depth of each node for plotting with 'leaf' option + self.ranks = {"leaves": []} + # The colors to render each node with + self.colors = {"bounds": None} + + self.characters = ["#", "[", "]", "<=", "\n", "", ""] + self.bbox_args = dict() + if self.rounded: + self.bbox_args["boxstyle"] = "round" + + self.arrow_args = dict(arrowstyle="<-") + + def _make_tree(self, node_id, et, criterion, depth=0): + # traverses _tree.Tree recursively, builds intermediate + # "_reingold_tilford.Tree" object + name = self.node_to_str(et, node_id, criterion=criterion) + if et.children_left[node_id] != _tree.TREE_LEAF and ( + self.max_depth is None or depth <= self.max_depth + ): + children = [ + self._make_tree( + et.children_left[node_id], et, criterion, depth=depth + 1 + ), + self._make_tree( + et.children_right[node_id], et, criterion, depth=depth + 1 + ), + ] + else: + return Tree(name, node_id) + return Tree(name, node_id, *children) + + def export(self, decision_tree, ax=None): + import matplotlib.pyplot as plt + from matplotlib.text import Annotation + + if ax is None: + ax = plt.gca() + ax.clear() + ax.set_axis_off() + my_tree = self._make_tree(0, decision_tree.tree_, decision_tree.criterion) + draw_tree = buchheim(my_tree) + + # important to make sure we're still + # inside the axis after drawing the box + # this makes sense because the width of a box + # is about the same as the distance between boxes + max_x, max_y = draw_tree.max_extents() + 1 + ax_width = ax.get_window_extent().width + ax_height = ax.get_window_extent().height + + scale_x = ax_width / max_x + scale_y = ax_height / max_y + self.recurse(draw_tree, decision_tree.tree_, ax, max_x, max_y) + + anns = [ann for ann in ax.get_children() if isinstance(ann, Annotation)] + + # update sizes of all bboxes + renderer = ax.figure.canvas.get_renderer() + + for ann in anns: + ann.update_bbox_position_size(renderer) + + if self.fontsize is None: + # get figure to data transform + # adjust fontsize to avoid overlap + # get max box width and height + extents = [ann.get_bbox_patch().get_window_extent() for ann in anns] + max_width = max([extent.width for extent in extents]) + max_height = max([extent.height for extent in extents]) + # width should be around scale_x in axis coordinates + size = anns[0].get_fontsize() * min( + scale_x / max_width, scale_y / max_height + ) + for ann in anns: + ann.set_fontsize(size) + + return anns + + def recurse(self, node, tree, ax, max_x, max_y, depth=0): + import matplotlib.pyplot as plt + + kwargs = dict( + bbox=self.bbox_args.copy(), + ha="center", + va="center", + zorder=100 - 10 * depth, + xycoords="axes fraction", + 
arrowprops=self.arrow_args.copy(), + ) + kwargs["arrowprops"]["edgecolor"] = plt.rcParams["text.color"] + + if self.fontsize is not None: + kwargs["fontsize"] = self.fontsize + + # offset things by .5 to center them in plot + xy = ((node.x + 0.5) / max_x, (max_y - node.y - 0.5) / max_y) + + if self.max_depth is None or depth <= self.max_depth: + if self.filled: + kwargs["bbox"]["fc"] = self.get_fill_color(tree, node.tree.node_id) + else: + kwargs["bbox"]["fc"] = ax.get_facecolor() + + if node.parent is None: + # root + ax.annotate(node.tree.label, xy, **kwargs) + else: + xy_parent = ( + (node.parent.x + 0.5) / max_x, + (max_y - node.parent.y - 0.5) / max_y, + ) + ax.annotate(node.tree.label, xy_parent, xy, **kwargs) + for child in node.children: + self.recurse(child, tree, ax, max_x, max_y, depth=depth + 1) + + else: + xy_parent = ( + (node.parent.x + 0.5) / max_x, + (max_y - node.parent.y - 0.5) / max_y, + ) + kwargs["bbox"]["fc"] = "grey" + ax.annotate("\n (...) \n", xy_parent, xy, **kwargs) + + +@validate_params( + { + "decision_tree": "no_validation", + "out_file": [str, None, HasMethods("write")], + "max_depth": [Interval(Integral, 0, None, closed="left"), None], + "feature_names": ["array-like", None], + "class_names": ["array-like", "boolean", None], + "label": [StrOptions({"all", "root", "none"})], + "filled": ["boolean"], + "leaves_parallel": ["boolean"], + "impurity": ["boolean"], + "node_ids": ["boolean"], + "proportion": ["boolean"], + "rotate": ["boolean"], + "rounded": ["boolean"], + "special_characters": ["boolean"], + "precision": [Interval(Integral, 0, None, closed="left"), None], + "fontname": [str], + }, + prefer_skip_nested_validation=True, +) +def export_graphviz( + decision_tree, + out_file=None, + *, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + leaves_parallel=False, + impurity=True, + node_ids=False, + proportion=False, + rotate=False, + rounded=False, + special_characters=False, + precision=3, + fontname="helvetica", +): + """Export a decision tree in DOT format. + + This function generates a GraphViz representation of the decision tree, + which is then written into `out_file`. Once exported, graphical renderings + can be generated using, for example:: + + $ dot -Tps tree.dot -o tree.ps (PostScript format) + $ dot -Tpng tree.dot -o tree.png (PNG format) + + The sample counts that are shown are weighted with any sample_weights that + might be present. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + decision_tree : object + The decision tree estimator to be exported to GraphViz. + + out_file : object or str, default=None + Handle or name of the output file. If ``None``, the result is + returned as a string. + + .. versionchanged:: 0.20 + Default of out_file changed from "tree.dot" to None. + + max_depth : int, default=None + The maximum depth of the representation. If None, the tree is fully + generated. + + feature_names : array-like of shape (n_features,), default=None + An array containing the feature names. + If None, generic names will be used ("x[0]", "x[1]", ...). + + class_names : array-like of shape (n_classes,) or bool, default=None + Names of each of the target classes in ascending numerical order. + Only relevant for classification and not supported for multi-output. + If ``True``, shows a symbolic representation of the class name. + + label : {'all', 'root', 'none'}, default='all' + Whether to show informative labels for impurity, etc. 
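# Illustrative sketch (not from the vendored sources): the xy mapping used in
# _MPLTreeExporter.recurse above places nodes in axes-fraction coordinates,
# offsetting by 0.5 to centre each box in its grid cell and flipping y so the
# root is drawn at the top. Worked example for a root at (x=0.5, y=0) with two
# leaves at (0, 1) and (1, 1), and max_x = max_y = 2:
def to_axes_fraction(x, y, max_x, max_y):
    return ((x + 0.5) / max_x, (max_y - y - 0.5) / max_y)

print(to_axes_fraction(0.5, 0, 2, 2))    # (0.5, 0.75)  root, centred near the top
print(to_axes_fraction(0.0, 1, 2, 2))    # (0.25, 0.25) left leaf
print(to_axes_fraction(1.0, 1, 2, 2))    # (0.75, 0.25) right leaf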
+ Options include 'all' to show at every node, 'root' to show only at + the top root node, or 'none' to not show at any node. + + filled : bool, default=False + When set to ``True``, paint nodes to indicate majority class for + classification, extremity of values for regression, or purity of node + for multi-output. + + leaves_parallel : bool, default=False + When set to ``True``, draw all leaf nodes at the bottom of the tree. + + impurity : bool, default=True + When set to ``True``, show the impurity at each node. + + node_ids : bool, default=False + When set to ``True``, show the ID number on each node. + + proportion : bool, default=False + When set to ``True``, change the display of 'values' and/or 'samples' + to be proportions and percentages respectively. + + rotate : bool, default=False + When set to ``True``, orient tree left to right rather than top-down. + + rounded : bool, default=False + When set to ``True``, draw node boxes with rounded corners. + + special_characters : bool, default=False + When set to ``False``, ignore special characters for PostScript + compatibility. + + precision : int, default=3 + Number of digits of precision for floating point in the values of + impurity, threshold and value attributes of each node. + + fontname : str, default='helvetica' + Name of font used to render text. + + Returns + ------- + dot_data : str + String representation of the input tree in GraphViz dot format. + Only returned if ``out_file`` is None. + + .. versionadded:: 0.18 + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn import tree + + >>> clf = tree.DecisionTreeClassifier() + >>> iris = load_iris() + + >>> clf = clf.fit(iris.data, iris.target) + >>> tree.export_graphviz(clf) + 'digraph Tree {... + """ + if feature_names is not None: + feature_names = check_array( + feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + if class_names is not None and not isinstance(class_names, bool): + class_names = check_array( + class_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + + check_is_fitted(decision_tree) + own_file = False + return_string = False + try: + if isinstance(out_file, str): + out_file = open(out_file, "w", encoding="utf-8") + own_file = True + + if out_file is None: + return_string = True + out_file = StringIO() + + exporter = _DOTTreeExporter( + out_file=out_file, + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + leaves_parallel=leaves_parallel, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rotate=rotate, + rounded=rounded, + special_characters=special_characters, + precision=precision, + fontname=fontname, + ) + exporter.export(decision_tree) + + if return_string: + return exporter.out_file.getvalue() + + finally: + if own_file: + out_file.close() + + +def _compute_depth(tree, node): + """ + Returns the depth of the subtree rooted in node. 
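# Illustrative usage sketch (not from the vendored sources): rendering the DOT
# string returned by export_graphviz above. The third-party `graphviz` Python
# package (plus the Graphviz binaries) is an optional assumption here;
# otherwise the string can be written to a .dot file and rendered with the
# `dot` CLI as shown in the docstring.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_graphviz

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(iris.data, iris.target)
dot_data = export_graphviz(
    clf,
    out_file=None,                      # return the DOT source as a string
    feature_names=iris.feature_names,
    class_names=iris.target_names,
    filled=True,
    rounded=True,
)
try:
    import graphviz                     # optional dependency
    graphviz.Source(dot_data).render("iris_tree", format="png", cleanup=True)
except ImportError:
    with open("iris_tree.dot", "w", encoding="utf-8") as f:
        f.write(dot_data)               # then: dot -Tpng iris_tree.dot -o iris_tree.png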
+ """ + + def compute_depth_( + current_node, current_depth, children_left, children_right, depths + ): + depths += [current_depth] + left = children_left[current_node] + right = children_right[current_node] + if left != -1 and right != -1: + compute_depth_( + left, current_depth + 1, children_left, children_right, depths + ) + compute_depth_( + right, current_depth + 1, children_left, children_right, depths + ) + + depths = [] + compute_depth_(node, 1, tree.children_left, tree.children_right, depths) + return max(depths) + + +@validate_params( + { + "decision_tree": [DecisionTreeClassifier, DecisionTreeRegressor], + "feature_names": ["array-like", None], + "class_names": ["array-like", None], + "max_depth": [Interval(Integral, 0, None, closed="left"), None], + "spacing": [Interval(Integral, 1, None, closed="left"), None], + "decimals": [Interval(Integral, 0, None, closed="left"), None], + "show_weights": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def export_text( + decision_tree, + *, + feature_names=None, + class_names=None, + max_depth=10, + spacing=3, + decimals=2, + show_weights=False, +): + """Build a text report showing the rules of a decision tree. + + Note that backwards compatibility may not be supported. + + Parameters + ---------- + decision_tree : object + The decision tree estimator to be exported. + It can be an instance of + DecisionTreeClassifier or DecisionTreeRegressor. + + feature_names : array-like of shape (n_features,), default=None + An array containing the feature names. + If None generic names will be used ("feature_0", "feature_1", ...). + + class_names : array-like of shape (n_classes,), default=None + Names of each of the target classes in ascending numerical order. + Only relevant for classification and not supported for multi-output. + + - if `None`, the class names are delegated to `decision_tree.classes_`; + - otherwise, `class_names` will be used as class names instead of + `decision_tree.classes_`. The length of `class_names` must match + the length of `decision_tree.classes_`. + + .. versionadded:: 1.3 + + max_depth : int, default=10 + Only the first max_depth levels of the tree are exported. + Truncated branches will be marked with "...". + + spacing : int, default=3 + Number of spaces between edges. The higher it is, the wider the result. + + decimals : int, default=2 + Number of decimal digits to display. + + show_weights : bool, default=False + If true the classification weights will be exported on each leaf. + The classification weights are the number of samples each class. + + Returns + ------- + report : str + Text summary of all the rules in the decision tree. 
+ + Examples + -------- + + >>> from sklearn.datasets import load_iris + >>> from sklearn.tree import DecisionTreeClassifier + >>> from sklearn.tree import export_text + >>> iris = load_iris() + >>> X = iris['data'] + >>> y = iris['target'] + >>> decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2) + >>> decision_tree = decision_tree.fit(X, y) + >>> r = export_text(decision_tree, feature_names=iris['feature_names']) + >>> print(r) + |--- petal width (cm) <= 0.80 + | |--- class: 0 + |--- petal width (cm) > 0.80 + | |--- petal width (cm) <= 1.75 + | | |--- class: 1 + | |--- petal width (cm) > 1.75 + | | |--- class: 2 + """ + if feature_names is not None: + feature_names = check_array( + feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + if class_names is not None: + class_names = check_array( + class_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + + check_is_fitted(decision_tree) + tree_ = decision_tree.tree_ + if is_classifier(decision_tree): + if class_names is None: + class_names = decision_tree.classes_ + elif len(class_names) != len(decision_tree.classes_): + raise ValueError( + "When `class_names` is an array, it should contain as" + " many items as `decision_tree.classes_`. Got" + f" {len(class_names)} while the tree was fitted with" + f" {len(decision_tree.classes_)} classes." + ) + right_child_fmt = "{} {} <= {}\n" + left_child_fmt = "{} {} > {}\n" + truncation_fmt = "{} {}\n" + + if feature_names is not None and len(feature_names) != tree_.n_features: + raise ValueError( + "feature_names must contain %d elements, got %d" + % (tree_.n_features, len(feature_names)) + ) + + if isinstance(decision_tree, DecisionTreeClassifier): + value_fmt = "{}{} weights: {}\n" + if not show_weights: + value_fmt = "{}{}{}\n" + else: + value_fmt = "{}{} value: {}\n" + + if feature_names is not None: + feature_names_ = [ + feature_names[i] if i != _tree.TREE_UNDEFINED else None + for i in tree_.feature + ] + else: + feature_names_ = ["feature_{}".format(i) for i in tree_.feature] + + export_text.report = "" + + def _add_leaf(value, weighted_n_node_samples, class_name, indent): + val = "" + if isinstance(decision_tree, DecisionTreeClassifier): + if show_weights: + val = [ + "{1:.{0}f}, ".format(decimals, v * weighted_n_node_samples) + for v in value + ] + val = "[" + "".join(val)[:-2] + "]" + weighted_n_node_samples + val += " class: " + str(class_name) + else: + val = ["{1:.{0}f}, ".format(decimals, v) for v in value] + val = "[" + "".join(val)[:-2] + "]" + export_text.report += value_fmt.format(indent, "", val) + + def print_tree_recurse(node, depth): + indent = ("|" + (" " * spacing)) * depth + indent = indent[:-spacing] + "-" * spacing + + value = None + if tree_.n_outputs == 1: + value = tree_.value[node][0] + else: + value = tree_.value[node].T[0] + class_name = np.argmax(value) + + if tree_.n_classes[0] != 1 and tree_.n_outputs == 1: + class_name = class_names[class_name] + + weighted_n_node_samples = tree_.weighted_n_node_samples[node] + + if depth <= max_depth + 1: + info_fmt = "" + info_fmt_left = info_fmt + info_fmt_right = info_fmt + + if tree_.feature[node] != _tree.TREE_UNDEFINED: + name = feature_names_[node] + threshold = tree_.threshold[node] + threshold = "{1:.{0}f}".format(decimals, threshold) + export_text.report += right_child_fmt.format(indent, name, threshold) + export_text.report += info_fmt_left + print_tree_recurse(tree_.children_left[node], depth + 1) + + export_text.report += left_child_fmt.format(indent, name, threshold) + 
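# Illustrative sketch (not from the vendored sources): how print_tree_recurse
# above builds the "|   |---" prefixes shown in the docstring example. Each
# level contributes "|" plus `spacing` blanks, and the final `spacing`
# characters are replaced by dashes.
spacing = 3
for depth in (1, 2, 3):
    indent = ("|" + " " * spacing) * depth
    indent = indent[:-spacing] + "-" * spacing
    print(repr(indent))
# prints '|---', '|   |---', '|   |   |---'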
export_text.report += info_fmt_right + print_tree_recurse(tree_.children_right[node], depth + 1) + else: # leaf + _add_leaf(value, weighted_n_node_samples, class_name, indent) + else: + subtree_depth = _compute_depth(tree_, node) + if subtree_depth == 1: + _add_leaf(value, weighted_n_node_samples, class_name, indent) + else: + trunc_report = "truncated branch of depth %d" % subtree_depth + export_text.report += truncation_fmt.format(indent, trunc_report) + + print_tree_recurse(0, 1) + return export_text.report diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_reingold_tilford.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_reingold_tilford.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0b6af08bd517b3cd5f9450ebe155d52c6f24eb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_reingold_tilford.py @@ -0,0 +1,188 @@ +# Authors: William Mill (bill@billmill.org) +# License: BSD 3 clause + +import numpy as np + + +class DrawTree: + def __init__(self, tree, parent=None, depth=0, number=1): + self.x = -1.0 + self.y = depth + self.tree = tree + self.children = [ + DrawTree(c, self, depth + 1, i + 1) for i, c in enumerate(tree.children) + ] + self.parent = parent + self.thread = None + self.mod = 0 + self.ancestor = self + self.change = self.shift = 0 + self._lmost_sibling = None + # this is the number of the node in its group of siblings 1..n + self.number = number + + def left(self): + return self.thread or len(self.children) and self.children[0] + + def right(self): + return self.thread or len(self.children) and self.children[-1] + + def lbrother(self): + n = None + if self.parent: + for node in self.parent.children: + if node == self: + return n + else: + n = node + return n + + def get_lmost_sibling(self): + if not self._lmost_sibling and self.parent and self != self.parent.children[0]: + self._lmost_sibling = self.parent.children[0] + return self._lmost_sibling + + lmost_sibling = property(get_lmost_sibling) + + def __str__(self): + return "%s: x=%s mod=%s" % (self.tree, self.x, self.mod) + + def __repr__(self): + return self.__str__() + + def max_extents(self): + extents = [c.max_extents() for c in self.children] + extents.append((self.x, self.y)) + return np.max(extents, axis=0) + + +def buchheim(tree): + dt = first_walk(DrawTree(tree)) + min = second_walk(dt) + if min < 0: + third_walk(dt, -min) + return dt + + +def third_walk(tree, n): + tree.x += n + for c in tree.children: + third_walk(c, n) + + +def first_walk(v, distance=1.0): + if len(v.children) == 0: + if v.lmost_sibling: + v.x = v.lbrother().x + distance + else: + v.x = 0.0 + else: + default_ancestor = v.children[0] + for w in v.children: + first_walk(w) + default_ancestor = apportion(w, default_ancestor, distance) + # print("finished v =", v.tree, "children") + execute_shifts(v) + + midpoint = (v.children[0].x + v.children[-1].x) / 2 + + w = v.lbrother() + if w: + v.x = w.x + distance + v.mod = v.x - midpoint + else: + v.x = midpoint + return v + + +def apportion(v, default_ancestor, distance): + w = v.lbrother() + if w is not None: + # in buchheim notation: + # i == inner; o == outer; r == right; l == left; r = +; l = - + vir = vor = v + vil = w + vol = v.lmost_sibling + sir = sor = v.mod + sil = vil.mod + sol = vol.mod + while vil.right() and vir.left(): + vil = vil.right() + vir = vir.left() + vol = vol.left() + vor = vor.right() + vor.ancestor = v + shift = (vil.x + sil) - (vir.x + sir) + distance + if shift > 0: + move_subtree(ancestor(vil, v, 
default_ancestor), v, shift) + sir = sir + shift + sor = sor + shift + sil += vil.mod + sir += vir.mod + sol += vol.mod + sor += vor.mod + if vil.right() and not vor.right(): + vor.thread = vil.right() + vor.mod += sil - sor + else: + if vir.left() and not vol.left(): + vol.thread = vir.left() + vol.mod += sir - sol + default_ancestor = v + return default_ancestor + + +def move_subtree(wl, wr, shift): + subtrees = wr.number - wl.number + # print(wl.tree, "is conflicted with", wr.tree, 'moving', subtrees, + # 'shift', shift) + # print wl, wr, wr.number, wl.number, shift, subtrees, shift/subtrees + wr.change -= shift / subtrees + wr.shift += shift + wl.change += shift / subtrees + wr.x += shift + wr.mod += shift + + +def execute_shifts(v): + shift = change = 0 + for w in v.children[::-1]: + # print("shift:", w, shift, w.change) + w.x += shift + w.mod += shift + change += w.change + shift += w.shift + change + + +def ancestor(vil, v, default_ancestor): + # the relevant text is at the bottom of page 7 of + # "Improving Walker's Algorithm to Run in Linear Time" by Buchheim et al, + # (2002) + # https://citeseerx.ist.psu.edu/doc_view/pid/1f41c3c2a4880dc49238e46d555f16d28da2940d + if vil.ancestor in v.parent.children: + return vil.ancestor + else: + return default_ancestor + + +def second_walk(v, m=0, depth=0, min=None): + v.x += m + v.y = depth + + if min is None or v.x < min: + min = v.x + + for w in v.children: + min = second_walk(w, m + v.mod, depth + 1, min) + + return min + + +class Tree: + def __init__(self, label="", node_id=-1, *children): + self.label = label + self.node_id = node_id + if children: + self.children = children + else: + self.children = [] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_splitter.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_splitter.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1e32804b63c41f0d08a0af09061b762ca5ca45b0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_splitter.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_splitter.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_splitter.pxd new file mode 100644 index 0000000000000000000000000000000000000000..adc14011cb7a2f54b67ef96f469329b57d022e59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_splitter.pxd @@ -0,0 +1,114 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Joel Nothman +# Arnaud Joly +# Jacob Schreiber +# +# License: BSD 3 clause + +# See _splitter.pyx for details. +cimport numpy as cnp + +from ._criterion cimport Criterion + +from ..utils._typedefs cimport float32_t, float64_t, intp_t, int32_t, uint32_t + + +cdef struct SplitRecord: + # Data to track sample split + intp_t feature # Which feature to split on. + intp_t pos # Split samples array at the given position, + # # i.e. count of samples below threshold for feature. + # # pos is >= end if the node is a leaf. + float64_t threshold # Threshold to split at. + float64_t improvement # Impurity improvement given parent node. + float64_t impurity_left # Impurity of the left split. + float64_t impurity_right # Impurity of the right split. 
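# Illustrative sketch (not from the vendored sources): the Buchheim /
# Reingold-Tilford layout in _reingold_tilford.py above assigns an (x, y)
# position to every node, with y equal to the depth. `Tree` and `buchheim`
# are private helpers of sklearn.tree, so this is for exploration only.
from sklearn.tree._reingold_tilford import Tree, buchheim

root = Tree("root", 0, Tree("left", 1), Tree("right", 2))
layout = buchheim(root)
print(layout.x, layout.y)                       # 0.5 0   -> root centred above its children
for child in layout.children:
    print(child.tree.label, child.x, child.y)   # left 0.0 1, right 1.0 1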
+ float64_t lower_bound # Lower bound on value of both children for monotonicity + float64_t upper_bound # Upper bound on value of both children for monotonicity + unsigned char missing_go_to_left # Controls if missing values go to the left node. + intp_t n_missing # Number of missing values for the feature being split on + +cdef class Splitter: + # The splitter searches in the input space for a feature and a threshold + # to split the samples samples[start:end]. + # + # The impurity computations are delegated to a criterion object. + + # Internal structures + cdef public Criterion criterion # Impurity criterion + cdef public intp_t max_features # Number of features to test + cdef public intp_t min_samples_leaf # Min samples in a leaf + cdef public float64_t min_weight_leaf # Minimum weight in a leaf + + cdef object random_state # Random state + cdef uint32_t rand_r_state # sklearn_rand_r random number state + + cdef intp_t[::1] samples # Sample indices in X, y + cdef intp_t n_samples # X.shape[0] + cdef float64_t weighted_n_samples # Weighted number of samples + cdef intp_t[::1] features # Feature indices in X + cdef intp_t[::1] constant_features # Constant features indices + cdef intp_t n_features # X.shape[1] + cdef float32_t[::1] feature_values # temp. array holding feature values + + cdef intp_t start # Start position for the current node + cdef intp_t end # End position for the current node + + cdef const float64_t[:, ::1] y + # Monotonicity constraints for each feature. + # The encoding is as follows: + # -1: monotonic decrease + # 0: no constraint + # +1: monotonic increase + cdef const cnp.int8_t[:] monotonic_cst + cdef bint with_monotonic_cst + cdef const float64_t[:] sample_weight + + # The samples vector `samples` is maintained by the Splitter object such + # that the samples contained in a node are contiguous. With this setting, + # `node_split` reorganizes the node samples `samples[start:end]` in two + # subsets `samples[start:pos]` and `samples[pos:end]`. + + # The 1-d `features` array of size n_features contains the features + # indices and allows fast sampling without replacement of features. + + # The 1-d `constant_features` array of size n_features holds in + # `constant_features[:n_constant_features]` the feature ids with + # constant values for all the samples that reached a specific node. + # The value `n_constant_features` is given by the parent node to its + # child nodes. The content of the range `[n_constant_features:]` is left + # undefined, but preallocated for performance reasons + # This allows optimization with depth-based tree building. 
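# Illustrative, pure-Python sketch (not the Cython implementation) of the
# invariant documented above: after node_split, samples[start:pos] holds the
# rows routed to the left child (feature value <= threshold) and
# samples[pos:end] the rows routed right. The data and split are hypothetical.
import numpy as np

X = np.array([[0.2], [0.9], [0.4], [0.7]])
samples = np.array([0, 1, 2, 3])        # sample indices owned by this node
start, end = 0, 4
feature, threshold = 0, 0.5             # hypothetical split chosen by the splitter

go_left = X[samples[start:end], feature] <= threshold
left = samples[start:end][go_left]
right = samples[start:end][~go_left]
samples[start:end] = np.concatenate([left, right])
pos = start + len(left)                 # samples[start:pos] -> left child
print(samples, pos)                     # [0 2 1 3] 2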
+ + # Methods + cdef int init( + self, + object X, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight, + const unsigned char[::1] missing_values_in_feature_mask, + ) except -1 + + cdef int node_reset( + self, + intp_t start, + intp_t end, + float64_t* weighted_n_node_samples + ) except -1 nogil + + cdef int node_split( + self, + float64_t impurity, # Impurity of the node + SplitRecord* split, + intp_t* n_constant_features, + float64_t lower_bound, + float64_t upper_bound, + ) except -1 nogil + + cdef void node_value(self, float64_t* dest) noexcept nogil + + cdef void clip_node_value(self, float64_t* dest, float64_t lower_bound, float64_t upper_bound) noexcept nogil + + cdef float64_t node_impurity(self) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0ca6365fb24e8bd419bdcb4759c778f7c35af6af Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_tree.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_tree.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e4081921f40f972859c086301f19132724b1d867 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_tree.pxd @@ -0,0 +1,114 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Joel Nothman +# Arnaud Joly +# Jacob Schreiber +# Nelson Liu +# +# License: BSD 3 clause + +# See _tree.pyx for details. + +import numpy as np +cimport numpy as cnp + +from ..utils._typedefs cimport float32_t, float64_t, intp_t, int32_t, uint32_t + +from ._splitter cimport Splitter +from ._splitter cimport SplitRecord + +cdef struct Node: + # Base storage structure for the nodes in a Tree object + + intp_t left_child # id of the left child of the node + intp_t right_child # id of the right child of the node + intp_t feature # Feature used for splitting the node + float64_t threshold # Threshold value at the node + float64_t impurity # Impurity of the node (i.e., the value of the criterion) + intp_t n_node_samples # Number of samples at the node + float64_t weighted_n_node_samples # Weighted number of samples at the node + unsigned char missing_go_to_left # Whether features have missing values + + +cdef class Tree: + # The Tree object is a binary tree structure constructed by the + # TreeBuilder. The tree structure is used for predictions and + # feature importances. + + # Input/Output layout + cdef public intp_t n_features # Number of features in X + cdef intp_t* n_classes # Number of classes in y[:, k] + cdef public intp_t n_outputs # Number of outputs in y + cdef public intp_t max_n_classes # max(n_classes) + + # Inner structures: values are stored separately from node structure, + # since size is determined at runtime. 
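# Illustrative sketch (not from the vendored sources): the Node fields above
# are exposed in Python as parallel arrays on the fitted `tree_` attribute
# (children_left, children_right, feature, threshold, ...), which is enough to
# route a sample from the root to its leaf by hand.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(iris.data, iris.target)
t = clf.tree_

x = iris.data[0]
node = 0
while t.children_left[node] != -1:          # -1 (TREE_LEAF) marks a leaf
    if x[t.feature[node]] <= t.threshold[node]:
        node = t.children_left[node]
    else:
        node = t.children_right[node]
print(node, clf.apply(iris.data[:1])[0])    # both should give the same leaf id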
+ cdef public intp_t max_depth # Max depth of the tree + cdef public intp_t node_count # Counter for node IDs + cdef public intp_t capacity # Capacity of tree, in terms of nodes + cdef Node* nodes # Array of nodes + cdef float64_t* value # (capacity, n_outputs, max_n_classes) array of values + cdef intp_t value_stride # = n_outputs * max_n_classes + + # Methods + cdef intp_t _add_node(self, intp_t parent, bint is_left, bint is_leaf, + intp_t feature, float64_t threshold, float64_t impurity, + intp_t n_node_samples, + float64_t weighted_n_node_samples, + unsigned char missing_go_to_left) except -1 nogil + cdef int _resize(self, intp_t capacity) except -1 nogil + cdef int _resize_c(self, intp_t capacity=*) except -1 nogil + + cdef cnp.ndarray _get_value_ndarray(self) + cdef cnp.ndarray _get_node_ndarray(self) + + cpdef cnp.ndarray predict(self, object X) + + cpdef cnp.ndarray apply(self, object X) + cdef cnp.ndarray _apply_dense(self, object X) + cdef cnp.ndarray _apply_sparse_csr(self, object X) + + cpdef object decision_path(self, object X) + cdef object _decision_path_dense(self, object X) + cdef object _decision_path_sparse_csr(self, object X) + + cpdef compute_node_depths(self) + cpdef compute_feature_importances(self, normalize=*) + + +# ============================================================================= +# Tree builder +# ============================================================================= + +cdef class TreeBuilder: + # The TreeBuilder recursively builds a Tree object from training samples, + # using a Splitter object for splitting internal nodes and assigning + # values to leaves. + # + # This class controls the various stopping criteria and the node splitting + # evaluation order, e.g. depth-first or best-first. + + cdef Splitter splitter # Splitting algorithm + + cdef intp_t min_samples_split # Minimum number of samples in an internal node + cdef intp_t min_samples_leaf # Minimum number of samples in a leaf + cdef float64_t min_weight_leaf # Minimum weight in a leaf + cdef intp_t max_depth # Maximal tree depth + cdef float64_t min_impurity_decrease # Impurity threshold for early stopping + + cpdef build( + self, + Tree tree, + object X, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight=*, + const unsigned char[::1] missing_values_in_feature_mask=*, + ) + + cdef _check_input( + self, + object X, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_utils.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f8922b78077aec5fccbf71b4dc297e7f9af29b84 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_utils.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_utils.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b59d18879ca9437aa1c860131dfe7cd1e5dfcc78 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/_utils.pxd @@ -0,0 +1,104 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Arnaud Joly +# Jacob Schreiber +# Nelson Liu +# +# License: BSD 3 clause + +# See _utils.pyx for details. 
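# Illustrative sketch (not from the vendored sources): the flat `value` buffer
# declared above is exposed in Python as a 3-D array of shape
# (node_count, n_outputs, max_n_classes), i.e. value_stride entries per node.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(iris.data, iris.target)
t = clf.tree_
print(t.value.shape)                                                     # (node_count, 1, 3)
print(t.value.shape == (t.node_count, clf.n_outputs_, clf.n_classes_))   # True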
+ +cimport numpy as cnp +from ._tree cimport Node +from ..neighbors._quad_tree cimport Cell +from ..utils._typedefs cimport float32_t, float64_t, intp_t, int32_t, uint32_t + +cdef enum: + # Max value for our rand_r replacement (near the bottom). + # We don't use RAND_MAX because it's different across platforms and + # particularly tiny on Windows/MSVC. + # It corresponds to the maximum representable value for + # 32-bit signed integers (i.e. 2^31 - 1). + RAND_R_MAX = 2147483647 + + +# safe_realloc(&p, n) resizes the allocation of p to n * sizeof(*p) bytes or +# raises a MemoryError. It never calls free, since that's __dealloc__'s job. +# cdef float32_t *p = NULL +# safe_realloc(&p, n) +# is equivalent to p = malloc(n * sizeof(*p)) with error checking. +ctypedef fused realloc_ptr: + # Add pointer types here as needed. + (float32_t*) + (intp_t*) + (unsigned char*) + (WeightedPQueueRecord*) + (float64_t*) + (float64_t**) + (Node*) + (Cell*) + (Node**) + +cdef int safe_realloc(realloc_ptr* p, size_t nelems) except -1 nogil + + +cdef cnp.ndarray sizet_ptr_to_ndarray(intp_t* data, intp_t size) + + +cdef intp_t rand_int(intp_t low, intp_t high, + uint32_t* random_state) noexcept nogil + + +cdef float64_t rand_uniform(float64_t low, float64_t high, + uint32_t* random_state) noexcept nogil + + +cdef float64_t log(float64_t x) noexcept nogil + +# ============================================================================= +# WeightedPQueue data structure +# ============================================================================= + +# A record stored in the WeightedPQueue +cdef struct WeightedPQueueRecord: + float64_t data + float64_t weight + +cdef class WeightedPQueue: + cdef intp_t capacity + cdef intp_t array_ptr + cdef WeightedPQueueRecord* array_ + + cdef bint is_empty(self) noexcept nogil + cdef int reset(self) except -1 nogil + cdef intp_t size(self) noexcept nogil + cdef int push(self, float64_t data, float64_t weight) except -1 nogil + cdef int remove(self, float64_t data, float64_t weight) noexcept nogil + cdef int pop(self, float64_t* data, float64_t* weight) noexcept nogil + cdef int peek(self, float64_t* data, float64_t* weight) noexcept nogil + cdef float64_t get_weight_from_index(self, intp_t index) noexcept nogil + cdef float64_t get_value_from_index(self, intp_t index) noexcept nogil + + +# ============================================================================= +# WeightedMedianCalculator data structure +# ============================================================================= + +cdef class WeightedMedianCalculator: + cdef intp_t initial_capacity + cdef WeightedPQueue samples + cdef float64_t total_weight + cdef intp_t k + cdef float64_t sum_w_0_k # represents sum(weights[0:k]) = w[0] + w[1] + ... 
+ w[k-1] + cdef intp_t size(self) noexcept nogil + cdef int push(self, float64_t data, float64_t weight) except -1 nogil + cdef int reset(self) except -1 nogil + cdef int update_median_parameters_post_push( + self, float64_t data, float64_t weight, + float64_t original_median) noexcept nogil + cdef int remove(self, float64_t data, float64_t weight) noexcept nogil + cdef int pop(self, float64_t* data, float64_t* weight) noexcept nogil + cdef int update_median_parameters_post_remove( + self, float64_t data, float64_t weight, + float64_t original_median) noexcept nogil + cdef float64_t get_median(self) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a90151172531dbc6b7ddd48d8dfd328f53a5e326 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_export.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_export.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9266f169e1d3431509936c1ea8fdc1bd8b8b4c5b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_export.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_monotonic_tree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_monotonic_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26aade7fcac71ae18581a83979f61ef74d46b6a7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_monotonic_tree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_reingold_tilford.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_reingold_tilford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f99dff718f7d73d7e81d34d005baed924f1ad92d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_reingold_tilford.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_tree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb40b874f998db906ed29c6a28eb73d5d33cb5e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_tree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_export.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_export.py new file mode 100644 index 0000000000000000000000000000000000000000..169c667b4ff3ff86fb42b3434741098fe4dd1213 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_export.py @@ -0,0 +1,519 @@ +""" +Testing for export functions of decision trees (sklearn.tree.export). +""" +from io import StringIO +from re import finditer, search +from textwrap import dedent + +import numpy as np +import pytest +from numpy.random import RandomState + +from sklearn.base import is_classifier +from sklearn.ensemble import GradientBoostingClassifier +from sklearn.exceptions import NotFittedError +from sklearn.tree import ( + DecisionTreeClassifier, + DecisionTreeRegressor, + export_graphviz, + export_text, + plot_tree, +) + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]] +w = [1, 1, 1, 0.5, 0.5, 0.5] +y_degraded = [1, 1, 1, 1, 1, 1] + + +def test_graphviz_toy(): + # Check correctness of export_graphviz + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="gini", random_state=2 + ) + clf.fit(X, y) + + # Test export code + contents1 = export_graphviz(clf, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]"] ;\n' + '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + # Test plot_options + contents1 = export_graphviz( + clf, + filled=True, + impurity=False, + proportion=True, + special_characters=True, + rounded=True, + out_file=None, + fontname="sans", + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled, rounded", color="black", ' + 'fontname="sans"] ;\n' + 'edge [fontname="sans"] ;\n' + "0 [label=0 ≤ 0.0
samples = 100.0%
" + 'value = [0.5, 0.5]>, fillcolor="#ffffff"] ;\n' + "1 [label=value = [1.0, 0.0]>, " + 'fillcolor="#e58139"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + "2 [label=value = [0.0, 1.0]>, " + 'fillcolor="#399de5"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + # Test max_depth + contents1 = export_graphviz(clf, max_depth=0, class_names=True, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]\\nclass = y[0]"] ;\n' + '1 [label="(...)"] ;\n' + "0 -> 1 ;\n" + '2 [label="(...)"] ;\n' + "0 -> 2 ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test max_depth with plot_options + contents1 = export_graphviz( + clf, max_depth=0, filled=True, out_file=None, node_ids=True + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled", color="black", ' + 'fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="node #0\\nx[0] <= 0.0\\ngini = 0.5\\n' + 'samples = 6\\nvalue = [3, 3]", fillcolor="#ffffff"] ;\n' + '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' + "0 -> 1 ;\n" + '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' + "0 -> 2 ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test multi-output with weighted samples + clf = DecisionTreeClassifier( + max_depth=2, min_samples_split=2, criterion="gini", random_state=2 + ) + clf = clf.fit(X, y2, sample_weight=w) + + contents1 = export_graphviz(clf, filled=True, impurity=False, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled", color="black", ' + 'fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\nsamples = 6\\n' + "value = [[3.0, 1.5, 0.0]\\n" + '[3.0, 1.0, 0.5]]", fillcolor="#ffffff"] ;\n' + '1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' + '[3, 0, 0]]", fillcolor="#e58139"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="x[0] <= 1.5\\nsamples = 3\\n' + "value = [[0.0, 1.5, 0.0]\\n" + '[0.0, 1.0, 0.5]]", fillcolor="#f1bd97"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + '3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' + '[0, 1, 0]]", fillcolor="#e58139"] ;\n' + "2 -> 3 ;\n" + '4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' + '[0.0, 0.0, 0.5]]", fillcolor="#e58139"] ;\n' + "2 -> 4 ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test regression output with plot_options + clf = DecisionTreeRegressor( + max_depth=3, min_samples_split=2, criterion="squared_error", random_state=2 + ) + clf.fit(X, y) + + contents1 = export_graphviz( + clf, + filled=True, + leaves_parallel=True, + out_file=None, + rotate=True, + rounded=True, + fontname="sans", + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled, rounded", color="black", ' + 'fontname="sans"] ;\n' + "graph [ranksep=equally, splines=polyline] ;\n" + 'edge [fontname="sans"] ;\n' + "rankdir=LR ;\n" + '0 [label="x[0] <= 0.0\\nsquared_error = 1.0\\nsamples = 6\\n' + 'value = 0.0", fillcolor="#f2c09c"] ;\n' + '1 [label="squared_error = 0.0\\nsamples = 3\\' + 'nvalue = -1.0", ' + 'fillcolor="#ffffff"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=-45, " + 'headlabel="True"] ;\n' + '2 [label="squared_error = 0.0\\nsamples = 3\\nvalue = 1.0", ' + 'fillcolor="#e58139"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=45, " + 
'headlabel="False"] ;\n' + "{rank=same ; 0} ;\n" + "{rank=same ; 1; 2} ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test classifier with degraded learning set + clf = DecisionTreeClassifier(max_depth=3) + clf.fit(X, y_degraded) + + contents1 = export_graphviz(clf, filled=True, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled", color="black", ' + 'fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", ' + 'fillcolor="#ffffff"] ;\n' + "}" + ) + + +@pytest.mark.parametrize("constructor", [list, np.array]) +def test_graphviz_feature_class_names_array_support(constructor): + # Check that export_graphviz treats feature names + # and class names correctly and supports arrays + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="gini", random_state=2 + ) + clf.fit(X, y) + + # Test with feature_names + contents1 = export_graphviz( + clf, feature_names=constructor(["feature0", "feature1"]), out_file=None + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]"] ;\n' + '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + # Test with class_names + contents1 = export_graphviz( + clf, class_names=constructor(["yes", "no"]), out_file=None + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]\\nclass = yes"] ;\n' + '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' + 'class = yes"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' + 'class = no"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + +def test_graphviz_errors(): + # Check for errors of export_graphviz + clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2) + + # Check not-fitted decision tree error + out = StringIO() + with pytest.raises(NotFittedError): + export_graphviz(clf, out) + + clf.fit(X, y) + + # Check if it errors when length of feature_names + # mismatches with number of features + message = "Length of feature_names, 1 does not match number of features, 2" + with pytest.raises(ValueError, match=message): + export_graphviz(clf, None, feature_names=["a"]) + + message = "Length of feature_names, 3 does not match number of features, 2" + with pytest.raises(ValueError, match=message): + export_graphviz(clf, None, feature_names=["a", "b", "c"]) + + # Check error when argument is not an estimator + message = "is not an estimator instance" + with pytest.raises(TypeError, match=message): + export_graphviz(clf.fit(X, y).tree_) + + # Check class_names error + out = StringIO() + with pytest.raises(IndexError): + export_graphviz(clf, out, class_names=[]) + + +def test_friedman_mse_in_graphviz(): + clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0) + clf.fit(X, y) + dot_data = StringIO() + export_graphviz(clf, out_file=dot_data) + + clf = 
GradientBoostingClassifier(n_estimators=2, random_state=0) + clf.fit(X, y) + for estimator in clf.estimators_: + export_graphviz(estimator[0], out_file=dot_data) + + for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()): + assert "friedman_mse" in finding.group() + + +def test_precision(): + rng_reg = RandomState(2) + rng_clf = RandomState(8) + for X, y, clf in zip( + (rng_reg.random_sample((5, 2)), rng_clf.random_sample((1000, 4))), + (rng_reg.random_sample((5,)), rng_clf.randint(2, size=(1000,))), + ( + DecisionTreeRegressor( + criterion="friedman_mse", random_state=0, max_depth=1 + ), + DecisionTreeClassifier(max_depth=1, random_state=0), + ), + ): + clf.fit(X, y) + for precision in (4, 3): + dot_data = export_graphviz( + clf, out_file=None, precision=precision, proportion=True + ) + + # With the current random state, the impurity and the threshold + # will have the number of precision set in the export_graphviz + # function. We will check the number of precision with a strict + # equality. The value reported will have only 2 precision and + # therefore, only a less equal comparison will be done. + + # check value + for finding in finditer(r"value = \d+\.\d+", dot_data): + assert len(search(r"\.\d+", finding.group()).group()) <= precision + 1 + # check impurity + if is_classifier(clf): + pattern = r"gini = \d+\.\d+" + else: + pattern = r"friedman_mse = \d+\.\d+" + + # check impurity + for finding in finditer(pattern, dot_data): + assert len(search(r"\.\d+", finding.group()).group()) == precision + 1 + # check threshold + for finding in finditer(r"<= \d+\.\d+", dot_data): + assert len(search(r"\.\d+", finding.group()).group()) == precision + 1 + + +def test_export_text_errors(): + clf = DecisionTreeClassifier(max_depth=2, random_state=0) + clf.fit(X, y) + err_msg = "feature_names must contain 2 elements, got 1" + with pytest.raises(ValueError, match=err_msg): + export_text(clf, feature_names=["a"]) + err_msg = ( + "When `class_names` is an array, it should contain as" + " many items as `decision_tree.classes_`. Got 1 while" + " the tree was fitted with 2 classes." 
+ ) + with pytest.raises(ValueError, match=err_msg): + export_text(clf, class_names=["a"]) + + +def test_export_text(): + clf = DecisionTreeClassifier(max_depth=2, random_state=0) + clf.fit(X, y) + + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- class: -1 + |--- feature_1 > 0.00 + | |--- class: 1 + """).lstrip() + + assert export_text(clf) == expected_report + # testing that leaves at level 1 are not truncated + assert export_text(clf, max_depth=0) == expected_report + # testing that the rest of the tree is truncated + assert export_text(clf, max_depth=10) == expected_report + + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- weights: [3.00, 0.00] class: -1 + |--- feature_1 > 0.00 + | |--- weights: [0.00, 3.00] class: 1 + """).lstrip() + assert export_text(clf, show_weights=True) == expected_report + + expected_report = dedent(""" + |- feature_1 <= 0.00 + | |- class: -1 + |- feature_1 > 0.00 + | |- class: 1 + """).lstrip() + assert export_text(clf, spacing=1) == expected_report + + X_l = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, 1]] + y_l = [-1, -1, -1, 1, 1, 1, 2] + clf = DecisionTreeClassifier(max_depth=4, random_state=0) + clf.fit(X_l, y_l) + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- class: -1 + |--- feature_1 > 0.00 + | |--- truncated branch of depth 2 + """).lstrip() + assert export_text(clf, max_depth=0) == expected_report + + X_mo = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] + y_mo = [[-1, -1], [-1, -1], [-1, -1], [1, 1], [1, 1], [1, 1]] + + reg = DecisionTreeRegressor(max_depth=2, random_state=0) + reg.fit(X_mo, y_mo) + + expected_report = dedent(""" + |--- feature_1 <= 0.0 + | |--- value: [-1.0, -1.0] + |--- feature_1 > 0.0 + | |--- value: [1.0, 1.0] + """).lstrip() + assert export_text(reg, decimals=1) == expected_report + assert export_text(reg, decimals=1, show_weights=True) == expected_report + + X_single = [[-2], [-1], [-1], [1], [1], [2]] + reg = DecisionTreeRegressor(max_depth=2, random_state=0) + reg.fit(X_single, y_mo) + + expected_report = dedent(""" + |--- first <= 0.0 + | |--- value: [-1.0, -1.0] + |--- first > 0.0 + | |--- value: [1.0, 1.0] + """).lstrip() + assert export_text(reg, decimals=1, feature_names=["first"]) == expected_report + assert ( + export_text(reg, decimals=1, show_weights=True, feature_names=["first"]) + == expected_report + ) + + +@pytest.mark.parametrize("constructor", [list, np.array]) +def test_export_text_feature_class_names_array_support(constructor): + # Check that export_graphviz treats feature names + # and class names correctly and supports arrays + clf = DecisionTreeClassifier(max_depth=2, random_state=0) + clf.fit(X, y) + + expected_report = dedent(""" + |--- b <= 0.00 + | |--- class: -1 + |--- b > 0.00 + | |--- class: 1 + """).lstrip() + assert export_text(clf, feature_names=constructor(["a", "b"])) == expected_report + + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- class: cat + |--- feature_1 > 0.00 + | |--- class: dog + """).lstrip() + assert export_text(clf, class_names=constructor(["cat", "dog"])) == expected_report + + +def test_plot_tree_entropy(pyplot): + # mostly smoke tests + # Check correctness of export_graphviz for criterion = entropy + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="entropy", random_state=2 + ) + clf.fit(X, y) + + # Test export code + feature_names = ["first feat", "sepal_width"] + nodes = plot_tree(clf, feature_names=feature_names) + assert len(nodes) == 3 + assert ( + 
nodes[0].get_text() + == "first feat <= 0.0\nentropy = 1.0\nsamples = 6\nvalue = [3, 3]" + ) + assert nodes[1].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [3, 0]" + assert nodes[2].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [0, 3]" + + +def test_plot_tree_gini(pyplot): + # mostly smoke tests + # Check correctness of export_graphviz for criterion = gini + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="gini", random_state=2 + ) + clf.fit(X, y) + + # Test export code + feature_names = ["first feat", "sepal_width"] + nodes = plot_tree(clf, feature_names=feature_names) + assert len(nodes) == 3 + assert ( + nodes[0].get_text() + == "first feat <= 0.0\ngini = 0.5\nsamples = 6\nvalue = [3, 3]" + ) + assert nodes[1].get_text() == "gini = 0.0\nsamples = 3\nvalue = [3, 0]" + assert nodes[2].get_text() == "gini = 0.0\nsamples = 3\nvalue = [0, 3]" + + +def test_not_fitted_tree(pyplot): + # Testing if not fitted tree throws the correct error + clf = DecisionTreeRegressor() + with pytest.raises(NotFittedError): + plot_tree(clf) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_monotonic_tree.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_monotonic_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..6478c2e2dfd85cec5f931578806f305b6167305c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_monotonic_tree.py @@ -0,0 +1,508 @@ +import numpy as np +import pytest + +from sklearn.datasets import make_classification, make_regression +from sklearn.ensemble import ( + ExtraTreesClassifier, + ExtraTreesRegressor, + RandomForestClassifier, + RandomForestRegressor, +) +from sklearn.tree import ( + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) +from sklearn.utils._testing import assert_allclose +from sklearn.utils.fixes import CSC_CONTAINERS + +TREE_CLASSIFIER_CLASSES = [DecisionTreeClassifier, ExtraTreeClassifier] +TREE_REGRESSOR_CLASSES = [DecisionTreeRegressor, ExtraTreeRegressor] +TREE_BASED_CLASSIFIER_CLASSES = TREE_CLASSIFIER_CLASSES + [ + RandomForestClassifier, + ExtraTreesClassifier, +] +TREE_BASED_REGRESSOR_CLASSES = TREE_REGRESSOR_CLASSES + [ + RandomForestRegressor, + ExtraTreesRegressor, +] + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("sparse_splitter", (True, False)) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_monotonic_constraints_classifications( + TreeClassifier, + depth_first_builder, + sparse_splitter, + global_random_seed, + csc_container, +): + n_samples = 1000 + n_samples_train = 900 + X, y = make_classification( + n_samples=n_samples, + n_classes=2, + n_features=5, + n_informative=5, + n_redundant=0, + random_state=global_random_seed, + ) + X_train, y_train = X[:n_samples_train], y[:n_samples_train] + X_test, _ = X[n_samples_train:], y[n_samples_train:] + + X_test_0incr, X_test_0decr = np.copy(X_test), np.copy(X_test) + X_test_1incr, X_test_1decr = np.copy(X_test), np.copy(X_test) + X_test_0incr[:, 0] += 10 + X_test_0decr[:, 0] -= 10 + X_test_1incr[:, 1] += 10 + X_test_1decr[:, 1] -= 10 + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = 1 + monotonic_cst[1] = -1 + + if depth_first_builder: + est = TreeClassifier(max_depth=None, monotonic_cst=monotonic_cst) + else: + est = TreeClassifier( + max_depth=None, + monotonic_cst=monotonic_cst, + 
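# Illustrative sketch (not from the vendored sources): plot_tree returns the
# matplotlib Annotation objects it draws, which is what the smoke tests above
# inspect via get_text(). Requires matplotlib; the Agg backend keeps it headless.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, plot_tree

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=1, random_state=0).fit(iris.data, iris.target)
nodes = plot_tree(clf, feature_names=iris.feature_names, class_names=iris.target_names)
print(len(nodes))                           # 3: one root split and two leaves
print(nodes[0].get_text().splitlines()[0])  # the root's split rule, e.g. "petal ... <= ..."
plt.close("all")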
max_leaf_nodes=n_samples_train, + ) + if hasattr(est, "random_state"): + est.set_params(**{"random_state": global_random_seed}) + if hasattr(est, "n_estimators"): + est.set_params(**{"n_estimators": 5}) + if sparse_splitter: + X_train = csc_container(X_train) + est.fit(X_train, y_train) + proba_test = est.predict_proba(X_test) + + assert np.logical_and( + proba_test >= 0.0, proba_test <= 1.0 + ).all(), "Probability should always be in [0, 1] range." + assert_allclose(proba_test.sum(axis=1), 1.0) + + # Monotonic increase constraint, it applies to the positive class + assert np.all(est.predict_proba(X_test_0incr)[:, 1] >= proba_test[:, 1]) + assert np.all(est.predict_proba(X_test_0decr)[:, 1] <= proba_test[:, 1]) + + # Monotonic decrease constraint, it applies to the positive class + assert np.all(est.predict_proba(X_test_1incr)[:, 1] <= proba_test[:, 1]) + assert np.all(est.predict_proba(X_test_1decr)[:, 1] >= proba_test[:, 1]) + + +@pytest.mark.parametrize("TreeRegressor", TREE_BASED_REGRESSOR_CLASSES) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("sparse_splitter", (True, False)) +@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error")) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_monotonic_constraints_regressions( + TreeRegressor, + depth_first_builder, + sparse_splitter, + criterion, + global_random_seed, + csc_container, +): + n_samples = 1000 + n_samples_train = 900 + # Build a regression task using 5 informative features + X, y = make_regression( + n_samples=n_samples, + n_features=5, + n_informative=5, + random_state=global_random_seed, + ) + train = np.arange(n_samples_train) + test = np.arange(n_samples_train, n_samples) + X_train = X[train] + y_train = y[train] + X_test = np.copy(X[test]) + X_test_incr = np.copy(X_test) + X_test_decr = np.copy(X_test) + X_test_incr[:, 0] += 10 + X_test_decr[:, 1] += 10 + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = 1 + monotonic_cst[1] = -1 + + if depth_first_builder: + est = TreeRegressor( + max_depth=None, + monotonic_cst=monotonic_cst, + criterion=criterion, + ) + else: + est = TreeRegressor( + max_depth=8, + monotonic_cst=monotonic_cst, + criterion=criterion, + max_leaf_nodes=n_samples_train, + ) + if hasattr(est, "random_state"): + est.set_params(random_state=global_random_seed) + if hasattr(est, "n_estimators"): + est.set_params(**{"n_estimators": 5}) + if sparse_splitter: + X_train = csc_container(X_train) + est.fit(X_train, y_train) + y = est.predict(X_test) + # Monotonic increase constraint + y_incr = est.predict(X_test_incr) + # y_incr should always be greater than y + assert np.all(y_incr >= y) + + # Monotonic decrease constraint + y_decr = est.predict(X_test_decr) + # y_decr should always be lower than y + assert np.all(y_decr <= y) + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +def test_multiclass_raises(TreeClassifier): + X, y = make_classification( + n_samples=100, n_features=5, n_classes=3, n_informative=3, random_state=0 + ) + y[0] = 0 + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = -1 + monotonic_cst[1] = 1 + est = TreeClassifier(max_depth=None, monotonic_cst=monotonic_cst, random_state=0) + + msg = "Monotonicity constraints are not supported with multiclass classification" + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +def test_multiple_output_raises(TreeClassifier): + X = [[1, 2, 3, 4, 5], [6, 
7, 8, 9, 10]] + y = [[1, 0, 1, 0, 1], [1, 0, 1, 0, 1]] + + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-1, 1]), random_state=0 + ) + msg = "Monotonicity constraints are not supported with multiple output" + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize( + "DecisionTreeEstimator", [DecisionTreeClassifier, DecisionTreeRegressor] +) +def test_missing_values_raises(DecisionTreeEstimator): + X, y = make_classification( + n_samples=100, n_features=5, n_classes=2, n_informative=3, random_state=0 + ) + X[0, 0] = np.nan + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = 1 + est = DecisionTreeEstimator( + max_depth=None, monotonic_cst=monotonic_cst, random_state=0 + ) + + msg = "Input X contains NaN" + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +def test_bad_monotonic_cst_raises(TreeClassifier): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + y = [1, 0, 1, 0, 1] + + msg = "monotonic_cst has shape 3 but the input data X has 2 features." + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-1, 1, 0]), random_state=0 + ) + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + msg = "monotonic_cst must be None or an array-like of -1, 0 or 1." + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-2, 2]), random_state=0 + ) + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-1, 0.8]), random_state=0 + ) + with pytest.raises(ValueError, match=msg + "(.*)0.8]"): + est.fit(X, y) + + +def assert_1d_reg_tree_children_monotonic_bounded(tree_, monotonic_sign): + values = tree_.value + for i in range(tree_.node_count): + if tree_.children_left[i] > i and tree_.children_right[i] > i: + # Check monotonicity on children + i_left = tree_.children_left[i] + i_right = tree_.children_right[i] + if monotonic_sign == 1: + assert values[i_left] <= values[i_right] + elif monotonic_sign == -1: + assert values[i_left] >= values[i_right] + val_middle = (values[i_left] + values[i_right]) / 2 + # Check bounds on grand-children, filtering out leaf nodes + if tree_.feature[i_left] >= 0: + i_left_right = tree_.children_right[i_left] + if monotonic_sign == 1: + assert values[i_left_right] <= val_middle + elif monotonic_sign == -1: + assert values[i_left_right] >= val_middle + if tree_.feature[i_right] >= 0: + i_right_left = tree_.children_left[i_right] + if monotonic_sign == 1: + assert val_middle <= values[i_right_left] + elif monotonic_sign == -1: + assert val_middle >= values[i_right_left] + + +def test_assert_1d_reg_tree_children_monotonic_bounded(): + X = np.linspace(-1, 1, 7).reshape(-1, 1) + y = np.sin(2 * np.pi * X.ravel()) + + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y) + + with pytest.raises(AssertionError): + assert_1d_reg_tree_children_monotonic_bounded(reg.tree_, 1) + + with pytest.raises(AssertionError): + assert_1d_reg_tree_children_monotonic_bounded(reg.tree_, -1) + + +def assert_1d_reg_monotonic(clf, monotonic_sign, min_x, max_x, n_steps): + X_grid = np.linspace(min_x, max_x, n_steps).reshape(-1, 1) + y_pred_grid = clf.predict(X_grid) + if monotonic_sign == 1: + assert (np.diff(y_pred_grid) >= 0.0).all() + elif monotonic_sign == -1: + assert (np.diff(y_pred_grid) <= 0.0).all() + + +@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES) +def test_1d_opposite_monotonicity_cst_data(TreeRegressor): + # 
Check that positive monotonic data with negative monotonic constraint + # yield constant predictions, equal to the average of target values + X = np.linspace(-2, 2, 10).reshape(-1, 1) + y = X.ravel() + clf = TreeRegressor(monotonic_cst=[-1]) + clf.fit(X, y) + assert clf.tree_.node_count == 1 + assert clf.tree_.value[0] == 0.0 + + # Swap monotonicity + clf = TreeRegressor(monotonic_cst=[1]) + clf.fit(X, -y) + assert clf.tree_.node_count == 1 + assert clf.tree_.value[0] == 0.0 + + +@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES) +@pytest.mark.parametrize("monotonic_sign", (-1, 1)) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error")) +def test_1d_tree_nodes_values( + TreeRegressor, monotonic_sign, depth_first_builder, criterion, global_random_seed +): + # Adaptation from test_nodes_values in test_monotonic_constraints.py + # in sklearn.ensemble._hist_gradient_boosting + # Build a single tree with only one feature, and make sure the node + # values respect the monotonicity constraints. + + # Considering the following tree with a monotonic +1 constraint, we + # should have: + # + # root + # / \ + # a b + # / \ / \ + # c d e f + # + # a <= root <= b + # c <= d <= (a + b) / 2 <= e <= f + + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + n_features = 1 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + if depth_first_builder: + # No max_leaf_nodes, default depth first tree builder + clf = TreeRegressor( + monotonic_cst=[monotonic_sign], + criterion=criterion, + random_state=global_random_seed, + ) + else: + # max_leaf_nodes triggers best first tree builder + clf = TreeRegressor( + monotonic_cst=[monotonic_sign], + max_leaf_nodes=n_samples, + criterion=criterion, + random_state=global_random_seed, + ) + clf.fit(X, y) + + assert_1d_reg_tree_children_monotonic_bounded(clf.tree_, monotonic_sign) + assert_1d_reg_monotonic(clf, monotonic_sign, np.min(X), np.max(X), 100) + + +def assert_nd_reg_tree_children_monotonic_bounded(tree_, monotonic_cst): + upper_bound = np.full(tree_.node_count, np.inf) + lower_bound = np.full(tree_.node_count, -np.inf) + for i in range(tree_.node_count): + feature = tree_.feature[i] + node_value = tree_.value[i][0][0] # unpack value from nx1x1 array + # While building the tree, the computed middle value is slightly + # different from the average of the siblings values, because + # sum_right / weighted_n_right + # is slightly different from the value of the right sibling. + # This can cause a discrepancy up to numerical noise when clipping, + # which is resolved by comparing with some loss of precision. + assert np.float32(node_value) <= np.float32(upper_bound[i]) + assert np.float32(node_value) >= np.float32(lower_bound[i]) + + if feature < 0: + # Leaf: nothing to do + continue + + # Split node: check and update bounds for the children. + i_left = tree_.children_left[i] + i_right = tree_.children_right[i] + # unpack value from nx1x1 array + middle_value = (tree_.value[i_left][0][0] + tree_.value[i_right][0][0]) / 2 + + if monotonic_cst[feature] == 0: + # Feature without monotonicity constraint: propagate bounds + # down the tree to both children. 
+ # Otherwise, with 2 features and a monotonic increase constraint + # (encoded by +1) on feature 0, the following tree can be accepted, + # although it does not respect the monotonic increase constraint: + # + # X[0] <= 0 + # value = 100 + # / \ + # X[0] <= -1 X[1] <= 0 + # value = 50 value = 150 + # / \ / \ + # leaf leaf leaf leaf + # value = 25 value = 75 value = 50 value = 250 + + lower_bound[i_left] = lower_bound[i] + upper_bound[i_left] = upper_bound[i] + lower_bound[i_right] = lower_bound[i] + upper_bound[i_right] = upper_bound[i] + + elif monotonic_cst[feature] == 1: + # Feature with constraint: check monotonicity + assert tree_.value[i_left] <= tree_.value[i_right] + + # Propagate bounds down the tree to both children. + lower_bound[i_left] = lower_bound[i] + upper_bound[i_left] = middle_value + lower_bound[i_right] = middle_value + upper_bound[i_right] = upper_bound[i] + + elif monotonic_cst[feature] == -1: + # Feature with constraint: check monotonicity + assert tree_.value[i_left] >= tree_.value[i_right] + + # Update and propagate bounds down the tree to both children. + lower_bound[i_left] = middle_value + upper_bound[i_left] = upper_bound[i] + lower_bound[i_right] = lower_bound[i] + upper_bound[i_right] = middle_value + + else: # pragma: no cover + raise ValueError(f"monotonic_cst[{feature}]={monotonic_cst[feature]}") + + +def test_assert_nd_reg_tree_children_monotonic_bounded(): + # Check that assert_nd_reg_tree_children_monotonic_bounded can detect + # non-monotonic tree predictions. + X = np.linspace(0, 2 * np.pi, 30).reshape(-1, 1) + y = np.sin(X).ravel() + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [1]) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [-1]) + + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [0]) + + # Check that assert_nd_reg_tree_children_monotonic_bounded raises + # when the data (and therefore the model) is naturally monotonic in the + # opposite direction. + X = np.linspace(-5, 5, 5).reshape(-1, 1) + y = X.ravel() ** 3 + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [-1]) + + # For completeness, check that the converse holds when swapping the sign. + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, -y) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [1]) + + +@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES) +@pytest.mark.parametrize("monotonic_sign", (-1, 1)) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error")) +def test_nd_tree_nodes_values( + TreeRegressor, monotonic_sign, depth_first_builder, criterion, global_random_seed +): + # Build tree with several features, and make sure the nodes + # values respect the monotonicity constraints. + + # Considering the following tree with a monotonic increase constraint on X[0], + # we should have: + # + # root + # X[0]<=t + # / \ + # a b + # X[0]<=u X[1]<=v + # / \ / \ + # c d e f + # + # i) a <= root <= b + # ii) c <= a <= d <= (a+b)/2 + # iii) (a+b)/2 <= min(e,f) + # For iii) we check that each node value is within the proper lower and + # upper bounds. 
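+    # Illustrative sketch of the bound propagation (the numbers here are made
+    # up, not derived from the data generated below): with a +1 constraint on
+    # X[0], if the two children of the root predict a = 0.2 and b = 0.8, the
+    # middle value is (0.2 + 0.8) / 2 = 0.5, so every node in the left subtree
+    # must predict at most 0.5 and every node in the right subtree at least
+    # 0.5. assert_nd_reg_tree_children_monotonic_bounded enforces this by
+    # propagating (lower_bound, upper_bound) intervals from each split node
+    # down to its children and checking every node value against them.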
+ + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + n_features = 2 + monotonic_cst = [monotonic_sign, 0] + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + if depth_first_builder: + # No max_leaf_nodes, default depth first tree builder + clf = TreeRegressor( + monotonic_cst=monotonic_cst, + criterion=criterion, + random_state=global_random_seed, + ) + else: + # max_leaf_nodes triggers best first tree builder + clf = TreeRegressor( + monotonic_cst=monotonic_cst, + max_leaf_nodes=n_samples, + criterion=criterion, + random_state=global_random_seed, + ) + clf.fit(X, y) + assert_nd_reg_tree_children_monotonic_bounded(clf.tree_, monotonic_cst) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_reingold_tilford.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_reingold_tilford.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0ce3ce2cffc2792db28858bff69acb8eb4d45a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_reingold_tilford.py @@ -0,0 +1,49 @@ +import numpy as np +import pytest + +from sklearn.tree._reingold_tilford import Tree, buchheim + +simple_tree = Tree("", 0, Tree("", 1), Tree("", 2)) + +bigger_tree = Tree( + "", + 0, + Tree( + "", + 1, + Tree("", 3), + Tree("", 4, Tree("", 7), Tree("", 8)), + ), + Tree("", 2, Tree("", 5), Tree("", 6)), +) + + +@pytest.mark.parametrize("tree, n_nodes", [(simple_tree, 3), (bigger_tree, 9)]) +def test_buchheim(tree, n_nodes): + def walk_tree(draw_tree): + res = [(draw_tree.x, draw_tree.y)] + for child in draw_tree.children: + # parents higher than children: + assert child.y == draw_tree.y + 1 + res.extend(walk_tree(child)) + if len(draw_tree.children): + # these trees are always binary + # parents are centered above children + assert ( + draw_tree.x == (draw_tree.children[0].x + draw_tree.children[1].x) / 2 + ) + return res + + layout = buchheim(tree) + coordinates = walk_tree(layout) + assert len(coordinates) == n_nodes + # test that x values are unique per depth / level + # we could also do it quicker using defaultdicts.. + depth = 0 + while True: + x_at_this_depth = [node[0] for node in coordinates if node[1] == depth] + if not x_at_this_depth: + # reached all leafs + break + assert len(np.unique(x_at_this_depth)) == len(x_at_this_depth) + depth += 1 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_tree.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..000e684d6a488d459993508188008a38cdfd40d3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tree/tests/test_tree.py @@ -0,0 +1,2717 @@ +""" +Testing for the tree module (sklearn.tree). 
+""" + +import copy +import copyreg +import io +import pickle +import struct +from itertools import chain, product + +import joblib +import numpy as np +import pytest +from joblib.numpy_pickle import NumpyPickler +from numpy.testing import assert_allclose + +from sklearn import clone, datasets, tree +from sklearn.dummy import DummyRegressor +from sklearn.exceptions import NotFittedError +from sklearn.impute import SimpleImputer +from sklearn.metrics import accuracy_score, mean_poisson_deviance, mean_squared_error +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.random_projection import _sparse_random_matrix +from sklearn.tree import ( + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) +from sklearn.tree._classes import ( + CRITERIA_CLF, + CRITERIA_REG, + DENSE_SPLITTERS, + SPARSE_SPLITTERS, +) +from sklearn.tree._tree import ( + NODE_DTYPE, + TREE_LEAF, + TREE_UNDEFINED, + _check_n_classes, + _check_node_ndarray, + _check_value_ndarray, +) +from sklearn.tree._tree import Tree as CythonTree +from sklearn.utils import _IS_32BIT, compute_sample_weight +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + create_memmap_backed_data, + ignore_warnings, + skip_if_32bit, +) +from sklearn.utils.estimator_checks import check_sample_weights_invariance +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import check_random_state + +CLF_CRITERIONS = ("gini", "log_loss") +REG_CRITERIONS = ("squared_error", "absolute_error", "friedman_mse", "poisson") + +CLF_TREES = { + "DecisionTreeClassifier": DecisionTreeClassifier, + "ExtraTreeClassifier": ExtraTreeClassifier, +} + +REG_TREES = { + "DecisionTreeRegressor": DecisionTreeRegressor, + "ExtraTreeRegressor": ExtraTreeRegressor, +} + +ALL_TREES: dict = dict() +ALL_TREES.update(CLF_TREES) +ALL_TREES.update(REG_TREES) + +SPARSE_TREES = [ + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", +] + + +X_small = np.array( + [ + [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0], + [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1], + [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1], + [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1], + [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1], + [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1], + [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1], + [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1], + [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1], + [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0], + [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0], + [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0], + [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0], + [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0], + [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1], + [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1], + [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1], + [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1], + [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1], + [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1], + [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1], + [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1], + [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0], + ] +) + +y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] +y_small_reg = [ + 1.0, + 2.1, + 1.2, + 0.05, + 10, + 2.4, + 3.1, + 1.01, + 0.01, + 
2.98, + 3.1, + 1.1, + 0.0, + 1.2, + 2, + 11, + 0, + 0, + 4.5, + 0.201, + 1.06, + 0.9, + 0, +] + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [-1, 1, 1] + +# also load the iris dataset +# and randomly permute it +iris = datasets.load_iris() +rng = np.random.RandomState(1) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + +# also load the diabetes dataset +# and randomly permute it +diabetes = datasets.load_diabetes() +perm = rng.permutation(diabetes.target.size) +diabetes.data = diabetes.data[perm] +diabetes.target = diabetes.target[perm] + +digits = datasets.load_digits() +perm = rng.permutation(digits.target.size) +digits.data = digits.data[perm] +digits.target = digits.target[perm] + +random_state = check_random_state(0) +X_multilabel, y_multilabel = datasets.make_multilabel_classification( + random_state=0, n_samples=30, n_features=10 +) + +# NB: despite their names X_sparse_* are numpy arrays (and not sparse matrices) +X_sparse_pos = random_state.uniform(size=(20, 5)) +X_sparse_pos[X_sparse_pos <= 0.8] = 0.0 +y_random = random_state.randint(0, 4, size=(20,)) +X_sparse_mix = _sparse_random_matrix(20, 10, density=0.25, random_state=0).toarray() + + +DATASETS = { + "iris": {"X": iris.data, "y": iris.target}, + "diabetes": {"X": diabetes.data, "y": diabetes.target}, + "digits": {"X": digits.data, "y": digits.target}, + "toy": {"X": X, "y": y}, + "clf_small": {"X": X_small, "y": y_small}, + "reg_small": {"X": X_small, "y": y_small_reg}, + "multilabel": {"X": X_multilabel, "y": y_multilabel}, + "sparse-pos": {"X": X_sparse_pos, "y": y_random}, + "sparse-neg": {"X": -X_sparse_pos, "y": y_random}, + "sparse-mix": {"X": X_sparse_mix, "y": y_random}, + "zeros": {"X": np.zeros((20, 3)), "y": y_random}, +} + + +def assert_tree_equal(d, s, message): + assert ( + s.node_count == d.node_count + ), "{0}: inequal number of node ({1} != {2})".format( + message, s.node_count, d.node_count + ) + + assert_array_equal( + d.children_right, s.children_right, message + ": inequal children_right" + ) + assert_array_equal( + d.children_left, s.children_left, message + ": inequal children_left" + ) + + external = d.children_right == TREE_LEAF + internal = np.logical_not(external) + + assert_array_equal( + d.feature[internal], s.feature[internal], message + ": inequal features" + ) + assert_array_equal( + d.threshold[internal], s.threshold[internal], message + ": inequal threshold" + ) + assert_array_equal( + d.n_node_samples.sum(), + s.n_node_samples.sum(), + message + ": inequal sum(n_node_samples)", + ) + assert_array_equal( + d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples" + ) + + assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") + + assert_array_almost_equal( + d.value[external], s.value[external], err_msg=message + ": inequal value" + ) + + +def test_classification_toy(): + # Check classification on a toy dataset. + for name, Tree in CLF_TREES.items(): + clf = Tree(random_state=0) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) + + clf = Tree(max_features=1, random_state=1) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) + + +def test_weighted_classification_toy(): + # Check classification on a weighted toy dataset. 
+    for name, Tree in CLF_TREES.items():
+        clf = Tree(random_state=0)
+
+        clf.fit(X, y, sample_weight=np.ones(len(X)))
+        assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name))
+
+        clf.fit(X, y, sample_weight=np.full(len(X), 0.5))
+        assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name))
+
+
+@pytest.mark.parametrize("Tree", REG_TREES.values())
+@pytest.mark.parametrize("criterion", REG_CRITERIONS)
+def test_regression_toy(Tree, criterion):
+    # Check regression on a toy dataset.
+    if criterion == "poisson":
+        # make target positive while not touching the original y and
+        # true_result
+        a = np.abs(np.min(y)) + 1
+        y_train = np.array(y) + a
+        y_test = np.array(true_result) + a
+    else:
+        y_train = y
+        y_test = true_result
+
+    reg = Tree(criterion=criterion, random_state=1)
+    reg.fit(X, y_train)
+    assert_allclose(reg.predict(T), y_test)
+
+    clf = Tree(criterion=criterion, max_features=1, random_state=1)
+    clf.fit(X, y_train)
+    assert_allclose(clf.predict(T), y_test)
+
+
+def test_xor():
+    # Check on a XOR problem
+    y = np.zeros((10, 10))
+    y[:5, :5] = 1
+    y[5:, 5:] = 1
+
+    gridx, gridy = np.indices(y.shape)
+
+    X = np.vstack([gridx.ravel(), gridy.ravel()]).T
+    y = y.ravel()
+
+    for name, Tree in CLF_TREES.items():
+        clf = Tree(random_state=0)
+        clf.fit(X, y)
+        assert clf.score(X, y) == 1.0, "Failed with {0}".format(name)
+
+        clf = Tree(random_state=0, max_features=1)
+        clf.fit(X, y)
+        assert clf.score(X, y) == 1.0, "Failed with {0}".format(name)
+
+
+def test_iris():
+    # Check consistency on dataset iris.
+    for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
+        clf = Tree(criterion=criterion, random_state=0)
+        clf.fit(iris.data, iris.target)
+        score = accuracy_score(clf.predict(iris.data), iris.target)
+        assert score > 0.9, "Failed with {0}, criterion = {1} and score = {2}".format(
+            name, criterion, score
+        )
+
+        clf = Tree(criterion=criterion, max_features=2, random_state=0)
+        clf.fit(iris.data, iris.target)
+        score = accuracy_score(clf.predict(iris.data), iris.target)
+        assert score > 0.5, "Failed with {0}, criterion = {1} and score = {2}".format(
+            name, criterion, score
+        )
+
+
+@pytest.mark.parametrize("name, Tree", REG_TREES.items())
+@pytest.mark.parametrize("criterion", REG_CRITERIONS)
+def test_diabetes_overfit(name, Tree, criterion):
+    # check consistency of overfitted trees on the diabetes dataset
+    # since the trees will overfit, we expect an MSE of 0
+    reg = Tree(criterion=criterion, random_state=0)
+    reg.fit(diabetes.data, diabetes.target)
+    score = mean_squared_error(diabetes.target, reg.predict(diabetes.data))
+    assert score == pytest.approx(
+        0
+    ), f"Failed with {name}, criterion = {criterion} and score = {score}"
+
+
+@skip_if_32bit
+@pytest.mark.parametrize("name, Tree", REG_TREES.items())
+@pytest.mark.parametrize(
+    "criterion, max_depth, metric, max_loss",
+    [
+        ("squared_error", 15, mean_squared_error, 60),
+        ("absolute_error", 20, mean_squared_error, 60),
+        ("friedman_mse", 15, mean_squared_error, 60),
+        ("poisson", 15, mean_poisson_deviance, 30),
+    ],
+)
+def test_diabetes_underfit(name, Tree, criterion, max_depth, metric, max_loss):
+    # check consistency of trees when the depth and the number of features are
+    # limited
+
+    reg = Tree(criterion=criterion, max_depth=max_depth, max_features=6, random_state=0)
+    reg.fit(diabetes.data, diabetes.target)
+    loss = metric(diabetes.target, reg.predict(diabetes.data))
+    assert 0 < loss < max_loss
+
+
+def test_probability():
+    # Predict probabilities using
DecisionTreeClassifier. + + for name, Tree in CLF_TREES.items(): + clf = Tree(max_depth=1, max_features=1, random_state=42) + clf.fit(iris.data, iris.target) + + prob_predict = clf.predict_proba(iris.data) + assert_array_almost_equal( + np.sum(prob_predict, 1), + np.ones(iris.data.shape[0]), + err_msg="Failed with {0}".format(name), + ) + assert_array_equal( + np.argmax(prob_predict, 1), + clf.predict(iris.data), + err_msg="Failed with {0}".format(name), + ) + assert_almost_equal( + clf.predict_proba(iris.data), + np.exp(clf.predict_log_proba(iris.data)), + 8, + err_msg="Failed with {0}".format(name), + ) + + +def test_arrayrepr(): + # Check the array representation. + # Check resize + X = np.arange(10000)[:, np.newaxis] + y = np.arange(10000) + + for name, Tree in REG_TREES.items(): + reg = Tree(max_depth=None, random_state=0) + reg.fit(X, y) + + +def test_pure_set(): + # Check when y is pure. + X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] + y = [1, 1, 1, 1, 1, 1] + + for name, TreeClassifier in CLF_TREES.items(): + clf = TreeClassifier(random_state=0) + clf.fit(X, y) + assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) + + for name, TreeRegressor in REG_TREES.items(): + reg = TreeRegressor(random_state=0) + reg.fit(X, y) + assert_almost_equal(reg.predict(X), y, err_msg="Failed with {0}".format(name)) + + +def test_numerical_stability(): + # Check numerical stability. + X = np.array( + [ + [152.08097839, 140.40744019, 129.75102234, 159.90493774], + [142.50700378, 135.81935120, 117.82884979, 162.75781250], + [127.28772736, 140.40744019, 129.75102234, 159.90493774], + [132.37025452, 143.71923828, 138.35694885, 157.84558105], + [103.10237122, 143.71928406, 138.35696411, 157.84559631], + [127.71276855, 143.71923828, 138.35694885, 157.84558105], + [120.91514587, 140.40744019, 129.75102234, 159.90493774], + ] + ) + + y = np.array([1.0, 0.70209277, 0.53896582, 0.0, 0.90914464, 0.48026916, 0.49622521]) + + with np.errstate(all="raise"): + for name, Tree in REG_TREES.items(): + reg = Tree(random_state=0) + reg.fit(X, y) + reg.fit(X, -y) + reg.fit(-X, y) + reg.fit(-X, -y) + + +def test_importances(): + # Check variable importances. + X, y = datasets.make_classification( + n_samples=5000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + for name, Tree in CLF_TREES.items(): + clf = Tree(random_state=0) + + clf.fit(X, y) + importances = clf.feature_importances_ + n_important = np.sum(importances > 0.1) + + assert importances.shape[0] == 10, "Failed with {0}".format(name) + assert n_important == 3, "Failed with {0}".format(name) + + # Check on iris that importances are the same for all builders + clf = DecisionTreeClassifier(random_state=0) + clf.fit(iris.data, iris.target) + clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) + clf2.fit(iris.data, iris.target) + + assert_array_equal(clf.feature_importances_, clf2.feature_importances_) + + +def test_importances_raises(): + # Check if variable importance before fit raises ValueError. 
+ clf = DecisionTreeClassifier() + with pytest.raises(ValueError): + getattr(clf, "feature_importances_") + + +def test_importances_gini_equal_squared_error(): + # Check that gini is equivalent to squared_error for binary output variable + + X, y = datasets.make_classification( + n_samples=2000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # The gini index and the mean square error (variance) might differ due + # to numerical instability. Since those instabilities mainly occurs at + # high tree depth, we restrict this maximal depth. + clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit( + X, y + ) + reg = DecisionTreeRegressor( + criterion="squared_error", max_depth=5, random_state=0 + ).fit(X, y) + + assert_almost_equal(clf.feature_importances_, reg.feature_importances_) + assert_array_equal(clf.tree_.feature, reg.tree_.feature) + assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) + assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) + assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) + + +def test_max_features(): + # Check max_features. + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(max_features="sqrt") + est.fit(iris.data, iris.target) + assert est.max_features_ == int(np.sqrt(iris.data.shape[1])) + + est = TreeEstimator(max_features="log2") + est.fit(iris.data, iris.target) + assert est.max_features_ == int(np.log2(iris.data.shape[1])) + + est = TreeEstimator(max_features=1) + est.fit(iris.data, iris.target) + assert est.max_features_ == 1 + + est = TreeEstimator(max_features=3) + est.fit(iris.data, iris.target) + assert est.max_features_ == 3 + + est = TreeEstimator(max_features=0.01) + est.fit(iris.data, iris.target) + assert est.max_features_ == 1 + + est = TreeEstimator(max_features=0.5) + est.fit(iris.data, iris.target) + assert est.max_features_ == int(0.5 * iris.data.shape[1]) + + est = TreeEstimator(max_features=1.0) + est.fit(iris.data, iris.target) + assert est.max_features_ == iris.data.shape[1] + + est = TreeEstimator(max_features=None) + est.fit(iris.data, iris.target) + assert est.max_features_ == iris.data.shape[1] + + +def test_error(): + # Test that it gives proper exception on deficient input. + for name, TreeEstimator in CLF_TREES.items(): + # predict before fit + est = TreeEstimator() + with pytest.raises(NotFittedError): + est.predict_proba(X) + + est.fit(X, y) + X2 = [[-2, -1, 1]] # wrong feature shape for sample + with pytest.raises(ValueError): + est.predict_proba(X2) + + # Wrong dimensions + est = TreeEstimator() + y2 = y[:-1] + with pytest.raises(ValueError): + est.fit(X, y2) + + # Test with arrays that are non-contiguous. 
+ Xf = np.asfortranarray(X) + est = TreeEstimator() + est.fit(Xf, y) + assert_almost_equal(est.predict(T), true_result) + + # predict before fitting + est = TreeEstimator() + with pytest.raises(NotFittedError): + est.predict(T) + + # predict on vector with different dims + est.fit(X, y) + t = np.asarray(T) + with pytest.raises(ValueError): + est.predict(t[:, 1:]) + + # wrong sample shape + Xt = np.array(X).T + + est = TreeEstimator() + est.fit(np.dot(X, Xt), y) + with pytest.raises(ValueError): + est.predict(X) + with pytest.raises(ValueError): + est.apply(X) + + clf = TreeEstimator() + clf.fit(X, y) + with pytest.raises(ValueError): + clf.predict(Xt) + with pytest.raises(ValueError): + clf.apply(Xt) + + # apply before fitting + est = TreeEstimator() + with pytest.raises(NotFittedError): + est.apply(T) + + # non positive target for Poisson splitting Criterion + est = DecisionTreeRegressor(criterion="poisson") + with pytest.raises(ValueError, match="y is not positive.*Poisson"): + est.fit([[0, 1, 2]], [0, 0, 0]) + with pytest.raises(ValueError, match="Some.*y are negative.*Poisson"): + est.fit([[0, 1, 2]], [5, -0.1, 2]) + + +def test_min_samples_split(): + """Test min_samples_split parameter""" + X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE) + y = iris.target + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): + TreeEstimator = ALL_TREES[name] + + # test for integer parameter + est = TreeEstimator( + min_samples_split=10, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + # count samples on nodes, -1 means it is a leaf + node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] + + assert np.min(node_samples) > 9, "Failed with {0}".format(name) + + # test for float parameter + est = TreeEstimator( + min_samples_split=0.2, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + # count samples on nodes, -1 means it is a leaf + node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] + + assert np.min(node_samples) > 9, "Failed with {0}".format(name) + + +def test_min_samples_leaf(): + # Test if leaves contain more than leaf_count training examples + X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE) + y = iris.target + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): + TreeEstimator = ALL_TREES[name] + + # test integer parameter + est = TreeEstimator( + min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + out = est.tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > 4, "Failed with {0}".format(name) + + # test float parameter + est = TreeEstimator( + min_samples_leaf=0.1, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + out = est.tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > 4, "Failed with {0}".format(name) + + +def check_min_weight_fraction_leaf(name, datasets, sparse_container=None): + """Test if leaves contain at least min_weight_fraction_leaf of the + training set""" + X = DATASETS[datasets]["X"].astype(np.float32) + if sparse_container is not None: + X = sparse_container(X) + y = DATASETS[datasets]["y"] + + weights = rng.rand(X.shape[0]) + total_weight = 
np.sum(weights) + + TreeEstimator = ALL_TREES[name] + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): + est = TreeEstimator( + min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y, sample_weight=weights) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out, weights=weights) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert ( + np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf + ), "Failed with {0} min_weight_fraction_leaf={1}".format( + name, est.min_weight_fraction_leaf + ) + + # test case with no weights passed in + total_weight = X.shape[0] + + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): + est = TreeEstimator( + min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert ( + np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf + ), "Failed with {0} min_weight_fraction_leaf={1}".format( + name, est.min_weight_fraction_leaf + ) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_min_weight_fraction_leaf_on_dense_input(name): + check_min_weight_fraction_leaf(name, "iris") + + +@pytest.mark.parametrize("name", SPARSE_TREES) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_min_weight_fraction_leaf_on_sparse_input(name, csc_container): + check_min_weight_fraction_leaf(name, "multilabel", sparse_container=csc_container) + + +def check_min_weight_fraction_leaf_with_min_samples_leaf( + name, datasets, sparse_container=None +): + """Test the interaction between min_weight_fraction_leaf and + min_samples_leaf when sample_weights is not provided in fit.""" + X = DATASETS[datasets]["X"].astype(np.float32) + if sparse_container is not None: + X = sparse_container(X) + y = DATASETS[datasets]["y"] + + total_weight = X.shape[0] + TreeEstimator = ALL_TREES[name] + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)): + # test integer min_samples_leaf + est = TreeEstimator( + min_weight_fraction_leaf=frac, + max_leaf_nodes=max_leaf_nodes, + min_samples_leaf=5, + random_state=0, + ) + est.fit(X, y) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert np.min(leaf_weights) >= max( + (total_weight * est.min_weight_fraction_leaf), 5 + ), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format( + name, est.min_weight_fraction_leaf, est.min_samples_leaf + ) + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)): + # test float min_samples_leaf + est = TreeEstimator( + min_weight_fraction_leaf=frac, + max_leaf_nodes=max_leaf_nodes, + min_samples_leaf=0.1, + random_state=0, + ) + est.fit(X, y) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert np.min(leaf_weights) >= max( + (total_weight * 
est.min_weight_fraction_leaf), + (total_weight * est.min_samples_leaf), + ), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format( + name, est.min_weight_fraction_leaf, est.min_samples_leaf + ) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_min_weight_fraction_leaf_with_min_samples_leaf_on_dense_input(name): + check_min_weight_fraction_leaf_with_min_samples_leaf(name, "iris") + + +@pytest.mark.parametrize("name", SPARSE_TREES) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_min_weight_fraction_leaf_with_min_samples_leaf_on_sparse_input( + name, csc_container +): + check_min_weight_fraction_leaf_with_min_samples_leaf( + name, "multilabel", sparse_container=csc_container + ) + + +def test_min_impurity_decrease(global_random_seed): + # test if min_impurity_decrease ensure that a split is made only if + # if the impurity decrease is at least that value + X, y = datasets.make_classification(n_samples=100, random_state=global_random_seed) + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): + TreeEstimator = ALL_TREES[name] + + # Check default value of min_impurity_decrease, 1e-7 + est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0) + # Check with explicit value of 0.05 + est2 = TreeEstimator( + max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.05, random_state=0 + ) + # Check with a much lower value of 0.0001 + est3 = TreeEstimator( + max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.0001, random_state=0 + ) + # Check with a much lower value of 0.1 + est4 = TreeEstimator( + max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.1, random_state=0 + ) + + for est, expected_decrease in ( + (est1, 1e-7), + (est2, 0.05), + (est3, 0.0001), + (est4, 0.1), + ): + assert ( + est.min_impurity_decrease <= expected_decrease + ), "Failed, min_impurity_decrease = {0} > {1}".format( + est.min_impurity_decrease, expected_decrease + ) + est.fit(X, y) + for node in range(est.tree_.node_count): + # If current node is a not leaf node, check if the split was + # justified w.r.t the min_impurity_decrease + if est.tree_.children_left[node] != TREE_LEAF: + imp_parent = est.tree_.impurity[node] + wtd_n_node = est.tree_.weighted_n_node_samples[node] + + left = est.tree_.children_left[node] + wtd_n_left = est.tree_.weighted_n_node_samples[left] + imp_left = est.tree_.impurity[left] + wtd_imp_left = wtd_n_left * imp_left + + right = est.tree_.children_right[node] + wtd_n_right = est.tree_.weighted_n_node_samples[right] + imp_right = est.tree_.impurity[right] + wtd_imp_right = wtd_n_right * imp_right + + wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left + wtd_avg_left_right_imp /= wtd_n_node + + fractional_node_weight = ( + est.tree_.weighted_n_node_samples[node] / X.shape[0] + ) + + actual_decrease = fractional_node_weight * ( + imp_parent - wtd_avg_left_right_imp + ) + + assert ( + actual_decrease >= expected_decrease + ), "Failed with {0} expected min_impurity_decrease={1}".format( + actual_decrease, expected_decrease + ) + + +def test_pickle(): + """Test pickling preserves Tree properties and performance.""" + for name, TreeEstimator in ALL_TREES.items(): + if "Classifier" in name: + X, y = iris.data, iris.target + else: + X, y = diabetes.data, diabetes.target + + est = TreeEstimator(random_state=0) + est.fit(X, y) + score = est.score(X, y) + + # test that all class properties are maintained + attributes = [ + "max_depth", + 
"node_count", + "capacity", + "n_classes", + "children_left", + "children_right", + "n_leaves", + "feature", + "threshold", + "impurity", + "n_node_samples", + "weighted_n_node_samples", + "value", + ] + fitted_attribute = { + attribute: getattr(est.tree_, attribute) for attribute in attributes + } + + serialized_object = pickle.dumps(est) + est2 = pickle.loads(serialized_object) + assert type(est2) == est.__class__ + + score2 = est2.score(X, y) + assert ( + score == score2 + ), "Failed to generate same score after pickling with {0}".format(name) + for attribute in fitted_attribute: + assert_array_equal( + getattr(est2.tree_, attribute), + fitted_attribute[attribute], + err_msg=( + f"Failed to generate same attribute {attribute} after pickling with" + f" {name}" + ), + ) + + +def test_multioutput(): + # Check estimators on multi-output problems. + X = [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-2, 1], + [-1, 1], + [-1, 2], + [2, -1], + [1, -1], + [1, -2], + ] + + y = [ + [-1, 0], + [-1, 0], + [-1, 0], + [1, 1], + [1, 1], + [1, 1], + [-1, 2], + [-1, 2], + [-1, 2], + [1, 3], + [1, 3], + [1, 3], + ] + + T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] + y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] + + # toy classification problem + for name, TreeClassifier in CLF_TREES.items(): + clf = TreeClassifier(random_state=0) + y_hat = clf.fit(X, y).predict(T) + assert_array_equal(y_hat, y_true) + assert y_hat.shape == (4, 2) + + proba = clf.predict_proba(T) + assert len(proba) == 2 + assert proba[0].shape == (4, 2) + assert proba[1].shape == (4, 4) + + log_proba = clf.predict_log_proba(T) + assert len(log_proba) == 2 + assert log_proba[0].shape == (4, 2) + assert log_proba[1].shape == (4, 4) + + # toy regression problem + for name, TreeRegressor in REG_TREES.items(): + reg = TreeRegressor(random_state=0) + y_hat = reg.fit(X, y).predict(T) + assert_almost_equal(y_hat, y_true) + assert y_hat.shape == (4, 2) + + +def test_classes_shape(): + # Test that n_classes_ and classes_ have proper shape. + for name, TreeClassifier in CLF_TREES.items(): + # Classification, single output + clf = TreeClassifier(random_state=0) + clf.fit(X, y) + + assert clf.n_classes_ == 2 + assert_array_equal(clf.classes_, [-1, 1]) + + # Classification, multi-output + _y = np.vstack((y, np.array(y) * 2)).T + clf = TreeClassifier(random_state=0) + clf.fit(X, _y) + assert len(clf.n_classes_) == 2 + assert len(clf.classes_) == 2 + assert_array_equal(clf.n_classes_, [2, 2]) + assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) + + +def test_unbalanced_iris(): + # Check class rebalancing. 
+ unbalanced_X = iris.data[:125] + unbalanced_y = iris.target[:125] + sample_weight = compute_sample_weight("balanced", unbalanced_y) + + for name, TreeClassifier in CLF_TREES.items(): + clf = TreeClassifier(random_state=0) + clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) + assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) + + +def test_memory_layout(): + # Check that it works no matter the memory layout + for (name, TreeEstimator), dtype in product( + ALL_TREES.items(), [np.float64, np.float32] + ): + est = TreeEstimator(random_state=0) + + # Nothing + X = np.asarray(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # C-order + X = np.asarray(iris.data, order="C", dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # F-order + X = np.asarray(iris.data, order="F", dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # Contiguous + X = np.ascontiguousarray(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # csr + for csr_container in CSR_CONTAINERS: + X = csr_container(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # csc + for csc_container in CSC_CONTAINERS: + X = csc_container(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # Strided + X = np.asarray(iris.data[::3], dtype=dtype) + y = iris.target[::3] + assert_array_equal(est.fit(X, y).predict(X), y) + + +def test_sample_weight(): + # Check sample weighting. + # Test that zero-weighted samples are not taken into account + X = np.arange(100)[:, np.newaxis] + y = np.ones(100) + y[:50] = 0.0 + + sample_weight = np.ones(100) + sample_weight[y == 0] = 0.0 + + clf = DecisionTreeClassifier(random_state=0) + clf.fit(X, y, sample_weight=sample_weight) + assert_array_equal(clf.predict(X), np.ones(100)) + + # Test that low weighted samples are not taken into account at low depth + X = np.arange(200)[:, np.newaxis] + y = np.zeros(200) + y[50:100] = 1 + y[100:200] = 2 + X[100:200, 0] = 200 + + sample_weight = np.ones(200) + + sample_weight[y == 2] = 0.51 # Samples of class '2' are still weightier + clf = DecisionTreeClassifier(max_depth=1, random_state=0) + clf.fit(X, y, sample_weight=sample_weight) + assert clf.tree_.threshold[0] == 149.5 + + sample_weight[y == 2] = 0.5 # Samples of class '2' are no longer weightier + clf = DecisionTreeClassifier(max_depth=1, random_state=0) + clf.fit(X, y, sample_weight=sample_weight) + assert clf.tree_.threshold[0] == 49.5 # Threshold should have moved + + # Test that sample weighting is the same as having duplicates + X = iris.data + y = iris.target + + duplicates = rng.randint(0, X.shape[0], 100) + + clf = DecisionTreeClassifier(random_state=1) + clf.fit(X[duplicates], y[duplicates]) + + sample_weight = np.bincount(duplicates, minlength=X.shape[0]) + clf2 = DecisionTreeClassifier(random_state=1) + clf2.fit(X, y, sample_weight=sample_weight) + + internal = clf.tree_.children_left != tree._tree.TREE_LEAF + assert_array_almost_equal( + clf.tree_.threshold[internal], clf2.tree_.threshold[internal] + ) + + +def test_sample_weight_invalid(): + # Check sample weighting raises errors. 
+ X = np.arange(100)[:, np.newaxis] + y = np.ones(100) + y[:50] = 0.0 + + clf = DecisionTreeClassifier(random_state=0) + + sample_weight = np.random.rand(100, 1) + with pytest.raises(ValueError): + clf.fit(X, y, sample_weight=sample_weight) + + sample_weight = np.array(0) + expected_err = r"Singleton.* cannot be considered a valid collection" + with pytest.raises(TypeError, match=expected_err): + clf.fit(X, y, sample_weight=sample_weight) + + +@pytest.mark.parametrize("name", CLF_TREES) +def test_class_weights(name): + # Test that class_weights resemble sample_weights behavior. + TreeClassifier = CLF_TREES[name] + + # Iris is balanced, so no effect expected for using 'balanced' weights + clf1 = TreeClassifier(random_state=0) + clf1.fit(iris.data, iris.target) + clf2 = TreeClassifier(class_weight="balanced", random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Make a multi-output problem with three copies of Iris + iris_multi = np.vstack((iris.target, iris.target, iris.target)).T + # Create user-defined weights that should balance over the outputs + clf3 = TreeClassifier( + class_weight=[ + {0: 2.0, 1: 2.0, 2: 1.0}, + {0: 2.0, 1: 1.0, 2: 2.0}, + {0: 1.0, 1: 2.0, 2: 2.0}, + ], + random_state=0, + ) + clf3.fit(iris.data, iris_multi) + assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) + # Check against multi-output "auto" which should also have no effect + clf4 = TreeClassifier(class_weight="balanced", random_state=0) + clf4.fit(iris.data, iris_multi) + assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) + + # Inflate importance of class 1, check against user-defined weights + sample_weight = np.ones(iris.target.shape) + sample_weight[iris.target == 1] *= 100 + class_weight = {0: 1.0, 1: 100.0, 2: 1.0} + clf1 = TreeClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight) + clf2 = TreeClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Check that sample_weight and class_weight are multiplicative + clf1 = TreeClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight**2) + clf2 = TreeClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target, sample_weight) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + +@pytest.mark.parametrize("name", CLF_TREES) +def test_class_weight_errors(name): + # Test if class_weight raises errors and warnings when expected. + TreeClassifier = CLF_TREES[name] + _y = np.vstack((y, np.array(y) * 2)).T + + # Incorrect length list for multi-output + clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0) + err_msg = "number of elements in class_weight should match number of outputs." + with pytest.raises(ValueError, match=err_msg): + clf.fit(X, _y) + + +def test_max_leaf_nodes(): + # Test greedy trees with max_depth + 1 leafs. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + k = 4 + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) + assert est.get_n_leaves() == k + 1 + + +def test_max_leaf_nodes_max_depth(): + # Test precedence of max_leaf_nodes over max_depth. 
+ X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + k = 4 + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) + assert est.get_depth() == 1 + + +def test_arrays_persist(): + # Ensure property arrays' memory stays alive when tree disappears + # non-regression for #2726 + for attr in [ + "n_classes", + "value", + "children_left", + "children_right", + "threshold", + "impurity", + "feature", + "n_node_samples", + ]: + value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr) + # if pointing to freed memory, contents may be arbitrary + assert -3 <= value.flat[0] < 3, "Array points to arbitrary memory" + + +def test_only_constant_features(): + random_state = check_random_state(0) + X = np.zeros((10, 20)) + y = random_state.randint(0, 2, (10,)) + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(random_state=0) + est.fit(X, y) + assert est.tree_.max_depth == 0 + + +def test_behaviour_constant_feature_after_splits(): + X = np.transpose( + np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]], np.zeros((4, 11)))) + ) + y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3] + for name, TreeEstimator in ALL_TREES.items(): + # do not check extra random trees + if "ExtraTree" not in name: + est = TreeEstimator(random_state=0, max_features=1) + est.fit(X, y) + assert est.tree_.max_depth == 2 + assert est.tree_.node_count == 5 + + +def test_with_only_one_non_constant_features(): + X = np.hstack([np.array([[1.0], [1.0], [0.0], [0.0]]), np.zeros((4, 1000))]) + + y = np.array([0.0, 1.0, 0.0, 1.0]) + for name, TreeEstimator in CLF_TREES.items(): + est = TreeEstimator(random_state=0, max_features=1) + est.fit(X, y) + assert est.tree_.max_depth == 1 + assert_array_equal(est.predict_proba(X), np.full((4, 2), 0.5)) + + for name, TreeEstimator in REG_TREES.items(): + est = TreeEstimator(random_state=0, max_features=1) + est.fit(X, y) + assert est.tree_.max_depth == 1 + assert_array_equal(est.predict(X), np.full((4,), 0.5)) + + +def test_big_input(): + # Test if the warning for too large inputs is appropriate. + X = np.repeat(10**40.0, 4).astype(np.float64).reshape(-1, 1) + clf = DecisionTreeClassifier() + with pytest.raises(ValueError, match="float32"): + clf.fit(X, [0, 1, 0, 1]) + + +def test_realloc(): + from sklearn.tree._utils import _realloc_test + + with pytest.raises(MemoryError): + _realloc_test() + + +def test_huge_allocations(): + n_bits = 8 * struct.calcsize("P") + + X = np.random.randn(10, 2) + y = np.random.randint(0, 2, 10) + + # Sanity check: we cannot request more memory than the size of the address + # space. Currently raises OverflowError. + huge = 2 ** (n_bits + 1) + clf = DecisionTreeClassifier(splitter="best", max_leaf_nodes=huge) + with pytest.raises(Exception): + clf.fit(X, y) + + # Non-regression test: MemoryError used to be dropped by Cython + # because of missing "except *". 
+ huge = 2 ** (n_bits - 1) - 1 + clf = DecisionTreeClassifier(splitter="best", max_leaf_nodes=huge) + with pytest.raises(MemoryError): + clf.fit(X, y) + + +def check_sparse_input(tree, dataset, max_depth=None): + TreeEstimator = ALL_TREES[tree] + X = DATASETS[dataset]["X"] + y = DATASETS[dataset]["y"] + + # Gain testing time + if dataset in ["digits", "diabetes"]: + n_samples = X.shape[0] // 5 + X = X[:n_samples] + y = y[:n_samples] + + for sparse_container in COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS: + X_sparse = sparse_container(X) + + # Check the default (depth first search) + d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) + s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) + + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree), + ) + + y_pred = d.predict(X) + if tree in CLF_TREES: + y_proba = d.predict_proba(X) + y_log_proba = d.predict_log_proba(X) + + for sparse_container_test in COO_CONTAINERS + CSR_CONTAINERS + CSC_CONTAINERS: + X_sparse_test = sparse_container_test(X_sparse, dtype=np.float32) + + assert_array_almost_equal(s.predict(X_sparse_test), y_pred) + + if tree in CLF_TREES: + assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) + assert_array_almost_equal( + s.predict_log_proba(X_sparse_test), y_log_proba + ) + + +@pytest.mark.parametrize("tree_type", SPARSE_TREES) +@pytest.mark.parametrize( + "dataset", + ( + "clf_small", + "toy", + "digits", + "multilabel", + "sparse-pos", + "sparse-neg", + "sparse-mix", + "zeros", + ), +) +def test_sparse_input(tree_type, dataset): + max_depth = 3 if dataset == "digits" else None + check_sparse_input(tree_type, dataset, max_depth) + + +@pytest.mark.parametrize("tree_type", sorted(set(SPARSE_TREES).intersection(REG_TREES))) +@pytest.mark.parametrize("dataset", ["diabetes", "reg_small"]) +def test_sparse_input_reg_trees(tree_type, dataset): + # Due to numerical instability of MSE and too strict test, we limit the + # maximal depth + check_sparse_input(tree_type, dataset, 2) + + +@pytest.mark.parametrize("tree_type", SPARSE_TREES) +@pytest.mark.parametrize("dataset", ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_parameters(tree_type, dataset, csc_container): + TreeEstimator = ALL_TREES[tree_type] + X = DATASETS[dataset]["X"] + X_sparse = csc_container(X) + y = DATASETS[dataset]["y"] + + # Check max_features + d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) + s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + # Check min_samples_split + d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) + s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit( + X_sparse, y + ) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + # Check min_samples_leaf + d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) + s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit( + X_sparse, y + ) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different 
trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + # Check best-first search + d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) + s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + +@pytest.mark.parametrize( + "tree_type, criterion", + list(product([tree for tree in SPARSE_TREES if tree in REG_TREES], REG_CRITERIONS)) + + list( + product([tree for tree in SPARSE_TREES if tree in CLF_TREES], CLF_CRITERIONS) + ), +) +@pytest.mark.parametrize("dataset", ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_criteria(tree_type, dataset, csc_container, criterion): + TreeEstimator = ALL_TREES[tree_type] + X = DATASETS[dataset]["X"] + X_sparse = csc_container(X) + y = DATASETS[dataset]["y"] + + d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) + s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) + + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + +@pytest.mark.parametrize("tree_type", SPARSE_TREES) +@pytest.mark.parametrize( + "csc_container,csr_container", zip(CSC_CONTAINERS, CSR_CONTAINERS) +) +def test_explicit_sparse_zeros(tree_type, csc_container, csr_container): + TreeEstimator = ALL_TREES[tree_type] + max_depth = 3 + n_features = 10 + + # n_samples set n_feature to ease construction of a simultaneous + # construction of a csr and csc matrix + n_samples = n_features + samples = np.arange(n_samples) + + # Generate X, y + random_state = check_random_state(0) + indices = [] + data = [] + offset = 0 + indptr = [offset] + for i in range(n_features): + n_nonzero_i = random_state.binomial(n_samples, 0.5) + indices_i = random_state.permutation(samples)[:n_nonzero_i] + indices.append(indices_i) + data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i,)) - 1 + data.append(data_i) + offset += n_nonzero_i + indptr.append(offset) + + indices = np.concatenate(indices).astype(np.int32) + indptr = np.array(indptr, dtype=np.int32) + data = np.array(np.concatenate(data), dtype=np.float32) + X_sparse = csc_container((data, indices, indptr), shape=(n_samples, n_features)) + X = X_sparse.toarray() + X_sparse_test = csr_container( + (data, indices, indptr), shape=(n_samples, n_features) + ) + X_test = X_sparse_test.toarray() + y = random_state.randint(0, 3, size=(n_samples,)) + + # Ensure that X_sparse_test owns its data, indices and indptr array + X_sparse_test = X_sparse_test.copy() + + # Ensure that we have explicit zeros + assert (X_sparse.data == 0.0).sum() > 0 + assert (X_sparse_test.data == 0.0).sum() > 0 + + # Perform the comparison + d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) + s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) + + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree), + ) + + Xs = (X_test, X_sparse_test) + for X1, X2 in product(Xs, Xs): + assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) + assert_array_almost_equal(s.apply(X1), d.apply(X2)) + assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) + + assert_array_almost_equal( + 
s.tree_.decision_path(X1).toarray(), d.tree_.decision_path(X2).toarray() + ) + assert_array_almost_equal( + s.decision_path(X1).toarray(), d.decision_path(X2).toarray() + ) + assert_array_almost_equal( + s.decision_path(X1).toarray(), s.tree_.decision_path(X1).toarray() + ) + + assert_array_almost_equal(s.predict(X1), d.predict(X2)) + + if tree in CLF_TREES: + assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) + + +@ignore_warnings +def check_raise_error_on_1d_input(name): + TreeEstimator = ALL_TREES[name] + + X = iris.data[:, 0].ravel() + X_2d = iris.data[:, 0].reshape((-1, 1)) + y = iris.target + + with pytest.raises(ValueError): + TreeEstimator(random_state=0).fit(X, y) + + est = TreeEstimator(random_state=0) + est.fit(X_2d, y) + with pytest.raises(ValueError): + est.predict([X]) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_1d_input(name): + with ignore_warnings(): + check_raise_error_on_1d_input(name) + + +@pytest.mark.parametrize("name", ALL_TREES) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_min_weight_leaf_split_level(name, sparse_container): + TreeEstimator = ALL_TREES[name] + + X = np.array([[0], [0], [0], [0], [1]]) + y = [0, 0, 0, 0, 1] + sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] + if sparse_container is not None: + X = sparse_container(X) + + est = TreeEstimator(random_state=0) + est.fit(X, y, sample_weight=sample_weight) + assert est.tree_.max_depth == 1 + + est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) + est.fit(X, y, sample_weight=sample_weight) + assert est.tree_.max_depth == 0 + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_public_apply_all_trees(name): + X_small32 = X_small.astype(tree._tree.DTYPE, copy=False) + + est = ALL_TREES[name]() + est.fit(X_small, y_small) + assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) + + +@pytest.mark.parametrize("name", SPARSE_TREES) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_public_apply_sparse_trees(name, csr_container): + X_small32 = csr_container(X_small.astype(tree._tree.DTYPE, copy=False)) + + est = ALL_TREES[name]() + est.fit(X_small, y_small) + assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) + + +def test_decision_path_hardcoded(): + X = iris.data + y = iris.target + est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y) + node_indicator = est.decision_path(X[:2]).toarray() + assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]]) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_decision_path(name): + X = iris.data + y = iris.target + n_samples = X.shape[0] + + TreeEstimator = ALL_TREES[name] + est = TreeEstimator(random_state=0, max_depth=2) + est.fit(X, y) + + node_indicator_csr = est.decision_path(X) + node_indicator = node_indicator_csr.toarray() + assert node_indicator.shape == (n_samples, est.tree_.node_count) + + # Assert that leaves index are correct + leaves = est.apply(X) + leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)] + assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) + + # Ensure only one leave node per sample + all_leaves = est.tree_.children_left == TREE_LEAF + assert_array_almost_equal( + np.dot(node_indicator, all_leaves), np.ones(shape=n_samples) + ) + + # Ensure max depth is consistent with sum of indicator + max_depth = node_indicator.sum(axis=1).max() + assert est.tree_.max_depth <= max_depth + + +@pytest.mark.parametrize("name", ALL_TREES) +@pytest.mark.parametrize("csr_container", 
CSR_CONTAINERS) +def test_no_sparse_y_support(name, csr_container): + # Currently we don't support sparse y + X, y = X_multilabel, csr_container(y_multilabel) + TreeEstimator = ALL_TREES[name] + with pytest.raises(TypeError): + TreeEstimator(random_state=0).fit(X, y) + + +def test_mae(): + """Check MAE criterion produces correct results on small toy dataset: + + ------------------ + | X | y | weight | + ------------------ + | 3 | 3 | 0.1 | + | 5 | 3 | 0.3 | + | 8 | 4 | 1.0 | + | 3 | 6 | 0.6 | + | 5 | 7 | 0.3 | + ------------------ + |sum wt:| 2.3 | + ------------------ + + Because we are dealing with sample weights, we cannot find the median by + simply choosing/averaging the centre value(s), instead we consider the + median where 50% of the cumulative weight is found (in a y sorted data set) + . Therefore with regards to this test data, the cumulative weight is >= 50% + when y = 4. Therefore: + Median = 4 + + For all the samples, we can get the total error by summing: + Absolute(Median - y) * weight + + I.e., total error = (Absolute(4 - 3) * 0.1) + + (Absolute(4 - 3) * 0.3) + + (Absolute(4 - 4) * 1.0) + + (Absolute(4 - 6) * 0.6) + + (Absolute(4 - 7) * 0.3) + = 2.5 + + Impurity = Total error / total weight + = 2.5 / 2.3 + = 1.08695652173913 + ------------------ + + From this root node, the next best split is between X values of 3 and 5. + Thus, we have left and right child nodes: + + LEFT RIGHT + ------------------ ------------------ + | X | y | weight | | X | y | weight | + ------------------ ------------------ + | 3 | 3 | 0.1 | | 5 | 3 | 0.3 | + | 3 | 6 | 0.6 | | 8 | 4 | 1.0 | + ------------------ | 5 | 7 | 0.3 | + |sum wt:| 0.7 | ------------------ + ------------------ |sum wt:| 1.6 | + ------------------ + + Impurity is found in the same way: + Left node Median = 6 + Total error = (Absolute(6 - 3) * 0.1) + + (Absolute(6 - 6) * 0.6) + = 0.3 + + Left Impurity = Total error / total weight + = 0.3 / 0.7 + = 0.428571428571429 + ------------------- + + Likewise for Right node: + Right node Median = 4 + Total error = (Absolute(4 - 3) * 0.3) + + (Absolute(4 - 4) * 1.0) + + (Absolute(4 - 7) * 0.3) + = 1.2 + + Right Impurity = Total error / total weight + = 1.2 / 1.6 + = 0.75 + ------ + """ + dt_mae = DecisionTreeRegressor( + random_state=0, criterion="absolute_error", max_leaf_nodes=2 + ) + + # Test MAE where sample weights are non-uniform (as illustrated above): + dt_mae.fit( + X=[[3], [5], [3], [8], [5]], + y=[6, 7, 3, 4, 3], + sample_weight=[0.6, 0.3, 0.1, 1.0, 0.3], + ) + assert_allclose(dt_mae.tree_.impurity, [2.5 / 2.3, 0.3 / 0.7, 1.2 / 1.6]) + assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0]) + + # Test MAE where all sample weights are uniform: + dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3], sample_weight=np.ones(5)) + assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0]) + assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0]) + + # Test MAE where a `sample_weight` is not explicitly provided. 
+ # This is equivalent to providing uniform sample weights, though + # the internal logic is different: + dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3]) + assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0]) + assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0]) + + +def test_criterion_copy(): + # Let's check whether copy of our criterion has the same type + # and properties as original + n_outputs = 3 + n_classes = np.arange(3, dtype=np.intp) + n_samples = 100 + + def _pickle_copy(obj): + return pickle.loads(pickle.dumps(obj)) + + for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]: + for _, typename in CRITERIA_CLF.items(): + criteria = typename(n_outputs, n_classes) + result = copy_func(criteria).__reduce__() + typename_, (n_outputs_, n_classes_), _ = result + assert typename == typename_ + assert n_outputs == n_outputs_ + assert_array_equal(n_classes, n_classes_) + + for _, typename in CRITERIA_REG.items(): + criteria = typename(n_outputs, n_samples) + result = copy_func(criteria).__reduce__() + typename_, (n_outputs_, n_samples_), _ = result + assert typename == typename_ + assert n_outputs == n_outputs_ + assert n_samples == n_samples_ + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_empty_leaf_infinite_threshold(sparse_container): + # try to make empty leaf by using near infinite value. + data = np.random.RandomState(0).randn(100, 11) * 2e38 + data = np.nan_to_num(data.astype("float32")) + X = data[:, :-1] + if sparse_container is not None: + X = sparse_container(X) + y = data[:, -1] + + tree = DecisionTreeRegressor(random_state=0).fit(X, y) + terminal_regions = tree.apply(X) + left_leaf = set(np.where(tree.tree_.children_left == TREE_LEAF)[0]) + empty_leaf = left_leaf.difference(terminal_regions) + infinite_threshold = np.where(~np.isfinite(tree.tree_.threshold))[0] + assert len(infinite_threshold) == 0 + assert len(empty_leaf) == 0 + + +@pytest.mark.parametrize( + "dataset", sorted(set(DATASETS.keys()) - {"reg_small", "diabetes"}) +) +@pytest.mark.parametrize("tree_cls", [DecisionTreeClassifier, ExtraTreeClassifier]) +def test_prune_tree_classifier_are_subtrees(dataset, tree_cls): + dataset = DATASETS[dataset] + X, y = dataset["X"], dataset["y"] + est = tree_cls(max_leaf_nodes=20, random_state=0) + info = est.cost_complexity_pruning_path(X, y) + + pruning_path = info.ccp_alphas + impurities = info.impurities + assert np.all(np.diff(pruning_path) >= 0) + assert np.all(np.diff(impurities) >= 0) + + assert_pruning_creates_subtree(tree_cls, X, y, pruning_path) + + +@pytest.mark.parametrize("dataset", DATASETS.keys()) +@pytest.mark.parametrize("tree_cls", [DecisionTreeRegressor, ExtraTreeRegressor]) +def test_prune_tree_regression_are_subtrees(dataset, tree_cls): + dataset = DATASETS[dataset] + X, y = dataset["X"], dataset["y"] + + est = tree_cls(max_leaf_nodes=20, random_state=0) + info = est.cost_complexity_pruning_path(X, y) + + pruning_path = info.ccp_alphas + impurities = info.impurities + assert np.all(np.diff(pruning_path) >= 0) + assert np.all(np.diff(impurities) >= 0) + + assert_pruning_creates_subtree(tree_cls, X, y, pruning_path) + + +def test_prune_single_node_tree(): + # single node tree + clf1 = DecisionTreeClassifier(random_state=0) + clf1.fit([[0], [1]], [0, 0]) + + # pruned single node tree + clf2 = DecisionTreeClassifier(random_state=0, ccp_alpha=10) + clf2.fit([[0], [1]], [0, 0]) + + assert_is_subtree(clf1.tree_, clf2.tree_) + + +def assert_pruning_creates_subtree(estimator_cls, X, y, 
pruning_path): + # generate trees with increasing alphas + estimators = [] + for ccp_alpha in pruning_path: + est = estimator_cls(max_leaf_nodes=20, ccp_alpha=ccp_alpha, random_state=0).fit( + X, y + ) + estimators.append(est) + + # A pruned tree must be a subtree of the previous tree (which had a + # smaller ccp_alpha) + for prev_est, next_est in zip(estimators, estimators[1:]): + assert_is_subtree(prev_est.tree_, next_est.tree_) + + +def assert_is_subtree(tree, subtree): + assert tree.node_count >= subtree.node_count + assert tree.max_depth >= subtree.max_depth + + tree_c_left = tree.children_left + tree_c_right = tree.children_right + subtree_c_left = subtree.children_left + subtree_c_right = subtree.children_right + + stack = [(0, 0)] + while stack: + tree_node_idx, subtree_node_idx = stack.pop() + assert_array_almost_equal( + tree.value[tree_node_idx], subtree.value[subtree_node_idx] + ) + assert_almost_equal( + tree.impurity[tree_node_idx], subtree.impurity[subtree_node_idx] + ) + assert_almost_equal( + tree.n_node_samples[tree_node_idx], subtree.n_node_samples[subtree_node_idx] + ) + assert_almost_equal( + tree.weighted_n_node_samples[tree_node_idx], + subtree.weighted_n_node_samples[subtree_node_idx], + ) + + if subtree_c_left[subtree_node_idx] == subtree_c_right[subtree_node_idx]: + # is a leaf + assert_almost_equal(TREE_UNDEFINED, subtree.threshold[subtree_node_idx]) + else: + # not a leaf + assert_almost_equal( + tree.threshold[tree_node_idx], subtree.threshold[subtree_node_idx] + ) + stack.append((tree_c_left[tree_node_idx], subtree_c_left[subtree_node_idx])) + stack.append( + (tree_c_right[tree_node_idx], subtree_c_right[subtree_node_idx]) + ) + + +@pytest.mark.parametrize("name", ALL_TREES) +@pytest.mark.parametrize("splitter", ["best", "random"]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_apply_path_readonly_all_trees(name, splitter, sparse_container): + dataset = DATASETS["clf_small"] + X_small = dataset["X"].astype(tree._tree.DTYPE, copy=False) + if sparse_container is None: + X_readonly = create_memmap_backed_data(X_small) + else: + X_readonly = sparse_container(dataset["X"]) + + X_readonly.data = np.array(X_readonly.data, dtype=tree._tree.DTYPE) + ( + X_readonly.data, + X_readonly.indices, + X_readonly.indptr, + ) = create_memmap_backed_data( + (X_readonly.data, X_readonly.indices, X_readonly.indptr) + ) + + y_readonly = create_memmap_backed_data(np.array(y_small, dtype=tree._tree.DTYPE)) + est = ALL_TREES[name](splitter=splitter) + est.fit(X_readonly, y_readonly) + assert_array_equal(est.predict(X_readonly), est.predict(X_small)) + assert_array_equal( + est.decision_path(X_readonly).todense(), est.decision_path(X_small).todense() + ) + + +@pytest.mark.parametrize("criterion", ["squared_error", "friedman_mse", "poisson"]) +@pytest.mark.parametrize("Tree", REG_TREES.values()) +def test_balance_property(criterion, Tree): + # Test that sum(y_pred)=sum(y_true) on training set. + # This works if the mean is predicted (should even be true for each leaf). + # MAE predicts the median and is therefore excluded from this test. + + # Choose a training set with non-negative targets (for poisson) + X, y = diabetes.data, diabetes.target + reg = Tree(criterion=criterion) + reg.fit(X, y) + assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y)) + + +@pytest.mark.parametrize("seed", range(3)) +def test_poisson_zero_nodes(seed): + # Test that sum(y)=0 and therefore y_pred=0 is forbidden on nodes. 
+ X = [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 2], [1, 2], [1, 3]] + y = [0, 0, 0, 0, 1, 2, 3, 4] + # Note that X[:, 0] == 0 is a 100% indicator for y == 0. The tree can + # easily learn that: + reg = DecisionTreeRegressor(criterion="squared_error", random_state=seed) + reg.fit(X, y) + assert np.amin(reg.predict(X)) == 0 + # whereas Poisson must predict strictly positive numbers + reg = DecisionTreeRegressor(criterion="poisson", random_state=seed) + reg.fit(X, y) + assert np.all(reg.predict(X) > 0) + + # Test additional dataset where something could go wrong. + n_features = 10 + X, y = datasets.make_regression( + effective_rank=n_features * 2 // 3, + tail_strength=0.6, + n_samples=1_000, + n_features=n_features, + n_informative=n_features * 2 // 3, + random_state=seed, + ) + # some excess zeros + y[(-1 < y) & (y < 0)] = 0 + # make sure the target is positive + y = np.abs(y) + reg = DecisionTreeRegressor(criterion="poisson", random_state=seed) + reg.fit(X, y) + assert np.all(reg.predict(X) > 0) + + +def test_poisson_vs_mse(): + # For a Poisson distributed target, Poisson loss should give better results + # than squared error measured in Poisson deviance as metric. + # We have a similar test, test_poisson(), in + # sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 500, 10 + X = datasets.make_low_rank_matrix( + n_samples=n_train + n_test, n_features=n_features, random_state=rng + ) + # We create a log-linear Poisson model and downscale coef as it will get + # exponentiated. + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + y = rng.poisson(lam=np.exp(X @ coef)) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=rng + ) + # We prevent some overfitting by setting min_samples_split=10. + tree_poi = DecisionTreeRegressor( + criterion="poisson", min_samples_split=10, random_state=rng + ) + tree_mse = DecisionTreeRegressor( + criterion="squared_error", min_samples_split=10, random_state=rng + ) + + tree_poi.fit(X_train, y_train) + tree_mse.fit(X_train, y_train) + dummy = DummyRegressor(strategy="mean").fit(X_train, y_train) + + for X, y, val in [(X_train, y_train, "train"), (X_test, y_test, "test")]: + metric_poi = mean_poisson_deviance(y, tree_poi.predict(X)) + # squared_error might produce non-positive predictions => clip + metric_mse = mean_poisson_deviance(y, np.clip(tree_mse.predict(X), 1e-15, None)) + metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) + # As squared_error might correctly predict 0 in train set, its train + # score can be better than Poisson. This is no longer the case for the + # test set. 
+ if val == "test": + assert metric_poi < 0.5 * metric_mse + assert metric_poi < 0.75 * metric_dummy + + +@pytest.mark.parametrize("criterion", REG_CRITERIONS) +def test_decision_tree_regressor_sample_weight_consistency(criterion): + """Test that the impact of sample_weight is consistent.""" + tree_params = dict(criterion=criterion) + tree = DecisionTreeRegressor(**tree_params, random_state=42) + for kind in ["zeros", "ones"]: + check_sample_weights_invariance( + "DecisionTreeRegressor_" + criterion, tree, kind="zeros" + ) + + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = np.mean(X, axis=1) + rng.rand(n_samples) + # make it positive in order to work also for poisson criterion + y += np.min(y) + 0.1 + + # check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = np.ones(len(y)) + sample_weight_1[: n_samples // 2] = 2 + + tree1 = DecisionTreeRegressor(**tree_params).fit( + X, y, sample_weight=sample_weight_1 + ) + + tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) + + assert tree1.tree_.node_count == tree2.tree_.node_count + # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not + # exactly the same, but on the training set, those differences do not + # matter and thus predictions are the same. + assert_allclose(tree1.predict(X), tree2.predict(X)) + + +@pytest.mark.parametrize("Tree", [DecisionTreeClassifier, ExtraTreeClassifier]) +@pytest.mark.parametrize("n_classes", [2, 4]) +def test_criterion_entropy_same_as_log_loss(Tree, n_classes): + """Test that criterion=entropy gives same as log_loss.""" + n_samples, n_features = 50, 5 + X, y = datasets.make_classification( + n_classes=n_classes, + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + n_redundant=0, + random_state=42, + ) + tree_log_loss = Tree(criterion="log_loss", random_state=43).fit(X, y) + tree_entropy = Tree(criterion="entropy", random_state=43).fit(X, y) + + assert_tree_equal( + tree_log_loss.tree_, + tree_entropy.tree_, + f"{Tree!r} with criterion 'entropy' and 'log_loss' gave different trees.", + ) + assert_allclose(tree_log_loss.predict(X), tree_entropy.predict(X)) + + +def test_different_endianness_pickle(): + X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def reduce_ndarray(arr): + return arr.byteswap().view(arr.dtype.newbyteorder()).__reduce__() + + def get_pickle_non_native_endianness(): + f = io.BytesIO() + p = pickle.Pickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[np.ndarray] = reduce_ndarray + + p.dump(clf) + f.seek(0) + return f + + new_clf = pickle.load(get_pickle_non_native_endianness()) + new_score = new_clf.score(X, y) + assert np.isclose(score, new_score) + + +def test_different_endianness_joblib_pickle(): + X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + class NonNativeEndiannessNumpyPickler(NumpyPickler): + def save(self, obj): + if isinstance(obj, np.ndarray): + obj = obj.byteswap().view(obj.dtype.newbyteorder()) + super().save(obj) + + def get_joblib_pickle_non_native_endianness(): + f = io.BytesIO() + p = NonNativeEndiannessNumpyPickler(f) + + p.dump(clf) + 
f.seek(0) + return f + + new_clf = joblib.load(get_joblib_pickle_non_native_endianness()) + new_score = new_clf.score(X, y) + assert np.isclose(score, new_score) + + +def get_different_bitness_node_ndarray(node_ndarray): + new_dtype_for_indexing_fields = np.int64 if _IS_32BIT else np.int32 + + # field names in Node struct with SIZE_t types (see sklearn/tree/_tree.pxd) + indexing_field_names = ["left_child", "right_child", "feature", "n_node_samples"] + + new_dtype_dict = { + name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items() + } + for name in indexing_field_names: + new_dtype_dict[name] = new_dtype_for_indexing_fields + + new_dtype = np.dtype( + {"names": list(new_dtype_dict.keys()), "formats": list(new_dtype_dict.values())} + ) + return node_ndarray.astype(new_dtype, casting="same_kind") + + +def get_different_alignment_node_ndarray(node_ndarray): + new_dtype_dict = { + name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items() + } + offsets = [offset for dtype, offset in node_ndarray.dtype.fields.values()] + shifted_offsets = [8 + offset for offset in offsets] + + new_dtype = np.dtype( + { + "names": list(new_dtype_dict.keys()), + "formats": list(new_dtype_dict.values()), + "offsets": shifted_offsets, + } + ) + return node_ndarray.astype(new_dtype, casting="same_kind") + + +def reduce_tree_with_different_bitness(tree): + new_dtype = np.int64 if _IS_32BIT else np.int32 + tree_cls, (n_features, n_classes, n_outputs), state = tree.__reduce__() + new_n_classes = n_classes.astype(new_dtype, casting="same_kind") + + new_state = state.copy() + new_state["nodes"] = get_different_bitness_node_ndarray(new_state["nodes"]) + + return (tree_cls, (n_features, new_n_classes, n_outputs), new_state) + + +def test_different_bitness_pickle(): + X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def pickle_dump_with_different_bitness(): + f = io.BytesIO() + p = pickle.Pickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[CythonTree] = reduce_tree_with_different_bitness + + p.dump(clf) + f.seek(0) + return f + + new_clf = pickle.load(pickle_dump_with_different_bitness()) + new_score = new_clf.score(X, y) + assert score == pytest.approx(new_score) + + +def test_different_bitness_joblib_pickle(): + # Make sure that a platform specific pickle generated on a 64 bit + # platform can be converted at pickle load time into an estimator + # with Cython code that works with the host's native integer precision + # to index nodes in the tree data structure when the host is a 32 bit + # platform (and vice versa). 
+ X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def joblib_dump_with_different_bitness(): + f = io.BytesIO() + p = NumpyPickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[CythonTree] = reduce_tree_with_different_bitness + + p.dump(clf) + f.seek(0) + return f + + new_clf = joblib.load(joblib_dump_with_different_bitness()) + new_score = new_clf.score(X, y) + assert score == pytest.approx(new_score) + + +def test_check_n_classes(): + expected_dtype = np.dtype(np.int32) if _IS_32BIT else np.dtype(np.int64) + allowed_dtypes = [np.dtype(np.int32), np.dtype(np.int64)] + allowed_dtypes += [dt.newbyteorder() for dt in allowed_dtypes] + + n_classes = np.array([0, 1], dtype=expected_dtype) + for dt in allowed_dtypes: + _check_n_classes(n_classes.astype(dt), expected_dtype) + + with pytest.raises(ValueError, match="Wrong dimensions.+n_classes"): + wrong_dim_n_classes = np.array([[0, 1]], dtype=expected_dtype) + _check_n_classes(wrong_dim_n_classes, expected_dtype) + + with pytest.raises(ValueError, match="n_classes.+incompatible dtype"): + wrong_dtype_n_classes = n_classes.astype(np.float64) + _check_n_classes(wrong_dtype_n_classes, expected_dtype) + + +def test_check_value_ndarray(): + expected_dtype = np.dtype(np.float64) + expected_shape = (5, 1, 2) + value_ndarray = np.zeros(expected_shape, dtype=expected_dtype) + + allowed_dtypes = [expected_dtype, expected_dtype.newbyteorder()] + + for dt in allowed_dtypes: + _check_value_ndarray( + value_ndarray, expected_dtype=dt, expected_shape=expected_shape + ) + + with pytest.raises(ValueError, match="Wrong shape.+value array"): + _check_value_ndarray( + value_ndarray, expected_dtype=expected_dtype, expected_shape=(1, 2) + ) + + for problematic_arr in [value_ndarray[:, :, :1], np.asfortranarray(value_ndarray)]: + with pytest.raises(ValueError, match="value array.+C-contiguous"): + _check_value_ndarray( + problematic_arr, + expected_dtype=expected_dtype, + expected_shape=problematic_arr.shape, + ) + + with pytest.raises(ValueError, match="value array.+incompatible dtype"): + _check_value_ndarray( + value_ndarray.astype(np.float32), + expected_dtype=expected_dtype, + expected_shape=expected_shape, + ) + + +def test_check_node_ndarray(): + expected_dtype = NODE_DTYPE + + node_ndarray = np.zeros((5,), dtype=expected_dtype) + + valid_node_ndarrays = [ + node_ndarray, + get_different_bitness_node_ndarray(node_ndarray), + get_different_alignment_node_ndarray(node_ndarray), + ] + valid_node_ndarrays += [ + arr.astype(arr.dtype.newbyteorder()) for arr in valid_node_ndarrays + ] + + for arr in valid_node_ndarrays: + _check_node_ndarray(node_ndarray, expected_dtype=expected_dtype) + + with pytest.raises(ValueError, match="Wrong dimensions.+node array"): + problematic_node_ndarray = np.zeros((5, 2), dtype=expected_dtype) + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + with pytest.raises(ValueError, match="node array.+C-contiguous"): + problematic_node_ndarray = node_ndarray[::2] + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + dtype_dict = {name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items()} + + # array with wrong 'threshold' field dtype (int64 rather than float64) + new_dtype_dict = dtype_dict.copy() + new_dtype_dict["threshold"] = np.int64 + + new_dtype = np.dtype( + {"names": list(new_dtype_dict.keys()), "formats": 
list(new_dtype_dict.values())} + ) + problematic_node_ndarray = node_ndarray.astype(new_dtype) + + with pytest.raises(ValueError, match="node array.+incompatible dtype"): + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + # array with wrong 'left_child' field dtype (float64 rather than int64 or int32) + new_dtype_dict = dtype_dict.copy() + new_dtype_dict["left_child"] = np.float64 + new_dtype = np.dtype( + {"names": list(new_dtype_dict.keys()), "formats": list(new_dtype_dict.values())} + ) + + problematic_node_ndarray = node_ndarray.astype(new_dtype) + + with pytest.raises(ValueError, match="node array.+incompatible dtype"): + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + +@pytest.mark.parametrize( + "Splitter", chain(DENSE_SPLITTERS.values(), SPARSE_SPLITTERS.values()) +) +def test_splitter_serializable(Splitter): + """Check that splitters are serializable.""" + rng = np.random.RandomState(42) + max_features = 10 + n_outputs, n_classes = 2, np.array([3, 2], dtype=np.intp) + + criterion = CRITERIA_CLF["gini"](n_outputs, n_classes) + splitter = Splitter(criterion, max_features, 5, 0.5, rng, monotonic_cst=None) + splitter_serialize = pickle.dumps(splitter) + + splitter_back = pickle.loads(splitter_serialize) + assert splitter_back.max_features == max_features + assert isinstance(splitter_back, Splitter) + + +def test_tree_deserialization_from_read_only_buffer(tmpdir): + """Check that Trees can be deserialized with read only buffers. + + Non-regression test for gh-25584. + """ + pickle_path = str(tmpdir.join("clf.joblib")) + clf = DecisionTreeClassifier(random_state=0) + clf.fit(X_small, y_small) + + joblib.dump(clf, pickle_path) + loaded_clf = joblib.load(pickle_path, mmap_mode="r") + + assert_tree_equal( + loaded_clf.tree_, + clf.tree_, + "The trees of the original and loaded classifiers are not equal.", + ) + + +@pytest.mark.parametrize("Tree", ALL_TREES.values()) +def test_min_sample_split_1_error(Tree): + """Check that an error is raised when min_sample_split=1. + + non-regression test for issue gh-25481. 
+ """ + X = np.array([[0, 0], [1, 1]]) + y = np.array([0, 1]) + + # min_samples_split=1.0 is valid + Tree(min_samples_split=1.0).fit(X, y) + + # min_samples_split=1 is invalid + tree = Tree(min_samples_split=1) + msg = ( + r"'min_samples_split' .* must be an int in the range \[2, inf\) " + r"or a float in the range \(0.0, 1.0\]" + ) + with pytest.raises(ValueError, match=msg): + tree.fit(X, y) + + +@pytest.mark.parametrize("criterion", ["squared_error", "friedman_mse"]) +def test_missing_values_on_equal_nodes_no_missing(criterion): + """Check missing values goes to correct node during predictions""" + X = np.array([[0, 1, 2, 3, 8, 9, 11, 12, 15]]).T + y = np.array([0.1, 0.2, 0.3, 0.2, 1.4, 1.4, 1.5, 1.6, 2.6]) + + dtc = DecisionTreeRegressor(random_state=42, max_depth=1, criterion=criterion) + dtc.fit(X, y) + + # Goes to right node because it has the most data points + y_pred = dtc.predict([[np.nan]]) + assert_allclose(y_pred, [np.mean(y[-5:])]) + + # equal number of elements in both nodes + X_equal = X[:-1] + y_equal = y[:-1] + + dtc = DecisionTreeRegressor(random_state=42, max_depth=1, criterion=criterion) + dtc.fit(X_equal, y_equal) + + # Goes to right node because the implementation sets: + # missing_go_to_left = n_left > n_right, which is False + y_pred = dtc.predict([[np.nan]]) + assert_allclose(y_pred, [np.mean(y_equal[-4:])]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_best_splitter_three_classes(criterion): + """Test when missing values are uniquely present in a class among 3 classes.""" + missing_values_class = 0 + X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 8, 9, 11, 12]]).T + y = np.array([missing_values_class] * 4 + [1] * 4 + [2] * 4) + dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion) + dtc.fit(X, y) + + X_test = np.array([[np.nan, 3, 12]]).T + y_nan_pred = dtc.predict(X_test) + # Missing values necessarily are associated to the observed class. 
+ assert_array_equal(y_nan_pred, [missing_values_class, 1, 2]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_best_splitter_to_left(criterion): + """Missing values spanning only one class at fit-time must make missing + values at predict-time be classified as belonging to this class.""" + X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 4, 5]]).T + y = np.array([0] * 4 + [1] * 6) + + dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion) + dtc.fit(X, y) + + X_test = np.array([[np.nan, 5, np.nan]]).T + y_pred = dtc.predict(X_test) + + assert_array_equal(y_pred, [0, 1, 0]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_best_splitter_to_right(criterion): + """Missing values and non-missing values sharing one class at fit-time + must make missing values at predict-time be classified as belonging + to this class.""" + X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 4, 5]]).T + y = np.array([1] * 4 + [0] * 4 + [1] * 2) + + dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion) + dtc.fit(X, y) + + X_test = np.array([[np.nan, 1.2, 4.8]]).T + y_pred = dtc.predict(X_test) + + assert_array_equal(y_pred, [1, 0, 1]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_missing_both_classes_has_nan(criterion): + """Check the behavior when there is one missing value in each class.""" + X = np.array([[1, 2, 3, 5, np.nan, 10, 20, 30, 60, np.nan]]).T + y = np.array([0] * 5 + [1] * 5) + + dtc = DecisionTreeClassifier(random_state=42, max_depth=1, criterion=criterion) + dtc.fit(X, y) + X_test = np.array([[np.nan, 2.3, 34.2]]).T + y_pred = dtc.predict(X_test) + + # The missing value goes to the class on the right (here 1) because the implementation + # searches right first. 
+ assert_array_equal(y_pred, [1, 0, 1]) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize( + "tree", + [ + DecisionTreeClassifier(splitter="random"), + DecisionTreeRegressor(criterion="absolute_error"), + ], +) +def test_missing_value_errors(sparse_container, tree): + """Check unsupported configurations for missing values.""" + + X = np.array([[1, 2, 3, 5, np.nan, 10, 20, 30, 60, np.nan]]).T + y = np.array([0] * 5 + [1] * 5) + + if sparse_container is not None: + X = sparse_container(X) + + with pytest.raises(ValueError, match="Input X contains NaN"): + tree.fit(X, y) + + +def test_missing_values_poisson(): + """Smoke test for poisson regression and missing values.""" + X, y = diabetes.data.copy(), diabetes.target + + # Set some values missing + X[::5, 0] = np.nan + X[::6, -1] = np.nan + + reg = DecisionTreeRegressor(criterion="poisson", random_state=42) + reg.fit(X, y) + + y_pred = reg.predict(X) + assert (y_pred >= 0.0).all() + + +def make_friedman1_classification(*args, **kwargs): + X, y = datasets.make_friedman1(*args, **kwargs) + y = y > 14 + return X, y + + +@pytest.mark.parametrize( + "make_data,Tree", + [ + (datasets.make_friedman1, DecisionTreeRegressor), + (make_friedman1_classification, DecisionTreeClassifier), + ], +) +@pytest.mark.parametrize("sample_weight_train", [None, "ones"]) +def test_missing_values_is_resilience( + make_data, Tree, sample_weight_train, global_random_seed +): + """Check that trees that can deal with missing values have decent performance.""" + n_samples, n_features = 5_000, 10 + X, y = make_data( + n_samples=n_samples, n_features=n_features, random_state=global_random_seed + ) + + X_missing = X.copy() + rng = np.random.RandomState(global_random_seed) + X_missing[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan + X_missing_train, X_missing_test, y_train, y_test = train_test_split( + X_missing, y, random_state=global_random_seed + ) + if sample_weight_train == "ones": + sample_weight = np.ones(X_missing_train.shape[0]) + else: + sample_weight = None + + native_tree = Tree(max_depth=10, random_state=global_random_seed) + native_tree.fit(X_missing_train, y_train, sample_weight=sample_weight) + score_native_tree = native_tree.score(X_missing_test, y_test) + + tree_with_imputer = make_pipeline( + SimpleImputer(), Tree(max_depth=10, random_state=global_random_seed) + ) + tree_with_imputer.fit(X_missing_train, y_train) + score_tree_with_imputer = tree_with_imputer.score(X_missing_test, y_test) + + assert ( + score_native_tree > score_tree_with_imputer + ), f"{score_native_tree=} should be strictly greater than {score_tree_with_imputer}" + + +def test_missing_value_is_predictive(): + """Check that the tree learns when only the missing value is predictive.""" + rng = np.random.RandomState(0) + n_samples = 1000 + + X = rng.standard_normal(size=(n_samples, 10)) + y = rng.randint(0, high=2, size=n_samples) + + # Create a predictive feature using `y` and with some noise + X_random_mask = rng.choice([False, True], size=n_samples, p=[0.95, 0.05]) + y_mask = y.copy().astype(bool) + y_mask[X_random_mask] = ~y_mask[X_random_mask] + + X_predictive = rng.standard_normal(size=n_samples) + X_predictive[y_mask] = np.nan + + X[:, 5] = X_predictive + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng) + tree = DecisionTreeClassifier(random_state=rng).fit(X_train, y_train) + + assert tree.score(X_train, y_train) >= 0.85 + assert tree.score(X_test, y_test) >= 0.85 + + +@pytest.mark.parametrize( + 
"make_data, Tree", + [ + (datasets.make_regression, DecisionTreeRegressor), + (datasets.make_classification, DecisionTreeClassifier), + ], +) +def test_sample_weight_non_uniform(make_data, Tree): + """Check that sample weight is correctly handled with missing values.""" + rng = np.random.RandomState(0) + n_samples, n_features = 1000, 10 + X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng) + + # Create dataset with missing values + X[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan + + # Zero sample weight is the same as removing the sample + sample_weight = np.ones(X.shape[0]) + sample_weight[::2] = 0.0 + + tree_with_sw = Tree(random_state=0) + tree_with_sw.fit(X, y, sample_weight=sample_weight) + + tree_samples_removed = Tree(random_state=0) + tree_samples_removed.fit(X[1::2, :], y[1::2]) + + assert_allclose(tree_samples_removed.predict(X), tree_with_sw.predict(X)) + + +def test_deterministic_pickle(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/27268 + # Uninitialised memory would lead to the two pickle strings being different. + tree1 = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target) + tree2 = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target) + + pickle1 = pickle.dumps(tree1) + pickle2 = pickle.dumps(tree2) + + assert pickle1 == pickle2 + + +@pytest.mark.parametrize( + "X", + [ + # missing values will go left for greedy splits + np.array([np.nan, 2, np.nan, 4, 5, 6]), + np.array([np.nan, np.nan, 3, 4, 5, 6]), + # missing values will go right for greedy splits + np.array([1, 2, 3, 4, np.nan, np.nan]), + np.array([1, 2, 3, np.nan, 6, np.nan]), + ], +) +@pytest.mark.parametrize("criterion", ["squared_error", "friedman_mse"]) +def test_regression_tree_missing_values_toy(X, criterion): + """Check that we properly handle missing values in regression trees using a toy + dataset. + + The regression targeted by this test was that we were not reinitializing the + criterion with respect to the number of missing values. Therefore, the value + of the criterion (i.e. MSE) was completely wrong. + + This test checks that the MSE is zero when there is a single sample in the leaf. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28254 + https://github.com/scikit-learn/scikit-learn/issues/28316 + """ + X = X.reshape(-1, 1) + y = np.arange(6) + + tree = DecisionTreeRegressor(criterion=criterion, random_state=0).fit(X, y) + tree_ref = clone(tree).fit(y.reshape(-1, 1), y) + assert all(tree.tree_.impurity >= 0) # MSE should always be non-negative + # Check that the impurity matches after the first split + assert_allclose(tree.tree_.impurity[:2], tree_ref.tree_.impurity[:2]) + + # Find the leaves with a single sample where the MSE should be 0 + leaves_idx = np.flatnonzero( + (tree.tree_.children_left == -1) & (tree.tree_.n_node_samples == 1) + ) + assert_allclose(tree.tree_.impurity[leaves_idx], 0.0) + + +def test_classification_tree_missing_values_toy(): + """Check that we properly handle missing values in classification trees using a toy + dataset. + + The test is more involved because we use a case where we detected a regression + in a random forest. We therefore define the seed and bootstrap indices to detect + one of the infrequent regressions. + + Here, we check that the impurity is zero or positive in the leaves. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28254 + """ + X, y = datasets.load_iris(return_X_y=True) + + rng = np.random.RandomState(42) + X_missing = X.copy() + mask = rng.binomial( + n=np.ones(shape=(1, 4), dtype=np.int32), p=X[:, [2]] / 8 + ).astype(bool) + X_missing[mask] = np.nan + X_train, _, y_train, _ = train_test_split(X_missing, y, random_state=13) + + # fmt: off + # no black reformatting for this specific array + indices = np.array([ + 2, 81, 39, 97, 91, 38, 46, 31, 101, 13, 89, 82, 100, 42, 69, 27, 81, 16, 73, 74, + 51, 47, 107, 17, 75, 110, 20, 15, 104, 57, 26, 15, 75, 79, 35, 77, 90, 51, 46, + 13, 94, 91, 23, 8, 93, 93, 73, 77, 12, 13, 74, 109, 110, 24, 10, 23, 104, 27, + 92, 52, 20, 109, 8, 8, 28, 27, 35, 12, 12, 7, 43, 0, 30, 31, 78, 12, 24, 105, + 50, 0, 73, 12, 102, 105, 13, 31, 1, 69, 11, 32, 75, 90, 106, 94, 60, 56, 35, 17, + 62, 85, 81, 39, 80, 16, 63, 6, 80, 84, 3, 3, 76, 78 + ], dtype=np.int32) + # fmt: on + + tree = DecisionTreeClassifier( + max_depth=3, max_features="sqrt", random_state=1857819720 + ) + tree.fit(X_train[indices], y_train[indices]) + assert all(tree.tree_.impurity >= 0) + + leaves_idx = np.flatnonzero( + (tree.tree_.children_left == -1) & (tree.tree_.n_node_samples == 1) + ) + assert_allclose(tree.tree_.impurity[leaves_idx], 0.0) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so b/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..908ebcc7a53f2dceb1a35e6dc95cb97e6dfa45b4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6a6296a419bf448fcf992fb808b9a2177a41144028efb8441ec212078826a38 +size 854964849