diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f1410aec58f420551560e354446d1ffd1b0c58a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py new file mode 100644 index 0000000000000000000000000000000000000000..ed99a38c0ee56d7fb2222204612dce09529b670d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py @@ -0,0 +1,267 @@ +# Author: Virgile Fritsch +# +# License: BSD 3 clause + +from numbers import Real + +import numpy as np + +from ..base import OutlierMixin, _fit_context +from ..metrics import accuracy_score +from ..utils._param_validation import Interval +from ..utils.validation import check_is_fitted +from ._robust_covariance import MinCovDet + + +class EllipticEnvelope(OutlierMixin, MinCovDet): + """An object for detecting outliers in a Gaussian distributed dataset. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, the support of robust location and covariance estimates + is computed, and a covariance estimate is recomputed from it, + without centering the data. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, the robust location and covariance are directly computed + with the FastMCD algorithm without additional treatment. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. If None, the minimum value of support_fraction will + be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`. + Range is (0, 1). + + contamination : float, default=0.1 + The amount of contamination of the data set, i.e. the proportion + of outliers in the data set. Range is (0, 0.5]. + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling + the data. Pass an int for reproducible results across multiple function + calls. See :term:`Glossary `. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated robust location. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated robust covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute the + robust estimates of location and shape. + + offset_ : float + Offset used to define the decision function from the raw scores. + We have the relation: ``decision_function = score_samples - offset_``. + The offset depends on the contamination parameter and is defined in + such a way we obtain the expected number of outliers (samples with + decision function < 0) in training. + + .. 
versionadded:: 0.20 + + raw_location_ : ndarray of shape (n_features,) + The raw robust estimated location before correction and re-weighting. + + raw_covariance_ : ndarray of shape (n_features, n_features) + The raw robust estimated covariance before correction and re-weighting. + + raw_support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the raw robust estimates of location and shape, before correction + and re-weighting. + + dist_ : ndarray of shape (n_samples,) + Mahalanobis distances of the training set (on which :meth:`fit` is + called) observations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + Outlier detection from covariance estimation may break or not + perform well in high-dimensional settings. In particular, one will + always take care to work with ``n_samples > n_features ** 2``. + + References + ---------- + .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the + minimum covariance determinant estimator" Technometrics 41(3), 212 + (1999) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import EllipticEnvelope + >>> true_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0], + ... cov=true_cov, + ... size=500) + >>> cov = EllipticEnvelope(random_state=0).fit(X) + >>> # predict returns 1 for an inlier and -1 for an outlier + >>> cov.predict([[0, 0], + ... [3, 3]]) + array([ 1, -1]) + >>> cov.covariance_ + array([[0.7411..., 0.2535...], + [0.2535..., 0.3053...]]) + >>> cov.location_ + array([0.0813... , 0.0427...]) + """ + + _parameter_constraints: dict = { + **MinCovDet._parameter_constraints, + "contamination": [Interval(Real, 0, 0.5, closed="right")], + } + + def __init__( + self, + *, + store_precision=True, + assume_centered=False, + support_fraction=None, + contamination=0.1, + random_state=None, + ): + super().__init__( + store_precision=store_precision, + assume_centered=assume_centered, + support_fraction=support_fraction, + random_state=random_state, + ) + self.contamination = contamination + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the EllipticEnvelope model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + super().fit(X) + self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination) + return self + + def decision_function(self, X): + """Compute the decision function of the given observations. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + decision : ndarray of shape (n_samples,) + Decision function of the samples. 
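# --- Editor's note (not part of the diff): a minimal sketch, assuming only a
# working scikit-learn install, of the relations documented above:
# ``decision_function = score_samples - offset_`` and ``score_samples`` is the
# negated Mahalanobis distance. Names like X_demo are illustrative.
import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X_demo = rng.multivariate_normal([0, 0], [[0.8, 0.3], [0.3, 0.4]], size=500)
clf = EllipticEnvelope(contamination=0.1, random_state=0).fit(X_demo)
assert np.allclose(clf.decision_function(X_demo),
                   clf.score_samples(X_demo) - clf.offset_)
assert np.allclose(clf.score_samples(X_demo), -clf.mahalanobis(X_demo))
# offset_ is the contamination-percentile of the training scores, so roughly
# contamination * n_samples training points get a negative decision value
print((clf.decision_function(X_demo) < 0).sum())  # about 50 of 500
# --- end editor's note ---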
+ It is equal to the shifted Mahalanobis distances. + The threshold for being an outlier is 0, which ensures a + compatibility with other outlier detection algorithms. + """ + check_is_fitted(self) + negative_mahal_dist = self.score_samples(X) + return negative_mahal_dist - self.offset_ + + def score_samples(self, X): + """Compute the negative Mahalanobis distances. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + negative_mahal_distances : array-like of shape (n_samples,) + Opposite of the Mahalanobis distances. + """ + check_is_fitted(self) + return -self.mahalanobis(X) + + def predict(self, X): + """ + Predict labels (1 inlier, -1 outlier) of X according to fitted model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + Returns -1 for anomalies/outliers and +1 for inliers. + """ + values = self.decision_function(X) + is_inlier = np.full(values.shape[0], -1, dtype=int) + is_inlier[values >= 0] = 1 + + return is_inlier + + def score(self, X, y, sample_weight=None): + """Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of self.predict(X) w.r.t. y. + """ + return accuracy_score(y, self.predict(X), sample_weight=sample_weight) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d72b393290724354c449323f09454c7c94fdef5d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa1699f82b2d6b0a7e3f641f2519f8df2d420887 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f6f6a611ef0e6255623537b5d3a65f5325df828 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a745b4e9ffea29c3e9fd4926d53d88f12b8bac5a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94450b89fa12638e4d0fea617232d1d486d752eb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..ef4bd63149d60624d113a5659248a904fa4679e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py @@ -0,0 +1,377 @@ +# Author: Alexandre Gramfort +# Gael Varoquaux +# Virgile Fritsch +# +# License: BSD 3 clause + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.covariance import ( + OAS, + EmpiricalCovariance, + LedoitWolf, + ShrunkCovariance, + empirical_covariance, + ledoit_wolf, + ledoit_wolf_shrinkage, + oas, + shrunk_covariance, +) +from sklearn.covariance._shrunk_covariance import _ledoit_wolf +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +from .._shrunk_covariance import _oas + +X, _ = datasets.load_diabetes(return_X_y=True) +X_1d = X[:, 0] +n_samples, n_features = X.shape + + +def test_covariance(): + # Tests Covariance module on a simple dataset. + # test covariance fit from data + cov = EmpiricalCovariance() + cov.fit(X) + emp_cov = empirical_covariance(X) + assert_array_almost_equal(emp_cov, cov.covariance_, 4) + assert_almost_equal(cov.error_norm(emp_cov), 0) + assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0) + assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0) + assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0) + assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0) + with pytest.raises(NotImplementedError): + cov.error_norm(emp_cov, norm="foo") + # Mahalanobis distances computation test + mahal_dist = cov.mahalanobis(X) + assert np.amin(mahal_dist) > 0 + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + cov = EmpiricalCovariance() + cov.fit(X_1d) + assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4) + assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0) + assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0) + + # test with one sample + # Create X with 1 sample and 5 features + X_1sample = np.arange(5).reshape(1, 5) + cov = EmpiricalCovariance() + warn_msg = "Only one sample available. 
You may want to reshape your data array" + with pytest.warns(UserWarning, match=warn_msg): + cov.fit(X_1sample) + + assert_array_almost_equal(cov.covariance_, np.zeros(shape=(5, 5), dtype=np.float64)) + + # test integer type + X_integer = np.asarray([[0, 1], [1, 0]]) + result = np.asarray([[0.25, -0.25], [-0.25, 0.25]]) + assert_array_almost_equal(empirical_covariance(X_integer), result) + + # test centered case + cov = EmpiricalCovariance(assume_centered=True) + cov.fit(X) + assert_array_equal(cov.location_, np.zeros(X.shape[1])) + + +@pytest.mark.parametrize("n_matrices", [1, 3]) +def test_shrunk_covariance_func(n_matrices): + """Check `shrunk_covariance` function.""" + + n_features = 2 + cov = np.ones((n_features, n_features)) + cov_target = np.array([[1, 0.5], [0.5, 1]]) + + if n_matrices > 1: + cov = np.repeat(cov[np.newaxis, ...], n_matrices, axis=0) + cov_target = np.repeat(cov_target[np.newaxis, ...], n_matrices, axis=0) + + cov_shrunk = shrunk_covariance(cov, 0.5) + assert_allclose(cov_shrunk, cov_target) + + +def test_shrunk_covariance(): + """Check consistency between `ShrunkCovariance` and `shrunk_covariance`.""" + + # Tests ShrunkCovariance module on a simple dataset. + # compare shrunk covariance obtained from data and from MLE estimate + cov = ShrunkCovariance(shrinkage=0.5) + cov.fit(X) + assert_array_almost_equal( + shrunk_covariance(empirical_covariance(X), shrinkage=0.5), cov.covariance_, 4 + ) + + # same test with shrinkage not provided + cov = ShrunkCovariance() + cov.fit(X) + assert_array_almost_equal( + shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4 + ) + + # same test with shrinkage = 0 (<==> empirical_covariance) + cov = ShrunkCovariance(shrinkage=0.0) + cov.fit(X) + assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + cov = ShrunkCovariance(shrinkage=0.3) + cov.fit(X_1d) + assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4) + + # test shrinkage coeff on a simple data set (without saving precision) + cov = ShrunkCovariance(shrinkage=0.5, store_precision=False) + cov.fit(X) + assert cov.precision_ is None + + +def test_ledoit_wolf(): + # Tests LedoitWolf module on a simple dataset. 
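# --- Editor's note (not part of the diff): the identity that
# test_shrunk_covariance above exercises, written out by hand; a sketch
# assuming scikit-learn's public helpers, with X_demo as illustrative data.
import numpy as np
from sklearn.covariance import empirical_covariance, shrunk_covariance

rng = np.random.RandomState(0)
X_demo = rng.randn(50, 3)
emp_cov = empirical_covariance(X_demo)
shrinkage = 0.5
mu = np.trace(emp_cov) / emp_cov.shape[0]  # average of the diagonal
by_hand = (1 - shrinkage) * emp_cov + shrinkage * mu * np.eye(3)
assert np.allclose(by_hand, shrunk_covariance(emp_cov, shrinkage=shrinkage))
# LedoitWolf, tested below, applies this same convex combination with a
# shrinkage coefficient estimated from the data itself.
# --- end editor's note ---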
+ # test shrinkage coeff on a simple data set + X_centered = X - X.mean(axis=0) + lw = LedoitWolf(assume_centered=True) + lw.fit(X_centered) + shrinkage_ = lw.shrinkage_ + + score_ = lw.score(X_centered) + assert_almost_equal( + ledoit_wolf_shrinkage(X_centered, assume_centered=True), shrinkage_ + ) + assert_almost_equal( + ledoit_wolf_shrinkage(X_centered, assume_centered=True, block_size=6), + shrinkage_, + ) + # compare shrunk covariance obtained from data and from MLE estimate + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf( + X_centered, assume_centered=True + ) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + # compare estimates given by LW and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True) + scov.fit(X_centered) + assert_array_almost_equal(scov.covariance_, lw.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + lw = LedoitWolf(assume_centered=True) + lw.fit(X_1d) + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d, assume_centered=True) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + assert_array_almost_equal((X_1d**2).sum() / n_samples, lw.covariance_, 4) + + # test shrinkage coeff on a simple data set (without saving precision) + lw = LedoitWolf(store_precision=False, assume_centered=True) + lw.fit(X_centered) + assert_almost_equal(lw.score(X_centered), score_, 4) + assert lw.precision_ is None + + # Same tests without assuming centered data + # test shrinkage coeff on a simple data set + lw = LedoitWolf() + lw.fit(X) + assert_almost_equal(lw.shrinkage_, shrinkage_, 4) + assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X)) + assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1]) + assert_almost_equal( + lw.shrinkage_, _ledoit_wolf(X=X, assume_centered=False, block_size=10000)[1] + ) + assert_almost_equal(lw.score(X), score_, 4) + # compare shrunk covariance obtained from data and from MLE estimate + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + # compare estimates given by LW and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=lw.shrinkage_) + scov.fit(X) + assert_array_almost_equal(scov.covariance_, lw.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + lw = LedoitWolf() + lw.fit(X_1d) + assert_allclose( + X_1d.var(ddof=0), + _ledoit_wolf(X=X_1d, assume_centered=False, block_size=10000)[0], + ) + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4) + + # test with one sample + # warning should be raised when using only 1 sample + X_1sample = np.arange(5).reshape(1, 5) + lw = LedoitWolf() + + warn_msg = "Only one sample available. 
You may want to reshape your data array" + with pytest.warns(UserWarning, match=warn_msg): + lw.fit(X_1sample) + + assert_array_almost_equal(lw.covariance_, np.zeros(shape=(5, 5), dtype=np.float64)) + + # test shrinkage coeff on a simple data set (without saving precision) + lw = LedoitWolf(store_precision=False) + lw.fit(X) + assert_almost_equal(lw.score(X), score_, 4) + assert lw.precision_ is None + + +def _naive_ledoit_wolf_shrinkage(X): + # A simple implementation of the formulas from Ledoit & Wolf + + # The computation below achieves the following computations of the + # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for + # Large-Dimensional Covariance Matrices" + # beta and delta are given in the beginning of section 3.2 + n_samples, n_features = X.shape + emp_cov = empirical_covariance(X, assume_centered=False) + mu = np.trace(emp_cov) / n_features + delta_ = emp_cov.copy() + delta_.flat[:: n_features + 1] -= mu + delta = (delta_**2).sum() / n_features + X2 = X**2 + beta_ = ( + 1.0 + / (n_features * n_samples) + * np.sum(np.dot(X2.T, X2) / n_samples - emp_cov**2) + ) + + beta = min(beta_, delta) + shrinkage = beta / delta + return shrinkage + + +def test_ledoit_wolf_small(): + # Compare our blocked implementation to the naive implementation + X_small = X[:, :4] + lw = LedoitWolf() + lw.fit(X_small) + shrinkage_ = lw.shrinkage_ + + assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small)) + + +def test_ledoit_wolf_large(): + # test that ledoit_wolf doesn't error on data that is wider than block_size + rng = np.random.RandomState(0) + # use a number of features that is larger than the block-size + X = rng.normal(size=(10, 20)) + lw = LedoitWolf(block_size=10).fit(X) + # check that covariance is about diagonal (random normal noise) + assert_almost_equal(lw.covariance_, np.eye(20), 0) + cov = lw.covariance_ + + # check that the result is consistent with not splitting data into blocks. + lw = LedoitWolf(block_size=25).fit(X) + assert_almost_equal(lw.covariance_, cov) + + +@pytest.mark.parametrize( + "ledoit_wolf_fitting_function", [LedoitWolf().fit, ledoit_wolf_shrinkage] +) +def test_ledoit_wolf_empty_array(ledoit_wolf_fitting_function): + """Check that we validate X and raise proper error with 0-sample array.""" + X_empty = np.zeros((0, 2)) + with pytest.raises(ValueError, match="Found array with 0 sample"): + ledoit_wolf_fitting_function(X_empty) + + +def test_oas(): + # Tests OAS module on a simple dataset. 
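# --- Editor's note (not part of the diff): OAS, like Ledoit-Wolf, chooses its
# shrinkage coefficient from the data; the test below repeatedly checks that
# the oas() function and the OAS estimator agree. A minimal sketch of that
# equivalence, with X_demo as illustrative data:
import numpy as np
from sklearn.covariance import OAS, oas

rng = np.random.RandomState(0)
X_demo = rng.randn(100, 5)
cov_from_func, shrinkage_from_func = oas(X_demo)
est = OAS().fit(X_demo)
assert np.allclose(cov_from_func, est.covariance_)
assert np.isclose(shrinkage_from_func, est.shrinkage_)
# --- end editor's note ---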
+ # test shrinkage coeff on a simple data set + X_centered = X - X.mean(axis=0) + oa = OAS(assume_centered=True) + oa.fit(X_centered) + shrinkage_ = oa.shrinkage_ + score_ = oa.score(X_centered) + # compare shrunk covariance obtained from data and from MLE estimate + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered, assume_centered=True) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + # compare estimates given by OAS and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True) + scov.fit(X_centered) + assert_array_almost_equal(scov.covariance_, oa.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0:1] + oa = OAS(assume_centered=True) + oa.fit(X_1d) + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + assert_array_almost_equal((X_1d**2).sum() / n_samples, oa.covariance_, 4) + + # test shrinkage coeff on a simple data set (without saving precision) + oa = OAS(store_precision=False, assume_centered=True) + oa.fit(X_centered) + assert_almost_equal(oa.score(X_centered), score_, 4) + assert oa.precision_ is None + + # Same tests without assuming centered data-------------------------------- + # test shrinkage coeff on a simple data set + oa = OAS() + oa.fit(X) + assert_almost_equal(oa.shrinkage_, shrinkage_, 4) + assert_almost_equal(oa.score(X), score_, 4) + # compare shrunk covariance obtained from data and from MLE estimate + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + # compare estimates given by OAS and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=oa.shrinkage_) + scov.fit(X) + assert_array_almost_equal(scov.covariance_, oa.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + oa = OAS() + oa.fit(X_1d) + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4) + + # test with one sample + # warning should be raised when using only 1 sample + X_1sample = np.arange(5).reshape(1, 5) + oa = OAS() + warn_msg = "Only one sample available. 
You may want to reshape your data array" + with pytest.warns(UserWarning, match=warn_msg): + oa.fit(X_1sample) + + assert_array_almost_equal(oa.covariance_, np.zeros(shape=(5, 5), dtype=np.float64)) + + # test shrinkage coeff on a simple data set (without saving precision) + oa = OAS(store_precision=False) + oa.fit(X) + assert_almost_equal(oa.score(X), score_, 4) + assert oa.precision_ is None + + # test function _oas without assuming centered data + X_1f = X[:, 0:1] + oa = OAS() + oa.fit(X_1f) + # compare shrunk covariance obtained from data and from MLE estimate + _oa_cov_from_mle, _oa_shrinkage_from_mle = _oas(X_1f) + assert_array_almost_equal(_oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(_oa_shrinkage_from_mle, oa.shrinkage_) + assert_array_almost_equal((X_1f**2).sum() / n_samples, oa.covariance_, 4) + + +def test_EmpiricalCovariance_validates_mahalanobis(): + """Checks that EmpiricalCovariance validates data with mahalanobis.""" + cov = EmpiricalCovariance().fit(X) + + msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input" + with pytest.raises(ValueError, match=msg): + cov.mahalanobis(X[:, :2]) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py new file mode 100644 index 0000000000000000000000000000000000000000..ca85717fb378243ff8dcb75db1adade9a6c50c18 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py @@ -0,0 +1,52 @@ +""" +Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope). +""" + +import numpy as np +import pytest + +from sklearn.covariance import EllipticEnvelope +from sklearn.exceptions import NotFittedError +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + + +def test_elliptic_envelope(global_random_seed): + rnd = np.random.RandomState(global_random_seed) + X = rnd.randn(100, 10) + clf = EllipticEnvelope(contamination=0.1) + with pytest.raises(NotFittedError): + clf.predict(X) + with pytest.raises(NotFittedError): + clf.decision_function(X) + clf.fit(X) + y_pred = clf.predict(X) + scores = clf.score_samples(X) + decisions = clf.decision_function(X) + + assert_array_almost_equal(scores, -clf.mahalanobis(X)) + assert_array_almost_equal(clf.mahalanobis(X), clf.dist_) + assert_almost_equal( + clf.score(X, np.ones(100)), (100 - y_pred[y_pred == -1].size) / 100.0 + ) + assert sum(y_pred == -1) == sum(decisions < 0) + + +def test_score_samples(): + X_train = [[1, 1], [1, 2], [2, 1]] + clf1 = EllipticEnvelope(contamination=0.2).fit(X_train) + clf2 = EllipticEnvelope().fit(X_train) + assert_array_equal( + clf1.score_samples([[2.0, 2.0]]), + clf1.decision_function([[2.0, 2.0]]) + clf1.offset_, + ) + assert_array_equal( + clf2.score_samples([[2.0, 2.0]]), + clf2.decision_function([[2.0, 2.0]]) + clf2.offset_, + ) + assert_array_equal( + clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]]) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py new file mode 100644 index 0000000000000000000000000000000000000000..a7d251a5bbdfe40e5a422111ab3e1c187e0efbed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py @@ -0,0 +1,286 @@ +""" Test the 
graphical_lasso module.
+"""
+import sys
+from io import StringIO
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+from scipy import linalg
+
+from sklearn import datasets
+from sklearn.covariance import (
+    GraphicalLasso,
+    GraphicalLassoCV,
+    empirical_covariance,
+    graphical_lasso,
+)
+from sklearn.datasets import make_sparse_spd_matrix
+from sklearn.utils import check_random_state
+from sklearn.utils._testing import (
+    _convert_container,
+    assert_array_almost_equal,
+    assert_array_less,
+)
+
+
+def test_graphical_lassos(random_state=1):
+    """Test the graphical lasso solvers.
+
+    This check is unstable for some random seeds where the covariances found
+    with "cd" and "lars" solvers are different (4 cases / 100 tries).
+    """
+    # Sample data from a sparse multivariate normal
+    dim = 20
+    n_samples = 100
+    random_state = check_random_state(random_state)
+    prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
+    cov = linalg.inv(prec)
+    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
+    emp_cov = empirical_covariance(X)
+
+    for alpha in (0.0, 0.1, 0.25):
+        covs = dict()
+        icovs = dict()
+        for method in ("cd", "lars"):
+            cov_, icov_, costs = graphical_lasso(
+                emp_cov, return_costs=True, alpha=alpha, mode=method
+            )
+            covs[method] = cov_
+            icovs[method] = icov_
+            costs, dual_gap = np.array(costs).T
+            # Check that the costs always decrease (doesn't hold if alpha == 0)
+            if not alpha == 0:
+                # use 1e-12 since the cost can be exactly 0
+                assert_array_less(np.diff(costs), 1e-12)
+        # Check that the 2 approaches give similar results
+        assert_allclose(covs["cd"], covs["lars"], atol=5e-4)
+        assert_allclose(icovs["cd"], icovs["lars"], atol=5e-4)
+
+    # Smoke test the estimator
+    model = GraphicalLasso(alpha=0.25).fit(X)
+    model.score(X)
+    assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4)
+    assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4)
+
+    # For a centered matrix, assume_centered could be chosen True or False
+    # Check that this returns indeed the same result for centered data
+    Z = X - X.mean(0)
+    precs = list()
+    for assume_centered in (False, True):
+        prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_
+        precs.append(prec_)
+    assert_array_almost_equal(precs[0], precs[1])
+
+
+def test_graphical_lasso_when_alpha_equals_0():
+    """Test graphical_lasso's early return condition when alpha=0."""
+    X = np.random.randn(100, 10)
+    emp_cov = empirical_covariance(X, assume_centered=True)
+
+    model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov)
+    assert_allclose(model.precision_, np.linalg.inv(emp_cov))
+
+    _, precision = graphical_lasso(emp_cov, alpha=0)
+    assert_allclose(precision, np.linalg.inv(emp_cov))
+
+
+@pytest.mark.parametrize("mode", ["cd", "lars"])
+def test_graphical_lasso_n_iter(mode):
+    X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0)
+    emp_cov = empirical_covariance(X)
+
+    _, _, n_iter = graphical_lasso(
+        emp_cov, 0.2, mode=mode, max_iter=2, return_n_iter=True
+    )
+    assert n_iter == 2
+
+
+def test_graphical_lasso_iris():
+    # Hard-coded solution from R glasso package for alpha=1.0
+    # (need to set penalize.diagonal to FALSE)
+    cov_R = np.array(
+        [
+            [0.68112222, 0.0000000, 0.265820, 0.02464314],
+            [0.00000000, 0.1887129, 0.000000, 0.00000000],
+            [0.26582000, 0.0000000, 3.095503, 0.28697200],
+            [0.02464314, 0.0000000, 0.286972, 0.57713289],
+        ]
+    )
+    icov_R = np.array(
+        [
+            [1.5190747, 0.000000,
-0.1304475, 0.0000000], + [0.0000000, 5.299055, 0.0000000, 0.0000000], + [-0.1304475, 0.000000, 0.3498624, -0.1683946], + [0.0000000, 0.000000, -0.1683946, 1.8164353], + ] + ) + X = datasets.load_iris().data + emp_cov = empirical_covariance(X) + for method in ("cd", "lars"): + cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method) + assert_array_almost_equal(cov, cov_R) + assert_array_almost_equal(icov, icov_R) + + +def test_graph_lasso_2D(): + # Hard-coded solution from Python skggm package + # obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)` + cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]]) + + icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]]) + X = datasets.load_iris().data[:, 2:] + emp_cov = empirical_covariance(X) + for method in ("cd", "lars"): + cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method) + assert_array_almost_equal(cov, cov_skggm) + assert_array_almost_equal(icov, icov_skggm) + + +def test_graphical_lasso_iris_singular(): + # Small subset of rows to test the rank-deficient case + # Need to choose samples such that none of the variances are zero + indices = np.arange(10, 13) + + # Hard-coded solution from R glasso package for alpha=0.01 + cov_R = np.array( + [ + [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149], + [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222], + [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009], + [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222], + ] + ) + icov_R = np.array( + [ + [24.42244057, -16.831679593, 0.0, 0.0], + [-16.83168201, 24.351841681, -6.206896552, -12.5], + [0.0, -6.206896171, 153.103448276, 0.0], + [0.0, -12.499999143, 0.0, 462.5], + ] + ) + X = datasets.load_iris().data[indices, :] + emp_cov = empirical_covariance(X) + for method in ("cd", "lars"): + cov, icov = graphical_lasso( + emp_cov, alpha=0.01, return_costs=False, mode=method + ) + assert_array_almost_equal(cov, cov_R, decimal=5) + assert_array_almost_equal(icov, icov_R, decimal=5) + + +def test_graphical_lasso_cv(random_state=1): + # Sample data from a sparse multivariate normal + dim = 5 + n_samples = 6 + random_state = check_random_state(random_state) + prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state) + cov = linalg.inv(prec) + X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples) + # Capture stdout, to smoke test the verbose mode + orig_stdout = sys.stdout + try: + sys.stdout = StringIO() + # We need verbose very high so that Parallel prints on stdout + GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X) + finally: + sys.stdout = orig_stdout + + +@pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"]) +def test_graphical_lasso_cv_alphas_iterable(alphas_container_type): + """Check that we can pass an array-like to `alphas`. 
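# --- Editor's note (not part of the diff): each entry of `alphas` is an l1
# penalty on the precision matrix. graphical_lasso maximizes the penalized
# log-likelihood
#     log det(Theta) - tr(S @ Theta) - alpha * ||Theta||_1 (off-diagonal),
# so a larger alpha typically yields a sparser precision estimate. A sketch,
# with illustrative names (X_demo, prec_weak, prec_strong):
import numpy as np
from scipy import linalg
from sklearn.covariance import empirical_covariance, graphical_lasso
from sklearn.datasets import make_sparse_spd_matrix

rng = np.random.RandomState(0)
prec = make_sparse_spd_matrix(10, alpha=0.9, random_state=0)
X_demo = rng.multivariate_normal(np.zeros(10), linalg.inv(prec), size=200)
emp_cov = empirical_covariance(X_demo)
_, prec_weak = graphical_lasso(emp_cov, alpha=0.01)
_, prec_strong = graphical_lasso(emp_cov, alpha=0.5)
# the stronger penalty usually zeroes out more off-diagonal entries
print(np.sum(np.isclose(prec_weak, 0.0)), np.sum(np.isclose(prec_strong, 0.0)))
# --- end editor's note ---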
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/22489 + """ + true_cov = np.array( + [ + [0.8, 0.0, 0.2, 0.0], + [0.0, 0.4, 0.0, 0.0], + [0.2, 0.0, 0.3, 0.1], + [0.0, 0.0, 0.1, 0.7], + ] + ) + rng = np.random.RandomState(0) + X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) + alphas = _convert_container([0.02, 0.03], alphas_container_type) + GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X) + + +@pytest.mark.parametrize( + "alphas,err_type,err_msg", + [ + ([-0.02, 0.03], ValueError, "must be > 0"), + ([0, 0.03], ValueError, "must be > 0"), + (["not_number", 0.03], TypeError, "must be an instance of float"), + ], +) +def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg): + """Check that if an array-like containing a value + outside of (0, inf] is passed to `alphas`, a ValueError is raised. + Check if a string is passed, a TypeError is raised. + """ + true_cov = np.array( + [ + [0.8, 0.0, 0.2, 0.0], + [0.0, 0.4, 0.0, 0.0], + [0.2, 0.0, 0.3, 0.1], + [0.0, 0.0, 0.1, 0.7], + ] + ) + rng = np.random.RandomState(0) + X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) + + with pytest.raises(err_type, match=err_msg): + GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X) + + +def test_graphical_lasso_cv_scores(): + splits = 4 + n_alphas = 5 + n_refinements = 3 + true_cov = np.array( + [ + [0.8, 0.0, 0.2, 0.0], + [0.0, 0.4, 0.0, 0.0], + [0.2, 0.0, 0.3, 0.1], + [0.0, 0.0, 0.1, 0.7], + ] + ) + rng = np.random.RandomState(0) + X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) + cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit( + X + ) + + cv_results = cov.cv_results_ + # alpha and one for each split + + total_alphas = n_refinements * n_alphas + 1 + keys = ["alphas"] + split_keys = [f"split{i}_test_score" for i in range(splits)] + for key in keys + split_keys: + assert key in cv_results + assert len(cv_results[key]) == total_alphas + + cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys]) + expected_mean = cv_scores.mean(axis=0) + expected_std = cv_scores.std(axis=0) + + assert_allclose(cov.cv_results_["mean_test_score"], expected_mean) + assert_allclose(cov.cv_results_["std_test_score"], expected_std) + + +# TODO(1.5): remove in 1.5 +def test_graphical_lasso_cov_init_deprecation(): + """Check that we raise a deprecation warning if providing `cov_init` in + `graphical_lasso`.""" + rng, dim, n_samples = np.random.RandomState(0), 20, 100 + prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=0) + cov = linalg.inv(prec) + X = rng.multivariate_normal(np.zeros(dim), cov, size=n_samples) + + emp_cov = empirical_covariance(X) + with pytest.warns(FutureWarning, match="cov_init parameter is deprecated"): + graphical_lasso(emp_cov, alpha=0.1, cov_init=emp_cov) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..44dcdbbbf824934a8f31bc832a389d90f396c6d6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py @@ -0,0 +1,171 @@ +# Author: Alexandre Gramfort +# Gael Varoquaux +# Virgile Fritsch +# +# License: BSD 3 clause + +import itertools + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.covariance import MinCovDet, 
empirical_covariance, fast_mcd
+from sklearn.utils._testing import assert_array_almost_equal
+
+X = datasets.load_iris().data
+X_1d = X[:, 0]
+n_samples, n_features = X.shape
+
+
+def test_mcd(global_random_seed):
+    # Tests the FastMCD algorithm implementation
+    # Small data set
+    # test without outliers (random independent normal data)
+    launch_mcd_on_dataset(100, 5, 0, 0.02, 0.1, 75, global_random_seed)
+    # test with a contaminated data set (medium contamination)
+    launch_mcd_on_dataset(100, 5, 20, 0.3, 0.3, 65, global_random_seed)
+    # test with a contaminated data set (strong contamination)
+    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50, global_random_seed)
+
+    # Medium data set
+    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540, global_random_seed)
+
+    # Large data set
+    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870, global_random_seed)
+
+    # 1D data set
+    launch_mcd_on_dataset(500, 1, 100, 0.02, 0.02, 350, global_random_seed)
+
+
+def test_fast_mcd_on_invalid_input():
+    X = np.arange(100)
+    msg = "Expected 2D array, got 1D array instead"
+    with pytest.raises(ValueError, match=msg):
+        fast_mcd(X)
+
+
+def test_mcd_class_on_invalid_input():
+    X = np.arange(100)
+    mcd = MinCovDet()
+    msg = "Expected 2D array, got 1D array instead"
+    with pytest.raises(ValueError, match=msg):
+        mcd.fit(X)
+
+
+def launch_mcd_on_dataset(
+    n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support, seed
+):
+    rand_gen = np.random.RandomState(seed)
+    data = rand_gen.randn(n_samples, n_features)
+    # add some outliers
+    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
+    outliers_offset = 10.0 * (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
+    data[outliers_index] += outliers_offset
+    inliers_mask = np.ones(n_samples).astype(bool)
+    inliers_mask[outliers_index] = False
+
+    pure_data = data[inliers_mask]
+    # compute MCD by fitting an object
+    mcd_fit = MinCovDet(random_state=seed).fit(data)
+    T = mcd_fit.location_
+    S = mcd_fit.covariance_
+    H = mcd_fit.support_
+    # compare with the estimates learnt from the inliers
+    error_location = np.mean((pure_data.mean(0) - T) ** 2)
+    assert error_location < tol_loc
+    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
+    assert error_cov < tol_cov
+    assert np.sum(H) >= tol_support
+    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
+
+
+def test_mcd_issue1127():
+    # Check that the code does not break with X.shape = (3, 1)
+    # (i.e. n_support = n_samples)
+    rnd = np.random.RandomState(0)
+    X = rnd.normal(size=(3, 1))
+    mcd = MinCovDet()
+    mcd.fit(X)
+
+
+def test_mcd_issue3367(global_random_seed):
+    # Check that MCD completes when the covariance matrix is singular
+    # i.e. one of the rows and columns are all zeros
+    rand_gen = np.random.RandomState(global_random_seed)
+
+    # Think of these as the values for X and Y -> 10 values between -5 and 5
+    data_values = np.linspace(-5, 5, 10).tolist()
+    # Get the Cartesian product of all possible coordinate pairs from above set
+    data = np.array(list(itertools.product(data_values, data_values)))
+
+    # Add a third column that's all zeros to make our data a set of points
+    # within a plane, which means that the covariance matrix will be singular
+    data = np.hstack((data, np.zeros((data.shape[0], 1))))
+
+    # The below line of code should raise an exception if the covariance matrix
+    # is singular. As a further test, since we have points in XYZ, the
+    # principal components (Eigenvectors) of these directly relate to the
+    # geometry of the points.
Since it's a plane, we should be able to test + # that the Eigenvector that corresponds to the smallest Eigenvalue is the + # plane normal, specifically [0, 0, 1], since everything is in the XY plane + # (as I've set it up above). To do this one would start by: + # + # evals, evecs = np.linalg.eigh(mcd_fit.covariance_) + # normal = evecs[:, np.argmin(evals)] + # + # After which we need to assert that our `normal` is equal to [0, 0, 1]. + # Do note that there is floating point error associated with this, so it's + # best to subtract the two and then compare some small tolerance (e.g. + # 1e-12). + MinCovDet(random_state=rand_gen).fit(data) + + +def test_mcd_support_covariance_is_zero(): + # Check that MCD returns a ValueError with informative message when the + # covariance of the support data is equal to 0. + X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1]) + X_1 = X_1.reshape(-1, 1) + X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3]) + X_2 = X_2.reshape(-1, 1) + msg = ( + "The covariance matrix of the support data is equal to 0, try to " + "increase support_fraction" + ) + for X in [X_1, X_2]: + with pytest.raises(ValueError, match=msg): + MinCovDet().fit(X) + + +def test_mcd_increasing_det_warning(global_random_seed): + # Check that a warning is raised if we observe increasing determinants + # during the c_step. In theory the sequence of determinants should be + # decreasing. Increasing determinants are likely due to ill-conditioned + # covariance matrices that result in poor precision matrices. + + X = [ + [5.1, 3.5, 1.4, 0.2], + [4.9, 3.0, 1.4, 0.2], + [4.7, 3.2, 1.3, 0.2], + [4.6, 3.1, 1.5, 0.2], + [5.0, 3.6, 1.4, 0.2], + [4.6, 3.4, 1.4, 0.3], + [5.0, 3.4, 1.5, 0.2], + [4.4, 2.9, 1.4, 0.2], + [4.9, 3.1, 1.5, 0.1], + [5.4, 3.7, 1.5, 0.2], + [4.8, 3.4, 1.6, 0.2], + [4.8, 3.0, 1.4, 0.1], + [4.3, 3.0, 1.1, 0.1], + [5.1, 3.5, 1.4, 0.3], + [5.7, 3.8, 1.7, 0.3], + [5.4, 3.4, 1.7, 0.2], + [4.6, 3.6, 1.0, 0.2], + [5.0, 3.0, 1.6, 0.2], + [5.2, 3.5, 1.5, 0.2], + ] + + mcd = MinCovDet(support_fraction=0.5, random_state=global_random_seed) + warn_msg = "Determinant has increased" + with pytest.warns(RuntimeWarning, match=warn_msg): + mcd.fit(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..69e40ce08aed005186416588531f644eb566f150 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_base.py @@ -0,0 +1,266 @@ +"""Generic feature selection mixin""" + +# Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman +# License: BSD 3 clause + +import warnings +from abc import ABCMeta, abstractmethod +from operator import attrgetter + +import numpy as np +from scipy.sparse import csc_matrix, issparse + +from ..base import TransformerMixin +from ..utils import ( + _is_pandas_df, + _safe_indexing, + check_array, + safe_sqr, +) +from ..utils._set_output import _get_output_config +from ..utils._tags import _safe_tags +from ..utils.validation import _check_feature_names_in, check_is_fitted + + +class SelectorMixin(TransformerMixin, metaclass=ABCMeta): + """ + Transformer mixin that performs feature selection given a support mask + + This mixin provides a feature selector implementation with `transform` and + `inverse_transform` functionality given an implementation of + `_get_support_mask`. 
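# --- Editor's note (not part of the diff): the mixin contract in action,
# sketched with VarianceThreshold, one concrete SelectorMixin subclass;
# transform is exactly column selection by the _get_support_mask boolean mask.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.feature_selection import VarianceThreshold

X_demo, _ = load_iris(return_X_y=True)
selector = VarianceThreshold(threshold=0.5).fit(X_demo)
mask = selector.get_support()
assert np.array_equal(selector.transform(X_demo), X_demo[:, mask])
# inverse_transform re-inserts zero columns where features were dropped
assert selector.inverse_transform(selector.transform(X_demo)).shape == X_demo.shape
# --- end editor's note ---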
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> from sklearn.base import BaseEstimator + >>> from sklearn.feature_selection import SelectorMixin + >>> class FeatureSelector(SelectorMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self.n_features_in_ = X.shape[1] + ... return self + ... def _get_support_mask(self): + ... mask = np.zeros(self.n_features_in_, dtype=bool) + ... mask[:2] = True # select the first two features + ... return mask + >>> X, y = load_iris(return_X_y=True) + >>> FeatureSelector().fit_transform(X, y).shape + (150, 2) + """ + + def get_support(self, indices=False): + """ + Get a mask, or integer index, of the features selected. + + Parameters + ---------- + indices : bool, default=False + If True, the return value will be an array of integers, rather + than a boolean mask. + + Returns + ------- + support : array + An index that selects the retained features from a feature vector. + If `indices` is False, this is a boolean array of shape + [# input features], in which an element is True iff its + corresponding feature is selected for retention. If `indices` is + True, this is an integer array of shape [# output features] whose + values are indices into the input feature vector. + """ + mask = self._get_support_mask() + return mask if not indices else np.where(mask)[0] + + @abstractmethod + def _get_support_mask(self): + """ + Get the boolean mask indicating which features are selected + + Returns + ------- + support : boolean array of shape [# input features] + An element is True iff its corresponding feature is selected for + retention. + """ + + def transform(self, X): + """Reduce X to the selected features. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + Returns + ------- + X_r : array of shape [n_samples, n_selected_features] + The input samples with only the selected features. + """ + # Preserve X when X is a dataframe and the output is configured to + # be pandas. + output_config_dense = _get_output_config("transform", estimator=self)["dense"] + preserve_X = output_config_dense != "default" and _is_pandas_df(X) + + # note: we use _safe_tags instead of _get_tags because this is a + # public Mixin. + X = self._validate_data( + X, + dtype=None, + accept_sparse="csr", + force_all_finite=not _safe_tags(self, key="allow_nan"), + cast_to_ndarray=not preserve_X, + reset=False, + ) + return self._transform(X) + + def _transform(self, X): + """Reduce X to the selected features.""" + mask = self.get_support() + if not mask.any(): + warnings.warn( + ( + "No features were selected: either the data is" + " too noisy or the selection test too strict." + ), + UserWarning, + ) + if hasattr(X, "iloc"): + return X.iloc[:, :0] + return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0)) + return _safe_indexing(X, mask, axis=1) + + def inverse_transform(self, X): + """Reverse the transformation operation. + + Parameters + ---------- + X : array of shape [n_samples, n_selected_features] + The input samples. + + Returns + ------- + X_r : array of shape [n_samples, n_original_features] + `X` with columns of zeros inserted where features would have + been removed by :meth:`transform`. + """ + if issparse(X): + X = X.tocsc() + # insert additional entries in indptr: + # e.g. 
if transform changed indptr from [0 2 6 7] to [0 2 3] + # col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3] + it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1)) + col_nonzeros = it.ravel() + indptr = np.concatenate([[0], np.cumsum(col_nonzeros)]) + Xt = csc_matrix( + (X.data, X.indices, indptr), + shape=(X.shape[0], len(indptr) - 1), + dtype=X.dtype, + ) + return Xt + + support = self.get_support() + X = check_array(X, dtype=None) + if support.sum() != X.shape[1]: + raise ValueError("X has a different shape than during fitting.") + + if X.ndim == 1: + X = X[None, :] + Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype) + Xt[:, support] = X + return Xt + + def get_feature_names_out(self, input_features=None): + """Mask feature names according to selected features. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self) + input_features = _check_feature_names_in(self, input_features) + return input_features[self.get_support()] + + +def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1): + """ + Retrieve and aggregate (ndim > 1) the feature importances + from an estimator. Also optionally applies transformation. + + Parameters + ---------- + estimator : estimator + A scikit-learn estimator from which we want to get the feature + importances. + + getter : "auto", str or callable + An attribute or a callable to get the feature importance. If `"auto"`, + `estimator` is expected to expose `coef_` or `feature_importances`. + + transform_func : {"norm", "square"}, default=None + The transform to apply to the feature importances. By default (`None`) + no transformation is applied. + + norm_order : int, default=1 + The norm order to apply when `transform_func="norm"`. Only applied + when `importances.ndim > 1`. + + Returns + ------- + importances : ndarray of shape (n_features,) + The features importances, optionally transformed. + """ + if isinstance(getter, str): + if getter == "auto": + if hasattr(estimator, "coef_"): + getter = attrgetter("coef_") + elif hasattr(estimator, "feature_importances_"): + getter = attrgetter("feature_importances_") + else: + raise ValueError( + "when `importance_getter=='auto'`, the underlying " + f"estimator {estimator.__class__.__name__} should have " + "`coef_` or `feature_importances_` attribute. Either " + "pass a fitted estimator to feature selector or call fit " + "before calling transform." 
+ ) + else: + getter = attrgetter(getter) + elif not callable(getter): + raise ValueError("`importance_getter` has to be a string or `callable`") + + importances = getter(estimator) + + if transform_func is None: + return importances + elif transform_func == "norm": + if importances.ndim == 1: + importances = np.abs(importances) + else: + importances = np.linalg.norm(importances, axis=0, ord=norm_order) + elif transform_func == "square": + if importances.ndim == 1: + importances = safe_sqr(importances) + else: + importances = safe_sqr(importances).sum(axis=0) + else: + raise ValueError( + "Valid values for `transform_func` are " + + "None, 'norm' and 'square'. Those two " + + "transformation are only supported now" + ) + + return importances diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py new file mode 100644 index 0000000000000000000000000000000000000000..61addedd2de787ccc38135147c7df6f895dc53b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py @@ -0,0 +1,522 @@ +# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena +# License: BSD 3 clause + +from copy import deepcopy +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone +from ..exceptions import NotFittedError +from ..utils._param_validation import HasMethods, Interval, Options +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import available_if +from ..utils.validation import _num_features, check_is_fitted, check_scalar +from ._base import SelectorMixin, _get_feature_importances + + +def _calculate_threshold(estimator, importances, threshold): + """Interpret the threshold value""" + + if threshold is None: + # determine default from estimator + est_name = estimator.__class__.__name__ + is_l1_penalized = hasattr(estimator, "penalty") and estimator.penalty == "l1" + is_lasso = "Lasso" in est_name + is_elasticnet_l1_penalized = "ElasticNet" in est_name and ( + (hasattr(estimator, "l1_ratio_") and np.isclose(estimator.l1_ratio_, 1.0)) + or (hasattr(estimator, "l1_ratio") and np.isclose(estimator.l1_ratio, 1.0)) + ) + if is_l1_penalized or is_lasso or is_elasticnet_l1_penalized: + # the natural default threshold is 0 when l1 penalty was used + threshold = 1e-5 + else: + threshold = "mean" + + if isinstance(threshold, str): + if "*" in threshold: + scale, reference = threshold.split("*") + scale = float(scale.strip()) + reference = reference.strip() + + if reference == "median": + reference = np.median(importances) + elif reference == "mean": + reference = np.mean(importances) + else: + raise ValueError("Unknown reference: " + reference) + + threshold = scale * reference + + elif threshold == "median": + threshold = np.median(importances) + + elif threshold == "mean": + threshold = np.mean(importances) + + else: + raise ValueError( + "Expected threshold='mean' or threshold='median' got %s" % threshold + ) + + else: + threshold = float(threshold) + + return threshold + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `estimator_` if available, otherwise we check the + unfitted `estimator`. We raise the original `AttributeError` if `attr` does + not exist. 
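# --- Editor's note (not part of the diff): the threshold grammar parsed by
# _calculate_threshold above, observed through SelectFromModel (defined
# below); a string like "1.25*mean" splits into scale * reference. A sketch,
# with X_demo/y_demo as illustrative data:
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression

X_demo = np.array([[0.87, -1.34, 0.31], [-2.79, -0.02, -0.85],
                   [-1.34, -0.48, -2.55], [1.92, 1.48, 0.65]])
y_demo = np.array([0, 1, 0, 1])
sfm = SelectFromModel(LogisticRegression(), threshold="1.25*mean")
sfm.fit(X_demo, y_demo)
importances = np.abs(sfm.estimator_.coef_).ravel()
assert np.isclose(sfm.threshold_, 1.25 * importances.mean())
# --- end editor's note ---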
This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "estimator_"): + getattr(self.estimator_, attr) + else: + getattr(self.estimator, attr) + + return True + + return check + + +class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator): + """Meta-transformer for selecting features based on importance weights. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object + The base estimator from which the transformer is built. + This can be both a fitted (if ``prefit`` is set to True) + or a non-fitted estimator. The estimator should have a + ``feature_importances_`` or ``coef_`` attribute after fitting. + Otherwise, the ``importance_getter`` parameter should be used. + + threshold : str or float, default=None + The threshold value to use for feature selection. Features whose + absolute importance value is greater or equal are kept while the others + are discarded. If "median" (resp. "mean"), then the ``threshold`` value + is the median (resp. the mean) of the feature importances. A scaling + factor (e.g., "1.25*mean") may also be used. If None and if the + estimator has a parameter penalty set to l1, either explicitly + or implicitly (e.g, Lasso), the threshold used is 1e-5. + Otherwise, "mean" is used by default. + + prefit : bool, default=False + Whether a prefit model is expected to be passed into the constructor + directly or not. + If `True`, `estimator` must be a fitted estimator. + If `False`, `estimator` is fitted and updated by calling + `fit` and `partial_fit`, respectively. + + norm_order : non-zero int, inf, -inf, default=1 + Order of the norm used to filter the vectors of coefficients below + ``threshold`` in the case where the ``coef_`` attribute of the + estimator is of dimension 2. + + max_features : int, callable, default=None + The maximum number of features to select. + + - If an integer, then it specifies the maximum number of features to + allow. + - If a callable, then it specifies how to calculate the maximum number of + features allowed by using the output of `max_features(X)`. + - If `None`, then all features are kept. + + To only select based on ``max_features``, set ``threshold=-np.inf``. + + .. versionadded:: 0.20 + .. versionchanged:: 1.1 + `max_features` accepts a callable. + + importance_getter : str or callable, default='auto' + If 'auto', uses the feature importance either through a ``coef_`` + attribute or ``feature_importances_`` attribute of estimator. + + Also accepts a string that specifies an attribute name/path + for extracting feature importance (implemented with `attrgetter`). + For example, give `regressor_.coef_` in case of + :class:`~sklearn.compose.TransformedTargetRegressor` or + `named_steps.clf.feature_importances_` in case of + :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. + + If `callable`, overrides the default feature importance getter. + The callable is passed with the fitted estimator and it should + return importance for each feature. + + .. versionadded:: 0.24 + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the transformer is built. This attribute + exist only when `fit` has been called. + + - If `prefit=True`, it is a deep copy of `estimator`. + - If `prefit=False`, it is a clone of `estimator` and fit on the data + passed to `fit` or `partial_fit`. + + n_features_in_ : int + Number of features seen during :term:`fit`. 
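# --- Editor's note (not part of the diff): the dotted `importance_getter`
# strings documented above are resolved with attrgetter; e.g. pulling the
# coefficients out of a pipeline's final step. A sketch with illustrative
# data (X_demo, y_demo):
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X_demo = np.array([[0.87, -1.34, 0.31], [-2.79, -0.02, -0.85],
                   [-1.34, -0.48, -2.55], [1.92, 1.48, 0.65]])
y_demo = np.array([0, 1, 0, 1])
pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression())])
sfm = SelectFromModel(pipe, importance_getter="named_steps.clf.coef_")
print(sfm.fit(X_demo, y_demo).get_support())
# --- end editor's note ---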
Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + max_features_ : int + Maximum number of features calculated during :term:`fit`. Only defined + if the ``max_features`` is not `None`. + + - If `max_features` is an `int`, then `max_features_ = max_features`. + - If `max_features` is a callable, then `max_features_ = max_features(X)`. + + .. versionadded:: 1.1 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + threshold_ : float + The threshold value used for feature selection. + + See Also + -------- + RFE : Recursive feature elimination based on importance weights. + RFECV : Recursive feature elimination with built-in cross-validated + selection of the best number of features. + SequentialFeatureSelector : Sequential cross-validation based feature + selection. Does not rely on importance weights. + + Notes + ----- + Allows NaN/Inf in the input if the underlying estimator does as well. + + Examples + -------- + >>> from sklearn.feature_selection import SelectFromModel + >>> from sklearn.linear_model import LogisticRegression + >>> X = [[ 0.87, -1.34, 0.31 ], + ... [-2.79, -0.02, -0.85 ], + ... [-1.34, -0.48, -2.55 ], + ... [ 1.92, 1.48, 0.65 ]] + >>> y = [0, 1, 0, 1] + >>> selector = SelectFromModel(estimator=LogisticRegression()).fit(X, y) + >>> selector.estimator_.coef_ + array([[-0.3252..., 0.8345..., 0.4976...]]) + >>> selector.threshold_ + 0.55249... + >>> selector.get_support() + array([False, True, False]) + >>> selector.transform(X) + array([[-1.34], + [-0.02], + [-0.48], + [ 1.48]]) + + Using a callable to create a selector that can use no more than half + of the input features. + + >>> def half_callable(X): + ... return round(len(X[0]) / 2) + >>> half_selector = SelectFromModel(estimator=LogisticRegression(), + ... max_features=half_callable) + >>> _ = half_selector.fit(X, y) + >>> half_selector.max_features_ + 2 + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods("fit")], + "threshold": [Interval(Real, None, None, closed="both"), str, None], + "prefit": ["boolean"], + "norm_order": [ + Interval(Integral, None, -1, closed="right"), + Interval(Integral, 1, None, closed="left"), + Options(Real, {np.inf, -np.inf}), + ], + "max_features": [Interval(Integral, 0, None, closed="left"), callable, None], + "importance_getter": [str, callable], + } + + def __init__( + self, + estimator, + *, + threshold=None, + prefit=False, + norm_order=1, + max_features=None, + importance_getter="auto", + ): + self.estimator = estimator + self.threshold = threshold + self.prefit = prefit + self.importance_getter = importance_getter + self.norm_order = norm_order + self.max_features = max_features + + def _get_support_mask(self): + estimator = getattr(self, "estimator_", self.estimator) + max_features = getattr(self, "max_features_", self.max_features) + + if self.prefit: + try: + check_is_fitted(self.estimator) + except NotFittedError as exc: + raise NotFittedError( + "When `prefit=True`, `estimator` is expected to be a fitted " + "estimator." + ) from exc + if callable(max_features): + # This branch is executed when `transform` is called directly and thus + # `max_features_` is not set and we fallback using `self.max_features` + # that is not validated + raise NotFittedError( + "When `prefit=True` and `max_features` is a callable, call `fit` " + "before calling `transform`." 
+ ) + elif max_features is not None and not isinstance(max_features, Integral): + raise ValueError( + f"`max_features` must be an integer. Got `max_features={max_features}` " + "instead." + ) + + scores = _get_feature_importances( + estimator=estimator, + getter=self.importance_getter, + transform_func="norm", + norm_order=self.norm_order, + ) + threshold = _calculate_threshold(estimator, scores, self.threshold) + if self.max_features is not None: + mask = np.zeros_like(scores, dtype=bool) + candidate_indices = np.argsort(-scores, kind="mergesort")[:max_features] + mask[candidate_indices] = True + else: + mask = np.ones_like(scores, dtype=bool) + mask[scores < threshold] = False + return mask + + def _check_max_features(self, X): + if self.max_features is not None: + n_features = _num_features(X) + + if callable(self.max_features): + max_features = self.max_features(X) + else: # int + max_features = self.max_features + + check_scalar( + max_features, + "max_features", + Integral, + min_val=0, + max_val=n_features, + ) + self.max_features_ = max_features + + @_fit_context( + # SelectFromModel.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **fit_params): + """Fit the SelectFromModel meta-transformer. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,), default=None + The target values (integers that correspond to classes in + classification, real numbers in regression). + + **fit_params : dict + - If `enable_metadata_routing=False` (default): + + Parameters directly passed to the `partial_fit` method of the + sub-estimator. They are ignored if `prefit=True`. + + - If `enable_metadata_routing=True`: + + Parameters safely routed to the `partial_fit` method of the + sub-estimator. They are ignored if `prefit=True`. + + .. versionchanged:: 1.4 + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Fitted estimator. + """ + self._check_max_features(X) + + if self.prefit: + try: + check_is_fitted(self.estimator) + except NotFittedError as exc: + raise NotFittedError( + "When `prefit=True`, `estimator` is expected to be a fitted " + "estimator." + ) from exc + self.estimator_ = deepcopy(self.estimator) + else: + if _routing_enabled(): + routed_params = process_routing(self, "fit", **fit_params) + self.estimator_ = clone(self.estimator) + self.estimator_.fit(X, y, **routed_params.estimator.fit) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. + self.estimator_ = clone(self.estimator) + self.estimator_.fit(X, y, **fit_params) + + if hasattr(self.estimator_, "feature_names_in_"): + self.feature_names_in_ = self.estimator_.feature_names_in_ + else: + self._check_feature_names(X, reset=True) + + return self + + @property + def threshold_(self): + """Threshold value used for feature selection.""" + scores = _get_feature_importances( + estimator=self.estimator_, + getter=self.importance_getter, + transform_func="norm", + norm_order=self.norm_order, + ) + return _calculate_threshold(self.estimator, scores, self.threshold) + + @available_if(_estimator_has("partial_fit")) + @_fit_context( + # SelectFromModel.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def partial_fit(self, X, y=None, **partial_fit_params): + """Fit the SelectFromModel meta-transformer only once. 
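+
+        The first call clones the wrapped estimator and, like every later
+        call, forwards the batch to the sub-estimator's `partial_fit`, so the
+        transformer can be trained out-of-core. When `prefit=True`, the
+        already fitted `estimator` is deep-copied on the first call and
+        subsequent batches are ignored.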
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,), default=None + The target values (integers that correspond to classes in + classification, real numbers in regression). + + **partial_fit_params : dict + - If `enable_metadata_routing=False` (default): + + Parameters directly passed to the `partial_fit` method of the + sub-estimator. + + - If `enable_metadata_routing=True`: + + Parameters passed to the `partial_fit` method of the + sub-estimator. They are ignored if `prefit=True`. + + .. versionchanged:: 1.4 + `**partial_fit_params` are routed to the sub-estimator, if + `enable_metadata_routing=True` is set via + :func:`~sklearn.set_config`, which allows for aliasing. + + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Fitted estimator. + """ + first_call = not hasattr(self, "estimator_") + + if first_call: + self._check_max_features(X) + + if self.prefit: + if first_call: + try: + check_is_fitted(self.estimator) + except NotFittedError as exc: + raise NotFittedError( + "When `prefit=True`, `estimator` is expected to be a fitted " + "estimator." + ) from exc + self.estimator_ = deepcopy(self.estimator) + return self + + if first_call: + self.estimator_ = clone(self.estimator) + if _routing_enabled(): + routed_params = process_routing(self, "partial_fit", **partial_fit_params) + self.estimator_ = clone(self.estimator) + self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. + self.estimator_.partial_fit(X, y, **partial_fit_params) + + if hasattr(self.estimator_, "feature_names_in_"): + self.feature_names_in_ = self.estimator_.feature_names_in_ + else: + self._check_feature_names(X, reset=first_call) + + return self + + @property + def n_features_in_(self): + """Number of features seen during `fit`.""" + # For consistency with other estimators we raise a AttributeError so + # that hasattr() fails if the estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.estimator_.n_features_in_ + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(callee="partial_fit", caller="partial_fit") + .add(callee="fit", caller="fit"), + ) + return router + + def _more_tags(self): + return {"allow_nan": _safe_tags(self.estimator, key="allow_nan")} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py new file mode 100644 index 0000000000000000000000000000000000000000..821ef889e7ed90936d6c9898b4c41744d105cb6e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py @@ -0,0 +1,514 @@ +# Author: Nikolay Mayorov +# License: 3-clause BSD + +from numbers import Integral + +import numpy as np +from scipy.sparse import issparse +from scipy.special import digamma + +from ..metrics.cluster import mutual_info_score +from ..neighbors import KDTree, NearestNeighbors +from ..preprocessing import scale +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.multiclass import check_classification_targets +from ..utils.validation import check_array, check_X_y + + +def _compute_mi_cc(x, y, n_neighbors): + """Compute mutual information between two continuous variables. + + Parameters + ---------- + x, y : ndarray, shape (n_samples,) + Samples of two continuous random variables, must have an identical + shape. + + n_neighbors : int + Number of nearest neighbors to search for each point, see [1]_. + + Returns + ------- + mi : float + Estimated mutual information in nat units. If it turned out to be + negative it is replaced by 0. + + Notes + ----- + True mutual information can't be negative. If its estimate by a numerical + method is negative, it means (providing the method is adequate) that the + mutual information is close to 0 and replacing it by 0 is a reasonable + strategy. + + References + ---------- + .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + """ + n_samples = x.size + + x = x.reshape((-1, 1)) + y = y.reshape((-1, 1)) + xy = np.hstack((x, y)) + + # Here we rely on NearestNeighbors to select the fastest algorithm. + nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors) + + nn.fit(xy) + radius = nn.kneighbors()[0] + radius = np.nextafter(radius[:, -1], 0) + + # KDTree is explicitly fit to allow for the querying of number of + # neighbors within a specified radius + kd = KDTree(x, metric="chebyshev") + nx = kd.query_radius(x, radius, count_only=True, return_distance=False) + nx = np.array(nx) - 1.0 + + kd = KDTree(y, metric="chebyshev") + ny = kd.query_radius(y, radius, count_only=True, return_distance=False) + ny = np.array(ny) - 1.0 + + mi = ( + digamma(n_samples) + + digamma(n_neighbors) + - np.mean(digamma(nx + 1)) + - np.mean(digamma(ny + 1)) + ) + + return max(0, mi) + + +def _compute_mi_cd(c, d, n_neighbors): + """Compute mutual information between continuous and discrete variables. + + Parameters + ---------- + c : ndarray, shape (n_samples,) + Samples of a continuous random variable. + + d : ndarray, shape (n_samples,) + Samples of a discrete random variable. + + n_neighbors : int + Number of nearest neighbors to search for each point, see [1]_. + + Returns + ------- + mi : float + Estimated mutual information in nat units. If it turned out to be + negative it is replaced by 0. 
+ + Notes + ----- + True mutual information can't be negative. If its estimate by a numerical + method is negative, it means (providing the method is adequate) that the + mutual information is close to 0 and replacing it by 0 is a reasonable + strategy. + + References + ---------- + .. [1] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + """ + n_samples = c.shape[0] + c = c.reshape((-1, 1)) + + radius = np.empty(n_samples) + label_counts = np.empty(n_samples) + k_all = np.empty(n_samples) + nn = NearestNeighbors() + for label in np.unique(d): + mask = d == label + count = np.sum(mask) + if count > 1: + k = min(n_neighbors, count - 1) + nn.set_params(n_neighbors=k) + nn.fit(c[mask]) + r = nn.kneighbors()[0] + radius[mask] = np.nextafter(r[:, -1], 0) + k_all[mask] = k + label_counts[mask] = count + + # Ignore points with unique labels. + mask = label_counts > 1 + n_samples = np.sum(mask) + label_counts = label_counts[mask] + k_all = k_all[mask] + c = c[mask] + radius = radius[mask] + + kd = KDTree(c) + m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) + m_all = np.array(m_all) + + mi = ( + digamma(n_samples) + + np.mean(digamma(k_all)) + - np.mean(digamma(label_counts)) + - np.mean(digamma(m_all)) + ) + + return max(0, mi) + + +def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3): + """Compute mutual information between two variables. + + This is a simple wrapper which selects a proper function to call based on + whether `x` and `y` are discrete or not. + """ + if x_discrete and y_discrete: + return mutual_info_score(x, y) + elif x_discrete and not y_discrete: + return _compute_mi_cd(y, x, n_neighbors) + elif not x_discrete and y_discrete: + return _compute_mi_cd(x, y, n_neighbors) + else: + return _compute_mi_cc(x, y, n_neighbors) + + +def _iterate_columns(X, columns=None): + """Iterate over columns of a matrix. + + Parameters + ---------- + X : ndarray or csc_matrix, shape (n_samples, n_features) + Matrix over which to iterate. + + columns : iterable or None, default=None + Indices of columns to iterate over. If None, iterate over all columns. + + Yields + ------ + x : ndarray, shape (n_samples,) + Columns of `X` in dense format. + """ + if columns is None: + columns = range(X.shape[1]) + + if issparse(X): + for i in columns: + x = np.zeros(X.shape[0]) + start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1] + x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr] + yield x + else: + for i in columns: + yield X[:, i] + + +def _estimate_mi( + X, + y, + discrete_features="auto", + discrete_target=False, + n_neighbors=3, + copy=True, + random_state=None, +): + """Estimate mutual information between the features and the target. + + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : {'auto', bool, array-like}, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. If array, then it should be either a boolean mask + with shape (n_features,) or array with indices of discrete features. + If 'auto', it is assigned to False for dense `X` and to True for + sparse `X`. + + discrete_target : bool, default=False + Whether to consider `y` as a discrete variable. + + n_neighbors : int, default=3 + Number of neighbors to use for MI estimation for continuous variables, + see [1]_ and [2]_. 
Higher values reduce variance of the estimation, but + could introduce a bias. + + copy : bool, default=True + Whether to make a copy of the given data. If set to False, the initial + data will be overwritten. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for adding small noise to + continuous variables in order to remove repeated values. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + mi : ndarray, shape (n_features,) + Estimated mutual information between each feature and the target in + nat units. A negative value will be replaced by 0. + + References + ---------- + .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + .. [2] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + """ + X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target) + n_samples, n_features = X.shape + + if isinstance(discrete_features, (str, bool)): + if isinstance(discrete_features, str): + if discrete_features == "auto": + discrete_features = issparse(X) + else: + raise ValueError("Invalid string value for discrete_features.") + discrete_mask = np.empty(n_features, dtype=bool) + discrete_mask.fill(discrete_features) + else: + discrete_features = check_array(discrete_features, ensure_2d=False) + if discrete_features.dtype != "bool": + discrete_mask = np.zeros(n_features, dtype=bool) + discrete_mask[discrete_features] = True + else: + discrete_mask = discrete_features + + continuous_mask = ~discrete_mask + if np.any(continuous_mask) and issparse(X): + raise ValueError("Sparse matrix `X` can't have continuous features.") + + rng = check_random_state(random_state) + if np.any(continuous_mask): + X = X.astype(np.float64, copy=copy) + X[:, continuous_mask] = scale( + X[:, continuous_mask], with_mean=False, copy=False + ) + + # Add small noise to continuous features as advised in Kraskov et. al. + means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0)) + X[:, continuous_mask] += ( + 1e-10 + * means + * rng.standard_normal(size=(n_samples, np.sum(continuous_mask))) + ) + + if not discrete_target: + y = scale(y, with_mean=False) + y += ( + 1e-10 + * np.maximum(1, np.mean(np.abs(y))) + * rng.standard_normal(size=n_samples) + ) + + mi = [ + _compute_mi(x, y, discrete_feature, discrete_target, n_neighbors) + for x, discrete_feature in zip(_iterate_columns(X), discrete_mask) + ] + + return np.array(mi) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "copy": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def mutual_info_regression( + X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None +): + """Estimate mutual information for a continuous target variable. + + Mutual information (MI) [1]_ between two random variables is a non-negative + value, which measures the dependency between the variables. It is equal + to zero if and only if two random variables are independent, and higher + values mean higher dependency. + + The function relies on nonparametric methods based on entropy estimation + from k-nearest neighbors distances as described in [2]_ and [3]_. 
Both + methods are based on the idea originally proposed in [4]_. + + It can be used for univariate features selection, read more in the + :ref:`User Guide `. + + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : {'auto', bool, array-like}, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. If array, then it should be either a boolean mask + with shape (n_features,) or array with indices of discrete features. + If 'auto', it is assigned to False for dense `X` and to True for + sparse `X`. + + n_neighbors : int, default=3 + Number of neighbors to use for MI estimation for continuous variables, + see [2]_ and [3]_. Higher values reduce variance of the estimation, but + could introduce a bias. + + copy : bool, default=True + Whether to make a copy of the given data. If set to False, the initial + data will be overwritten. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for adding small noise to + continuous variables in order to remove repeated values. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + mi : ndarray, shape (n_features,) + Estimated mutual information between each feature and the target in + nat units. + + Notes + ----- + 1. The term "discrete features" is used instead of naming them + "categorical", because it describes the essence more accurately. + For example, pixel intensities of an image are discrete features + (but hardly categorical) and you will get better results if mark them + as such. Also note, that treating a continuous variable as discrete and + vice versa will usually give incorrect results, so be attentive about + that. + 2. True mutual information can't be negative. If its estimate turns out + to be negative, it is replaced by zero. + + References + ---------- + .. [1] `Mutual Information + `_ + on Wikipedia. + .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + .. [3] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy + of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16 + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.feature_selection import mutual_info_regression + >>> X, y = make_regression( + ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 + ... ) + >>> mutual_info_regression(X, y) + array([0.1..., 2.6... , 0.0...]) + """ + return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "copy": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def mutual_info_classif( + X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None +): + """Estimate mutual information for a discrete target variable. + + Mutual information (MI) [1]_ between two random variables is a non-negative + value, which measures the dependency between the variables. 
It is equal + to zero if and only if two random variables are independent, and higher + values mean higher dependency. + + The function relies on nonparametric methods based on entropy estimation + from k-nearest neighbors distances as described in [2]_ and [3]_. Both + methods are based on the idea originally proposed in [4]_. + + It can be used for univariate features selection, read more in the + :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : 'auto', bool or array-like, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. If array, then it should be either a boolean mask + with shape (n_features,) or array with indices of discrete features. + If 'auto', it is assigned to False for dense `X` and to True for + sparse `X`. + + n_neighbors : int, default=3 + Number of neighbors to use for MI estimation for continuous variables, + see [2]_ and [3]_. Higher values reduce variance of the estimation, but + could introduce a bias. + + copy : bool, default=True + Whether to make a copy of the given data. If set to False, the initial + data will be overwritten. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for adding small noise to + continuous variables in order to remove repeated values. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + mi : ndarray, shape (n_features,) + Estimated mutual information between each feature and the target in + nat units. + + Notes + ----- + 1. The term "discrete features" is used instead of naming them + "categorical", because it describes the essence more accurately. + For example, pixel intensities of an image are discrete features + (but hardly categorical) and you will get better results if mark them + as such. Also note, that treating a continuous variable as discrete and + vice versa will usually give incorrect results, so be attentive about + that. + 2. True mutual information can't be negative. If its estimate turns out + to be negative, it is replaced by zero. + + References + ---------- + .. [1] `Mutual Information + `_ + on Wikipedia. + .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + .. [3] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy + of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16 + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.feature_selection import mutual_info_classif + >>> X, y = make_classification( + ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1, + ... shuffle=False, random_state=42 + ... ) + >>> mutual_info_classif(X, y) + array([0.58..., 0.10..., 0.19..., 0.09... , 0. , + 0. , 0. , 0. , 0. , 0. 
]) + """ + check_classification_targets(y) + return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy, random_state) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d1b71e08609a6fdc9dc8ad6db29b96c1da0822 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py @@ -0,0 +1,792 @@ +# Authors: Alexandre Gramfort +# Vincent Michel +# Gilles Louppe +# +# License: BSD 3 clause + +"""Recursive feature elimination for feature ranking""" + +from numbers import Integral + +import numpy as np +from joblib import effective_n_jobs + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..metrics import check_scoring +from ..model_selection import check_cv +from ..model_selection._validation import _score +from ..utils._param_validation import HasMethods, Interval, RealNotInt +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.metaestimators import _safe_split, available_if +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin, _get_feature_importances + + +def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer): + """ + Return the score for a fit across one fold. + """ + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + return rfe._fit( + X_train, + y_train, + lambda estimator, features: _score( + # TODO(SLEP6): pass score_params here + estimator, + X_test[:, features], + y_test, + scorer, + score_params=None, + ), + ).scores_ + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `estimator_` if available, otherwise we check the + unfitted `estimator`. We raise the original `AttributeError` if `attr` does + not exist. This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "estimator_"): + getattr(self.estimator_, attr) + else: + getattr(self.estimator, attr) + + return True + + return check + + +class RFE(_RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator): + """Feature ranking with recursive feature elimination. + + Given an external estimator that assigns weights to features (e.g., the + coefficients of a linear model), the goal of recursive feature elimination + (RFE) is to select features by recursively considering smaller and smaller + sets of features. First, the estimator is trained on the initial set of + features and the importance of each feature is obtained either through + any specific attribute or callable. + Then, the least important features are pruned from current set of features. + That procedure is recursively repeated on the pruned set until the desired + number of features to select is eventually reached. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : ``Estimator`` instance + A supervised learning estimator with a ``fit`` method that provides + information about feature importance + (e.g. `coef_`, `feature_importances_`). + + n_features_to_select : int or float, default=None + The number of features to select. If `None`, half of the features are + selected. 
If integer, the parameter is the absolute number of features + to select. If float between 0 and 1, it is the fraction of features to + select. + + .. versionchanged:: 0.24 + Added float values for fractions. + + step : int or float, default=1 + If greater than or equal to 1, then ``step`` corresponds to the + (integer) number of features to remove at each iteration. + If within (0.0, 1.0), then ``step`` corresponds to the percentage + (rounded down) of features to remove at each iteration. + + verbose : int, default=0 + Controls verbosity of output. + + importance_getter : str or callable, default='auto' + If 'auto', uses the feature importance either through a `coef_` + or `feature_importances_` attributes of estimator. + + Also accepts a string that specifies an attribute name/path + for extracting feature importance (implemented with `attrgetter`). + For example, give `regressor_.coef_` in case of + :class:`~sklearn.compose.TransformedTargetRegressor` or + `named_steps.clf.feature_importances_` in case of + class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. + + If `callable`, overrides the default feature importance getter. + The callable is passed with the fitted estimator and it should + return importance for each feature. + + .. versionadded:: 0.24 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The classes labels. Only available when `estimator` is a classifier. + + estimator_ : ``Estimator`` instance + The fitted estimator used to select features. + + n_features_ : int + The number of selected features. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + ranking_ : ndarray of shape (n_features,) + The feature ranking, such that ``ranking_[i]`` corresponds to the + ranking position of the i-th feature. Selected (i.e., estimated + best) features are assigned rank 1. + + support_ : ndarray of shape (n_features,) + The mask of selected features. + + See Also + -------- + RFECV : Recursive feature elimination with built-in cross-validated + selection of the best number of features. + SelectFromModel : Feature selection based on thresholds of importance + weights. + SequentialFeatureSelector : Sequential cross-validation based feature + selection. Does not rely on importance weights. + + Notes + ----- + Allows NaN/Inf in the input if the underlying estimator does as well. + + References + ---------- + + .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection + for cancer classification using support vector machines", + Mach. Learn., 46(1-3), 389--422, 2002. + + Examples + -------- + The following example shows how to retrieve the 5 most informative + features in the Friedman #1 dataset. 
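+    With ``step=1``, this takes ``n_features - n_features_to_select``
+    elimination fits (5 fits here) plus one final refit on the selected
+    features.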
+ + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.feature_selection import RFE + >>> from sklearn.svm import SVR + >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + >>> estimator = SVR(kernel="linear") + >>> selector = RFE(estimator, n_features_to_select=5, step=1) + >>> selector = selector.fit(X, y) + >>> selector.support_ + array([ True, True, True, True, True, False, False, False, False, + False]) + >>> selector.ranking_ + array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "n_features_to_select": [ + None, + Interval(RealNotInt, 0, 1, closed="right"), + Interval(Integral, 0, None, closed="neither"), + ], + "step": [ + Interval(Integral, 0, None, closed="neither"), + Interval(RealNotInt, 0, 1, closed="neither"), + ], + "verbose": ["verbose"], + "importance_getter": [str, callable], + } + + def __init__( + self, + estimator, + *, + n_features_to_select=None, + step=1, + verbose=0, + importance_getter="auto", + ): + self.estimator = estimator + self.n_features_to_select = n_features_to_select + self.step = step + self.importance_getter = importance_getter + self.verbose = verbose + + @property + def _estimator_type(self): + return self.estimator._estimator_type + + @property + def classes_(self): + """Classes labels available when `estimator` is a classifier. + + Returns + ------- + ndarray of shape (n_classes,) + """ + return self.estimator_.classes_ + + @_fit_context( + # RFE.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit the RFE model and then the underlying estimator on the selected features. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) + The target values. + + **fit_params : dict + Additional parameters passed to the `fit` method of the underlying + estimator. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", **fit_params) + return self._fit(X, y, **fit_params) + + def _fit(self, X, y, step_score=None, **fit_params): + # Parameter step_score controls the calculation of self.scores_ + # step_score is not exposed to users + # and is used when implementing RFECV + # self.scores_ will not be calculated when calling _fit through fit + + X, y = self._validate_data( + X, + y, + accept_sparse="csc", + ensure_min_features=2, + force_all_finite=False, + multi_output=True, + ) + + # Initialization + n_features = X.shape[1] + if self.n_features_to_select is None: + n_features_to_select = n_features // 2 + elif isinstance(self.n_features_to_select, Integral): # int + n_features_to_select = self.n_features_to_select + else: # float + n_features_to_select = int(n_features * self.n_features_to_select) + + if 0.0 < self.step < 1.0: + step = int(max(1, self.step * n_features)) + else: + step = int(self.step) + + support_ = np.ones(n_features, dtype=bool) + ranking_ = np.ones(n_features, dtype=int) + + if step_score: + self.scores_ = [] + + # Elimination + while np.sum(support_) > n_features_to_select: + # Remaining features + features = np.arange(n_features)[support_] + + # Rank the remaining features + estimator = clone(self.estimator) + if self.verbose > 0: + print("Fitting estimator with %d features." 
% np.sum(support_)) + + estimator.fit(X[:, features], y, **fit_params) + + # Get importance and rank them + importances = _get_feature_importances( + estimator, + self.importance_getter, + transform_func="square", + ) + ranks = np.argsort(importances) + + # for sparse case ranks is matrix + ranks = np.ravel(ranks) + + # Eliminate the worse features + threshold = min(step, np.sum(support_) - n_features_to_select) + + # Compute step score on the previous selection iteration + # because 'estimator' must use features + # that have not been eliminated yet + if step_score: + self.scores_.append(step_score(estimator, features)) + support_[features[ranks][:threshold]] = False + ranking_[np.logical_not(support_)] += 1 + + # Set final attributes + features = np.arange(n_features)[support_] + self.estimator_ = clone(self.estimator) + self.estimator_.fit(X[:, features], y, **fit_params) + + # Compute step score when only n_features_to_select features left + if step_score: + self.scores_.append(step_score(self.estimator_, features)) + self.n_features_ = support_.sum() + self.support_ = support_ + self.ranking_ = ranking_ + + return self + + @available_if(_estimator_has("predict")) + def predict(self, X): + """Reduce X to the selected features and predict using the estimator. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + Returns + ------- + y : array of shape [n_samples] + The predicted target values. + """ + check_is_fitted(self) + return self.estimator_.predict(self.transform(X)) + + @available_if(_estimator_has("score")) + def score(self, X, y, **fit_params): + """Reduce X to the selected features and return the score of the estimator. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + y : array of shape [n_samples] + The target values. + + **fit_params : dict + Parameters to pass to the `score` method of the underlying + estimator. + + .. versionadded:: 1.0 + + Returns + ------- + score : float + Score of the underlying base estimator computed with the selected + features returned by `rfe.transform(X)` and `y`. + """ + check_is_fitted(self) + return self.estimator_.score(self.transform(X), y, **fit_params) + + def _get_support_mask(self): + check_is_fitted(self) + return self.support_ + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Compute the decision function of ``X``. + + Parameters + ---------- + X : {array-like or sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + score : array, shape = [n_samples, n_classes] or [n_samples] + The decision function of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + Regression and binary classification produce an array of shape + [n_samples]. + """ + check_is_fitted(self) + return self.estimator_.decision_function(self.transform(X)) + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Predict class probabilities for X. + + Parameters + ---------- + X : {array-like or sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + p : array of shape (n_samples, n_classes) + The class probabilities of the input samples. 
The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.estimator_.predict_proba(self.transform(X)) + + @available_if(_estimator_has("predict_log_proba")) + def predict_log_proba(self, X): + """Predict class log-probabilities for X. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + Returns + ------- + p : array of shape (n_samples, n_classes) + The class log-probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.estimator_.predict_log_proba(self.transform(X)) + + def _more_tags(self): + tags = { + "poor_score": True, + "requires_y": True, + "allow_nan": True, + } + + # Adjust allow_nan if estimator explicitly defines `allow_nan`. + if hasattr(self.estimator, "_get_tags"): + tags["allow_nan"] = self.estimator._get_tags()["allow_nan"] + + return tags + + +class RFECV(RFE): + """Recursive feature elimination with cross-validation to select features. + + The number of features selected is tuned automatically by fitting an :class:`RFE` + selector on the different cross-validation splits (provided by the `cv` parameter). + The performance of the :class:`RFE` selector are evaluated using `scorer` for + different number of selected features and aggregated together. Finally, the scores + are averaged across folds and the number of features selected is set to the number + of features that maximize the cross-validation score. + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : ``Estimator`` instance + A supervised learning estimator with a ``fit`` method that provides + information about feature importance either through a ``coef_`` + attribute or through a ``feature_importances_`` attribute. + + step : int or float, default=1 + If greater than or equal to 1, then ``step`` corresponds to the + (integer) number of features to remove at each iteration. + If within (0.0, 1.0), then ``step`` corresponds to the percentage + (rounded down) of features to remove at each iteration. + Note that the last iteration may remove fewer than ``step`` features in + order to reach ``min_features_to_select``. + + min_features_to_select : int, default=1 + The minimum number of features to be selected. This number of features + will always be scored, even if the difference between the original + feature count and ``min_features_to_select`` isn't divisible by + ``step``. + + .. versionadded:: 0.20 + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. If the + estimator is a classifier or if ``y`` is neither binary nor multiclass, + :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value of None changed from 3-fold to 5-fold. 
+
+    scoring : str, callable or None, default=None
+        A string (see model evaluation documentation) or
+        a scorer callable object / function with signature
+        ``scorer(estimator, X, y)``.
+
+    verbose : int, default=0
+        Controls verbosity of output.
+
+    n_jobs : int or None, default=None
+        Number of cores to run in parallel while fitting across folds.
+        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+        ``-1`` means using all processors. See :term:`Glossary `
+        for more details.
+
+        .. versionadded:: 0.18
+
+    importance_getter : str or callable, default='auto'
+        If 'auto', uses the feature importance either through a `coef_`
+        or a `feature_importances_` attribute of the estimator.
+
+        Also accepts a string that specifies an attribute name/path
+        for extracting feature importance.
+        For example, give `regressor_.coef_` in case of
+        :class:`~sklearn.compose.TransformedTargetRegressor` or
+        `named_steps.clf.feature_importances_` in case of
+        :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
+
+        If `callable`, overrides the default feature importance getter.
+        The callable is passed the fitted estimator and should return the
+        importance of each feature.
+
+        .. versionadded:: 0.24
+
+    Attributes
+    ----------
+    classes_ : ndarray of shape (n_classes,)
+        The class labels. Only available when `estimator` is a classifier.
+
+    estimator_ : ``Estimator`` instance
+        The fitted estimator used to select features.
+
+    cv_results_ : dict of ndarrays
+        A dict with keys:
+
+        split(k)_test_score : ndarray of shape (n_subsets_of_features,)
+            The cross-validation scores for the (k)th fold.
+
+        mean_test_score : ndarray of shape (n_subsets_of_features,)
+            Mean of scores over the folds.
+
+        std_test_score : ndarray of shape (n_subsets_of_features,)
+            Standard deviation of scores over the folds.
+
+        .. versionadded:: 1.0
+
+    n_features_ : int
+        The number of selected features with cross-validation.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`. Only defined if the
+        underlying estimator exposes such an attribute when fit.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    ranking_ : ndarray of shape (n_features,)
+        The feature ranking, such that `ranking_[i]` corresponds to the
+        ranking position of the i-th feature. Selected (i.e., estimated
+        best) features are assigned rank 1.
+
+    support_ : ndarray of shape (n_features,)
+        The mask of selected features.
+
+    See Also
+    --------
+    RFE : Recursive feature elimination.
+
+    Notes
+    -----
+    The size of all values in ``cv_results_`` is equal to
+    ``ceil((n_features - min_features_to_select) / step) + 1``,
+    where step is the number of features removed at each iteration.
+
+    Allows NaN/Inf in the input if the underlying estimator does as well.
+
+    References
+    ----------
+
+    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
+           for cancer classification using support vector machines",
+           Mach. Learn., 46(1-3), 389--422, 2002.
+
+    Examples
+    --------
+    The following example shows how to retrieve the 5 informative features,
+    which are not known a priori, in the Friedman #1 dataset.
+ + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.feature_selection import RFECV + >>> from sklearn.svm import SVR + >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + >>> estimator = SVR(kernel="linear") + >>> selector = RFECV(estimator, step=1, cv=5) + >>> selector = selector.fit(X, y) + >>> selector.support_ + array([ True, True, True, True, True, False, False, False, False, + False]) + >>> selector.ranking_ + array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) + """ + + _parameter_constraints: dict = { + **RFE._parameter_constraints, + "min_features_to_select": [Interval(Integral, 0, None, closed="neither")], + "cv": ["cv_object"], + "scoring": [None, str, callable], + "n_jobs": [None, Integral], + } + _parameter_constraints.pop("n_features_to_select") + + def __init__( + self, + estimator, + *, + step=1, + min_features_to_select=1, + cv=None, + scoring=None, + verbose=0, + n_jobs=None, + importance_getter="auto", + ): + self.estimator = estimator + self.step = step + self.importance_getter = importance_getter + self.cv = cv + self.scoring = scoring + self.verbose = verbose + self.n_jobs = n_jobs + self.min_features_to_select = min_features_to_select + + @_fit_context( + # RFECV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, groups=None): + """Fit the RFE model and automatically tune the number of selected features. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the total number of features. + + y : array-like of shape (n_samples,) + Target values (integers for classification, real numbers for + regression). + + groups : array-like of shape (n_samples,) or None, default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`~sklearn.model_selection.GroupKFold`). + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", groups=groups) + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + ensure_min_features=2, + force_all_finite=False, + multi_output=True, + ) + + # Initialization + cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) + scorer = check_scoring(self.estimator, scoring=self.scoring) + n_features = X.shape[1] + + if 0.0 < self.step < 1.0: + step = int(max(1, self.step * n_features)) + else: + step = int(self.step) + + # Build an RFE object, which will evaluate and score each possible + # feature count, down to self.min_features_to_select + rfe = RFE( + estimator=self.estimator, + n_features_to_select=self.min_features_to_select, + importance_getter=self.importance_getter, + step=self.step, + verbose=self.verbose, + ) + + # Determine the number of subsets of features by fitting across + # the train folds and choosing the "features_to_select" parameter + # that gives the least averaged error across all folds. + + # Note that joblib raises a non-picklable error for bound methods + # even if n_jobs is set to 1 with the default multiprocessing + # backend. + # This branching is done so that to + # make sure that user code that sets n_jobs to 1 + # and provides bound methods as scorers is not broken with the + # addition of n_jobs parameter in version 0.18. 
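+        #
+        # Each call to `_rfe_single_fit` returns the list of step scores for
+        # one CV split, so `np.array(scores)` below has shape
+        # (n_splits, n_subsets_of_features), with subsets ordered from
+        # `n_features` down to `min_features_to_select` features.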
+ + if effective_n_jobs(self.n_jobs) == 1: + parallel, func = list, _rfe_single_fit + else: + parallel = Parallel(n_jobs=self.n_jobs) + func = delayed(_rfe_single_fit) + + scores = parallel( + func(rfe, self.estimator, X, y, train, test, scorer) + for train, test in cv.split(X, y, groups) + ) + + scores = np.array(scores) + scores_sum = np.sum(scores, axis=0) + scores_sum_rev = scores_sum[::-1] + argmax_idx = len(scores_sum) - np.argmax(scores_sum_rev) - 1 + n_features_to_select = max( + n_features - (argmax_idx * step), self.min_features_to_select + ) + + # Re-execute an elimination with best_k over the whole set + rfe = RFE( + estimator=self.estimator, + n_features_to_select=n_features_to_select, + step=self.step, + importance_getter=self.importance_getter, + verbose=self.verbose, + ) + + rfe.fit(X, y) + + # Set final attributes + self.support_ = rfe.support_ + self.n_features_ = rfe.n_features_ + self.ranking_ = rfe.ranking_ + self.estimator_ = clone(self.estimator) + self.estimator_.fit(self._transform(X), y) + + # reverse to stay consistent with before + scores_rev = scores[:, ::-1] + self.cv_results_ = {} + self.cv_results_["mean_test_score"] = np.mean(scores_rev, axis=0) + self.cv_results_["std_test_score"] = np.std(scores_rev, axis=0) + + for i in range(scores.shape[0]): + self.cv_results_[f"split{i}_test_score"] = scores_rev[i] + + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..5a90d46c9758b47a92121b91bd6e049207dc1c48 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py @@ -0,0 +1,300 @@ +""" +Sequential feature selection +""" +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..metrics import get_scorer_names +from ..model_selection import check_cv, cross_val_score +from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +class SequentialFeatureSelector( + _RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator +): + """Transformer that performs Sequential Feature Selection. + + This Sequential Feature Selector adds (forward selection) or + removes (backward selection) features to form a feature subset in a + greedy fashion. At each stage, this estimator chooses the best feature to + add or remove based on the cross-validation score of an estimator. In + the case of unsupervised learning, this Sequential Feature Selector + looks only at the features (X), not the desired outputs (y). + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.24 + + Parameters + ---------- + estimator : estimator instance + An unfitted estimator. + + n_features_to_select : "auto", int or float, default="auto" + If `"auto"`, the behaviour depends on the `tol` parameter: + + - if `tol` is not `None`, then features are selected while the score + change does not exceed `tol`. + - otherwise, half of the features are selected. + + If integer, the parameter is the absolute number of features to select. + If float between 0 and 1, it is the fraction of features to select. + + .. 
versionadded:: 1.1
+           The option `"auto"` was added in version 1.1.
+
+        .. versionchanged:: 1.3
+           The default changed from `"warn"` to `"auto"` in 1.3.
+
+    tol : float, default=None
+        If the score does not improve by at least `tol` between two
+        consecutive feature additions or removals, the search stops.
+
+        `tol` can be negative when removing features using `direction="backward"`.
+        This can be useful to reduce the number of features at the cost of a
+        small decrease in the score.
+
+        `tol` is enabled only when `n_features_to_select` is `"auto"`.
+
+        .. versionadded:: 1.1
+
+    direction : {'forward', 'backward'}, default='forward'
+        Whether to perform forward selection or backward selection.
+
+    scoring : str or callable, default=None
+        A single str (see :ref:`scoring_parameter`) or a callable
+        (see :ref:`scoring`) to evaluate the predictions on the test set.
+
+        NOTE that when using a custom scorer, it should return a single
+        value.
+
+        If None, the estimator's score method is used.
+
+    cv : int, cross-validation generator or an iterable, default=5
+        Determines the cross-validation splitting strategy.
+        Possible inputs for cv are:
+
+        - None, to use the default 5-fold cross validation,
+        - integer, to specify the number of folds in a `(Stratified)KFold`,
+        - :term:`CV splitter`,
+        - An iterable yielding (train, test) splits as arrays of indices.
+
+        For integer/None inputs, if the estimator is a classifier and ``y`` is
+        either binary or multiclass,
+        :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
+        cases, :class:`~sklearn.model_selection.KFold` is used. These splitters
+        are instantiated with `shuffle=False` so the splits will be the same
+        across calls.
+
+        Refer to the :ref:`User Guide ` for the various
+        cross-validation strategies that can be used here.
+
+    n_jobs : int, default=None
+        Number of jobs to run in parallel. When evaluating a new feature to
+        add or remove, the cross-validation procedure is parallel over the
+        folds.
+        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+        ``-1`` means using all processors. See :term:`Glossary `
+        for more details.
+
+    Attributes
+    ----------
+    n_features_in_ : int
+        Number of features seen during :term:`fit`. Only defined if the
+        underlying estimator exposes such an attribute when fit.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    n_features_to_select_ : int
+        The number of features that were selected.
+
+    support_ : ndarray of shape (n_features,), dtype=bool
+        The mask of selected features.
+
+    See Also
+    --------
+    GenericUnivariateSelect : Univariate feature selector with configurable
+        strategy.
+    RFE : Recursive feature elimination based on importance weights.
+    RFECV : Recursive feature elimination based on importance weights, with
+        automatic selection of the number of features.
+    SelectFromModel : Feature selection based on thresholds of importance
+        weights.
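+
+    Notes
+    -----
+    The greedy search is expensive: every candidate feature is scored with a
+    full cross-validation. Selecting `k` features out of `n_features` by
+    forward selection therefore costs roughly
+    ``n_splits * (n_features + (n_features - 1) + ... + (n_features - k + 1))``
+    fits of the estimator; backward selection behaves analogously on the
+    complementary set.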
+ + Examples + -------- + >>> from sklearn.feature_selection import SequentialFeatureSelector + >>> from sklearn.neighbors import KNeighborsClassifier + >>> from sklearn.datasets import load_iris + >>> X, y = load_iris(return_X_y=True) + >>> knn = KNeighborsClassifier(n_neighbors=3) + >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3) + >>> sfs.fit(X, y) + SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3), + n_features_to_select=3) + >>> sfs.get_support() + array([ True, False, True, True]) + >>> sfs.transform(X).shape + (150, 3) + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "n_features_to_select": [ + StrOptions({"auto"}), + Interval(RealNotInt, 0, 1, closed="right"), + Interval(Integral, 0, None, closed="neither"), + ], + "tol": [None, Interval(Real, None, None, closed="neither")], + "direction": [StrOptions({"forward", "backward"})], + "scoring": [None, StrOptions(set(get_scorer_names())), callable], + "cv": ["cv_object"], + "n_jobs": [None, Integral], + } + + def __init__( + self, + estimator, + *, + n_features_to_select="auto", + tol=None, + direction="forward", + scoring=None, + cv=5, + n_jobs=None, + ): + self.estimator = estimator + self.n_features_to_select = n_features_to_select + self.tol = tol + self.direction = direction + self.scoring = scoring + self.cv = cv + self.n_jobs = n_jobs + + @_fit_context( + # SequentialFeatureSelector.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Learn the features to select from X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of predictors. + + y : array-like of shape (n_samples,), default=None + Target values. This parameter may be ignored for + unsupervised learning. + + Returns + ------- + self : object + Returns the instance itself. + """ + tags = self._get_tags() + X = self._validate_data( + X, + accept_sparse="csc", + ensure_min_features=2, + force_all_finite=not tags.get("allow_nan", True), + ) + n_features = X.shape[1] + + if self.n_features_to_select == "auto": + if self.tol is not None: + # With auto feature selection, `n_features_to_select_` will be updated + # to `support_.sum()` after features are selected. 
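+                # Until then, use the largest admissible value: the greedy
+                # loop below can then run for up to `n_features - 1` rounds
+                # and stops early once the score gain falls below `tol`.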
+ self.n_features_to_select_ = n_features - 1 + else: + self.n_features_to_select_ = n_features // 2 + elif isinstance(self.n_features_to_select, Integral): + if self.n_features_to_select >= n_features: + raise ValueError("n_features_to_select must be < n_features.") + self.n_features_to_select_ = self.n_features_to_select + elif isinstance(self.n_features_to_select, Real): + self.n_features_to_select_ = int(n_features * self.n_features_to_select) + + if self.tol is not None and self.tol < 0 and self.direction == "forward": + raise ValueError("tol must be positive when doing forward selection") + + cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) + + cloned_estimator = clone(self.estimator) + + # the current mask corresponds to the set of features: + # - that we have already *selected* if we do forward selection + # - that we have already *excluded* if we do backward selection + current_mask = np.zeros(shape=n_features, dtype=bool) + n_iterations = ( + self.n_features_to_select_ + if self.n_features_to_select == "auto" or self.direction == "forward" + else n_features - self.n_features_to_select_ + ) + + old_score = -np.inf + is_auto_select = self.tol is not None and self.n_features_to_select == "auto" + for _ in range(n_iterations): + new_feature_idx, new_score = self._get_best_new_feature_score( + cloned_estimator, X, y, cv, current_mask + ) + if is_auto_select and ((new_score - old_score) < self.tol): + break + + old_score = new_score + current_mask[new_feature_idx] = True + + if self.direction == "backward": + current_mask = ~current_mask + + self.support_ = current_mask + self.n_features_to_select_ = self.support_.sum() + + return self + + def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask): + # Return the best new feature and its score to add to the current_mask, + # i.e. return the best new feature and its score to add (resp. remove) + # when doing forward selection (resp. backward selection). + # Feature will be added if the current score and past score are greater + # than tol when n_feature is auto, + candidate_feature_indices = np.flatnonzero(~current_mask) + scores = {} + for feature_idx in candidate_feature_indices: + candidate_mask = current_mask.copy() + candidate_mask[feature_idx] = True + if self.direction == "backward": + candidate_mask = ~candidate_mask + X_new = X[:, candidate_mask] + scores[feature_idx] = cross_val_score( + estimator, + X_new, + y, + cv=cv, + scoring=self.scoring, + n_jobs=self.n_jobs, + ).mean() + new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx]) + return new_feature_idx, scores[new_feature_idx] + + def _get_support_mask(self): + check_is_fitted(self) + return self.support_ + + def _more_tags(self): + return { + "allow_nan": _safe_tags(self.estimator, key="allow_nan"), + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py new file mode 100644 index 0000000000000000000000000000000000000000..df1b5072ce7415c21b1e3df922e742d3676b168c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py @@ -0,0 +1,1161 @@ +"""Univariate features selection.""" + +# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay. +# L. Buitinck, A. 
Joly +# License: BSD 3 clause + + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import special, stats +from scipy.sparse import issparse + +from ..base import BaseEstimator, _fit_context +from ..preprocessing import LabelBinarizer +from ..utils import as_float_array, check_array, check_X_y, safe_mask, safe_sqr +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import row_norms, safe_sparse_dot +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +def _clean_nans(scores): + """ + Fixes Issue #1240: NaNs can't be properly compared, so change them to the + smallest value of scores's dtype. -inf seems to be unreliable. + """ + # XXX where should this function be called? fit? scoring functions + # themselves? + scores = as_float_array(scores, copy=True) + scores[np.isnan(scores)] = np.finfo(scores.dtype).min + return scores + + +###################################################################### +# Scoring functions + + +# The following function is a rewriting of scipy.stats.f_oneway +# Contrary to the scipy.stats.f_oneway implementation it does not +# copy the data while keeping the inputs unchanged. +def f_oneway(*args): + """Perform a 1-way ANOVA. + + The one-way ANOVA tests the null hypothesis that 2 or more groups have + the same population mean. The test is applied to samples from two or + more groups, possibly with differing sizes. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *args : {array-like, sparse matrix} + Sample1, sample2... The sample measurements should be given as + arguments. + + Returns + ------- + f_statistic : float + The computed F-value of the test. + p_value : float + The associated p-value from the F-distribution. + + Notes + ----- + The ANOVA test has important assumptions that must be satisfied in order + for the associated p-value to be valid. + + 1. The samples are independent + 2. Each sample is from a normally distributed population + 3. The population standard deviations of the groups are all equal. This + property is known as homoscedasticity. + + If these assumptions are not true for a given set of data, it may still be + possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although + with some loss of power. + + The algorithm is from Heiman[2], pp.394-7. + + See ``scipy.stats.f_oneway`` that should give the same results while + being less efficient. + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 14. + http://vassarstats.net/textbook + + .. [2] Heiman, G.W. Research Methods in Statistics. 2002. 
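+
+    Examples
+    --------
+    A minimal sketch on synthetic data (statistic values depend on the
+    random draw, so only the output shapes are shown):
+
+    >>> import numpy as np
+    >>> from sklearn.feature_selection import f_oneway
+    >>> rng = np.random.RandomState(0)
+    >>> X1, X2 = rng.randn(30, 3), 2 + rng.randn(30, 3)
+    >>> f_statistic, p_value = f_oneway(X1, X2)
+    >>> f_statistic.shape, p_value.shape
+    ((3,), (3,))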
+ """ + n_classes = len(args) + args = [as_float_array(a) for a in args] + n_samples_per_class = np.array([a.shape[0] for a in args]) + n_samples = np.sum(n_samples_per_class) + ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args) + sums_args = [np.asarray(a.sum(axis=0)) for a in args] + square_of_sums_alldata = sum(sums_args) ** 2 + square_of_sums_args = [s**2 for s in sums_args] + sstot = ss_alldata - square_of_sums_alldata / float(n_samples) + ssbn = 0.0 + for k, _ in enumerate(args): + ssbn += square_of_sums_args[k] / n_samples_per_class[k] + ssbn -= square_of_sums_alldata / float(n_samples) + sswn = sstot - ssbn + dfbn = n_classes - 1 + dfwn = n_samples - n_classes + msb = ssbn / float(dfbn) + msw = sswn / float(dfwn) + constant_features_idx = np.where(msw == 0.0)[0] + if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size: + warnings.warn("Features %s are constant." % constant_features_idx, UserWarning) + f = msb / msw + # flatten matrix to vector in sparse case + f = np.asarray(f).ravel() + prob = special.fdtrc(dfbn, dfwn, f) + return f, prob + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def f_classif(X, y): + """Compute the ANOVA F-value for the provided sample. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The set of regressors that will be tested sequentially. + + y : array-like of shape (n_samples,) + The target vector. + + Returns + ------- + f_statistic : ndarray of shape (n_features,) + F-statistic for each feature. + + p_values : ndarray of shape (n_features,) + P-values associated with the F-statistic. + + See Also + -------- + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.feature_selection import f_classif + >>> X, y = make_classification( + ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1, + ... shuffle=False, random_state=42 + ... ) + >>> f_statistic, p_values = f_classif(X, y) + >>> f_statistic + array([2.2...e+02, 7.0...e-01, 1.6...e+00, 9.3...e-01, + 5.4...e+00, 3.2...e-01, 4.7...e-02, 5.7...e-01, + 7.5...e-01, 8.9...e-02]) + >>> p_values + array([7.1...e-27, 4.0...e-01, 1.9...e-01, 3.3...e-01, + 2.2...e-02, 5.7...e-01, 8.2...e-01, 4.5...e-01, + 3.8...e-01, 7.6...e-01]) + """ + X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"]) + args = [X[safe_mask(X, y == k)] for k in np.unique(y)] + return f_oneway(*args) + + +def _chisquare(f_obs, f_exp): + """Fast replacement for scipy.stats.chisquare. + + Version from https://github.com/scipy/scipy/pull/2525 with additional + optimizations. + """ + f_obs = np.asarray(f_obs, dtype=np.float64) + + k = len(f_obs) + # Reuse f_obs for chi-squared statistics + chisq = f_obs + chisq -= f_exp + chisq **= 2 + with np.errstate(invalid="ignore"): + chisq /= f_exp + chisq = chisq.sum(axis=0) + return chisq, special.chdtrc(k - 1, chisq) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def chi2(X, y): + """Compute chi-squared stats between each non-negative feature and class. 
+ + This score can be used to select the `n_features` features with the + highest values for the test chi-squared statistic from X, which must + contain only **non-negative features** such as booleans or frequencies + (e.g., term counts in document classification), relative to the classes. + + Recall that the chi-square test measures dependence between stochastic + variables, so using this function "weeds out" the features that are the + most likely to be independent of class and therefore irrelevant for + classification. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample vectors. + + y : array-like of shape (n_samples,) + Target vector (class labels). + + Returns + ------- + chi2 : ndarray of shape (n_features,) + Chi2 statistics for each feature. + + p_values : ndarray of shape (n_features,) + P-values for each feature. + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + f_regression : F-value between label/feature for regression tasks. + + Notes + ----- + Complexity of this algorithm is O(n_classes * n_features). + + Examples + -------- + >>> import numpy as np + >>> from sklearn.feature_selection import chi2 + >>> X = np.array([[1, 1, 3], + ... [0, 1, 5], + ... [5, 4, 1], + ... [6, 6, 2], + ... [1, 4, 0], + ... [0, 0, 0]]) + >>> y = np.array([1, 1, 0, 0, 2, 2]) + >>> chi2_stats, p_values = chi2(X, y) + >>> chi2_stats + array([15.3..., 6.5 , 8.9...]) + >>> p_values + array([0.0004..., 0.0387..., 0.0116... ]) + """ + + # XXX: we might want to do some of the following in logspace instead for + # numerical stability. + # Converting X to float allows getting better performance for the + # safe_sparse_dot call made below. + X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32)) + if np.any((X.data if issparse(X) else X) < 0): + raise ValueError("Input X must be non-negative.") + + # Use a sparse representation for Y by default to reduce memory usage when + # y has many unique classes. + Y = LabelBinarizer(sparse_output=True).fit_transform(y) + if Y.shape[1] == 1: + Y = Y.toarray() + Y = np.append(1 - Y, Y, axis=1) + + observed = safe_sparse_dot(Y.T, X) # n_classes * n_features + + if issparse(observed): + # convert back to a dense array before calling _chisquare + # XXX: could _chisquare be reimplement to accept sparse matrices for + # cases where both n_classes and n_features are large (and X is + # sparse)? + observed = observed.toarray() + + feature_count = X.sum(axis=0).reshape(1, -1) + class_prob = Y.mean(axis=0).reshape(1, -1) + expected = np.dot(class_prob.T, feature_count) + + return _chisquare(observed, expected) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "center": ["boolean"], + "force_finite": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def r_regression(X, y, *, center=True, force_finite=True): + """Compute Pearson's r for each features and the target. + + Pearson's r is also known as the Pearson correlation coefficient. + + Linear model for testing the individual effect of each of many regressors. + This is a scoring function to be used in a feature selection procedure, not + a free standing feature selection procedure. + + The cross correlation between each regressor and the target is computed + as:: + + E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y)) + + For more on usage see the :ref:`User Guide `. + + .. 
versionadded:: 1.0 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix. + + y : array-like of shape (n_samples,) + The target vector. + + center : bool, default=True + Whether or not to center the data matrix `X` and the target vector `y`. + By default, `X` and `y` will be centered. + + force_finite : bool, default=True + Whether or not to force the Pearson's R correlation to be finite. + In the particular case where some features in `X` or the target `y` + are constant, the Pearson's R correlation is not defined. When + `force_finite=False`, a correlation of `np.nan` is returned to + acknowledge this case. When `force_finite=True`, this value will be + forced to a minimal correlation of `0.0`. + + .. versionadded:: 1.1 + + Returns + ------- + correlation_coefficient : ndarray of shape (n_features,) + Pearson's R correlation coefficients of features. + + See Also + -------- + f_regression: Univariate linear regression tests returning f-statistic + and p-values. + mutual_info_regression: Mutual information for a continuous target. + f_classif: ANOVA F-value between label/feature for classification tasks. + chi2: Chi-squared stats of non-negative features for classification tasks. + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.feature_selection import r_regression + >>> X, y = make_regression( + ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 + ... ) + >>> r_regression(X, y) + array([-0.15..., 1. , -0.22...]) + """ + X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64) + n_samples = X.shape[0] + + # Compute centered values + # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we + # need not center X + if center: + y = y - np.mean(y) + # TODO: for Scipy <= 1.10, `isspmatrix(X)` returns `True` for sparse arrays. + # Here, we check the output of the `.mean` operation that returns a `np.matrix` + # for sparse matrices while a `np.array` for dense and sparse arrays. + # We can reconsider using `isspmatrix` when the minimum version is + # SciPy >= 1.11 + X_means = X.mean(axis=0) + X_means = X_means.getA1() if isinstance(X_means, np.matrix) else X_means + # Compute the scaled standard deviations via moments + X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means**2) + else: + X_norms = row_norms(X.T) + + correlation_coefficient = safe_sparse_dot(y, X) + with np.errstate(divide="ignore", invalid="ignore"): + correlation_coefficient /= X_norms + correlation_coefficient /= np.linalg.norm(y) + + if force_finite and not np.isfinite(correlation_coefficient).all(): + # case where the target or some features are constant + # the correlation coefficient(s) is/are set to the minimum (i.e. 0.0) + nan_mask = np.isnan(correlation_coefficient) + correlation_coefficient[nan_mask] = 0.0 + return correlation_coefficient + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "center": ["boolean"], + "force_finite": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def f_regression(X, y, *, center=True, force_finite=True): + """Univariate linear regression tests returning F-statistic and p-values. + + Quick linear model for testing the effect of a single regressor, + sequentially for many regressors. + + This is done in 2 steps: + + 1. 
The cross correlation between each regressor and the target is computed + using :func:`r_regression` as:: + + E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y)) + + 2. It is converted to an F score and then to a p-value. + + :func:`f_regression` is derived from :func:`r_regression` and will rank + features in the same order if all the features are positively correlated + with the target. + + Note however that contrary to :func:`f_regression`, :func:`r_regression` + values lie in [-1, 1] and can thus be negative. :func:`f_regression` is + therefore recommended as a feature selection criterion to identify + potentially predictive feature for a downstream classifier, irrespective of + the sign of the association with the target variable. + + Furthermore :func:`f_regression` returns p-values while + :func:`r_regression` does not. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix. + + y : array-like of shape (n_samples,) + The target vector. + + center : bool, default=True + Whether or not to center the data matrix `X` and the target vector `y`. + By default, `X` and `y` will be centered. + + force_finite : bool, default=True + Whether or not to force the F-statistics and associated p-values to + be finite. There are two cases where the F-statistic is expected to not + be finite: + + - when the target `y` or some features in `X` are constant. In this + case, the Pearson's R correlation is not defined leading to obtain + `np.nan` values in the F-statistic and p-value. When + `force_finite=True`, the F-statistic is set to `0.0` and the + associated p-value is set to `1.0`. + - when a feature in `X` is perfectly correlated (or + anti-correlated) with the target `y`. In this case, the F-statistic + is expected to be `np.inf`. When `force_finite=True`, the F-statistic + is set to `np.finfo(dtype).max` and the associated p-value is set to + `0.0`. + + .. versionadded:: 1.1 + + Returns + ------- + f_statistic : ndarray of shape (n_features,) + F-statistic for each feature. + + p_values : ndarray of shape (n_features,) + P-values associated with the F-statistic. + + See Also + -------- + r_regression: Pearson's R between label/feature for regression tasks. + f_classif: ANOVA F-value between label/feature for classification tasks. + chi2: Chi-squared stats of non-negative features for classification tasks. + SelectKBest: Select features based on the k highest scores. + SelectFpr: Select features based on a false positive rate test. + SelectFdr: Select features based on an estimated false discovery rate. + SelectFwe: Select features based on family-wise error rate. + SelectPercentile: Select features based on percentile of the highest + scores. + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.feature_selection import f_regression + >>> X, y = make_regression( + ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 + ... 
) + >>> f_statistic, p_values = f_regression(X, y) + >>> f_statistic + array([1.2...+00, 2.6...+13, 2.6...+00]) + >>> p_values + array([2.7..., 1.5..., 1.0...]) + """ + correlation_coefficient = r_regression( + X, y, center=center, force_finite=force_finite + ) + deg_of_freedom = y.size - (2 if center else 1) + + corr_coef_squared = correlation_coefficient**2 + + with np.errstate(divide="ignore", invalid="ignore"): + f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom + p_values = stats.f.sf(f_statistic, 1, deg_of_freedom) + + if force_finite and not np.isfinite(f_statistic).all(): + # case where there is a perfect (anti-)correlation + # f-statistics can be set to the maximum and p-values to zero + mask_inf = np.isinf(f_statistic) + f_statistic[mask_inf] = np.finfo(f_statistic.dtype).max + # case where the target or some features are constant + # f-statistics would be minimum and thus p-values large + mask_nan = np.isnan(f_statistic) + f_statistic[mask_nan] = 0.0 + p_values[mask_nan] = 1.0 + return f_statistic, p_values + + +###################################################################### +# Base classes + + +class _BaseFilter(SelectorMixin, BaseEstimator): + """Initialize the univariate feature selection. + + Parameters + ---------- + score_func : callable + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues) or a single array with scores. + """ + + _parameter_constraints: dict = {"score_func": [callable]} + + def __init__(self, score_func): + self.score_func = score_func + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Run score function on (X, y) and get the appropriate features. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) or None + The target values (class labels in classification, real numbers in + regression). If the selector is unsupervised then `y` can be set to `None`. + + Returns + ------- + self : object + Returns the instance itself. + """ + if y is None: + X = self._validate_data(X, accept_sparse=["csr", "csc"]) + else: + X, y = self._validate_data( + X, y, accept_sparse=["csr", "csc"], multi_output=True + ) + + self._check_params(X, y) + score_func_ret = self.score_func(X, y) + if isinstance(score_func_ret, (list, tuple)): + self.scores_, self.pvalues_ = score_func_ret + self.pvalues_ = np.asarray(self.pvalues_) + else: + self.scores_ = score_func_ret + self.pvalues_ = None + + self.scores_ = np.asarray(self.scores_) + + return self + + def _check_params(self, X, y): + pass + + def _more_tags(self): + return {"requires_y": True} + + +###################################################################### +# Specific filters +###################################################################### +class SelectPercentile(_BaseFilter): + """Select features according to a percentile of the highest scores. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues) or a single array with scores. + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + .. versionadded:: 0.18 + + percentile : int, default=10 + Percent of features to keep. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. 
+ + pvalues_ : array-like of shape (n_features,) + p-values of feature scores, None if `score_func` returned only scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + mutual_info_classif : Mutual information for a discrete target. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Notes + ----- + Ties between features with equal scores will be broken in an unspecified + way. + + This filter supports unsupervised feature selection that only requests `X` for + computing the scores. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.feature_selection import SelectPercentile, chi2 + >>> X, y = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y) + >>> X_new.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "percentile": [Interval(Real, 0, 100, closed="both")], + } + + def __init__(self, score_func=f_classif, *, percentile=10): + super().__init__(score_func=score_func) + self.percentile = percentile + + def _get_support_mask(self): + check_is_fitted(self) + + # Cater for NaNs + if self.percentile == 100: + return np.ones(len(self.scores_), dtype=bool) + elif self.percentile == 0: + return np.zeros(len(self.scores_), dtype=bool) + + scores = _clean_nans(self.scores_) + threshold = np.percentile(scores, 100 - self.percentile) + mask = scores > threshold + ties = np.where(scores == threshold)[0] + if len(ties): + max_feats = int(len(scores) * self.percentile / 100) + kept_ties = ties[: max_feats - mask.sum()] + mask[kept_ties] = True + return mask + + def _more_tags(self): + return {"requires_y": False} + + +class SelectKBest(_BaseFilter): + """Select features according to the k highest scores. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues) or a single array with scores. + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + .. versionadded:: 0.18 + + k : int or "all", default=10 + Number of top features to select. + The "all" option bypasses selection, for use in a parameter search. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores, None if `score_func` returned only scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif: ANOVA F-value between label/feature for classification tasks. + mutual_info_classif: Mutual information for a discrete target. + chi2: Chi-squared stats of non-negative features for classification tasks. + f_regression: F-value between label/feature for regression tasks. + mutual_info_regression: Mutual information for a continuous target. + SelectPercentile: Select features based on percentile of the highest + scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Notes + ----- + Ties between features with equal scores will be broken in an unspecified + way. + + This filter supports unsupervised feature selection that only requests `X` for + computing the scores. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.feature_selection import SelectKBest, chi2 + >>> X, y = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y) + >>> X_new.shape + (1797, 20) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")], + } + + def __init__(self, score_func=f_classif, *, k=10): + super().__init__(score_func=score_func) + self.k = k + + def _check_params(self, X, y): + if not isinstance(self.k, str) and self.k > X.shape[1]: + warnings.warn( + f"k={self.k} is greater than n_features={X.shape[1]}. " + "All the features will be returned." + ) + + def _get_support_mask(self): + check_is_fitted(self) + + if self.k == "all": + return np.ones(self.scores_.shape, dtype=bool) + elif self.k == 0: + return np.zeros(self.scores_.shape, dtype=bool) + else: + scores = _clean_nans(self.scores_) + mask = np.zeros(scores.shape, dtype=bool) + + # Request a stable sort. Mergesort takes more memory (~40MB per + # megafeature on x86-64). + mask[np.argsort(scores, kind="mergesort")[-self.k :]] = 1 + return mask + + def _more_tags(self): + return {"requires_y": False} + + +class SelectFpr(_BaseFilter): + """Filter: Select the pvalues below alpha based on a FPR test. + + FPR test stands for False Positive Rate test. It controls the total + amount of false detections. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + alpha : float, default=5e-2 + Features with p-values less than `alpha` are selected. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + chi2 : Chi-squared stats of non-negative features for classification tasks. + mutual_info_classif: Mutual information for a discrete target. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import SelectFpr, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y) + >>> X_new.shape + (569, 16) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "alpha": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, score_func=f_classif, *, alpha=5e-2): + super().__init__(score_func=score_func) + self.alpha = alpha + + def _get_support_mask(self): + check_is_fitted(self) + + return self.pvalues_ < self.alpha + + +class SelectFdr(_BaseFilter): + """Filter: Select the p-values for an estimated false discovery rate. + + This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound + on the expected false discovery rate. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + alpha : float, default=5e-2 + The highest uncorrected p-value for features to keep. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + mutual_info_classif : Mutual information for a discrete target. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. 
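+
+    Notes
+    -----
+    With the p-values sorted in increasing order, ``p_(1) <= ... <= p_(m)``,
+    the Benjamini-Hochberg step-up procedure keeps every feature whose
+    p-value is at most ``p_(k)``, where ``k`` is the largest index with
+    ``p_(k) <= alpha * k / m``. For instance, with ``alpha=0.05`` and sorted
+    p-values ``[0.001, 0.02, 0.04]``, the per-rank thresholds are roughly
+    ``[0.017, 0.033, 0.05]``, so all three features are kept.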
+ + References + ---------- + https://en.wikipedia.org/wiki/False_discovery_rate + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import SelectFdr, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y) + >>> X_new.shape + (569, 16) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "alpha": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, score_func=f_classif, *, alpha=5e-2): + super().__init__(score_func=score_func) + self.alpha = alpha + + def _get_support_mask(self): + check_is_fitted(self) + + n_features = len(self.pvalues_) + sv = np.sort(self.pvalues_) + selected = sv[ + sv <= float(self.alpha) / n_features * np.arange(1, n_features + 1) + ] + if selected.size == 0: + return np.zeros_like(self.pvalues_, dtype=bool) + return self.pvalues_ <= selected.max() + + +class SelectFwe(_BaseFilter): + """Filter: Select the p-values corresponding to Family-wise error rate. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + alpha : float, default=5e-2 + The highest uncorrected p-value for features to keep. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import SelectFwe, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y) + >>> X_new.shape + (569, 15) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "alpha": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, score_func=f_classif, *, alpha=5e-2): + super().__init__(score_func=score_func) + self.alpha = alpha + + def _get_support_mask(self): + check_is_fitted(self) + + return self.pvalues_ < self.alpha / len(self.pvalues_) + + +###################################################################### +# Generic filter +###################################################################### + + +# TODO this class should fit on either p-values or scores, +# depending on the mode. 
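+# Note: `param` is forwarded to the single mode-specific parameter of the
+# underlying selector, so e.g. mode="fdr", param=0.05 behaves like
+# SelectFdr(score_func, alpha=0.05), and mode="k_best", param=20 like
+# SelectKBest(score_func, k=20).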
+class GenericUnivariateSelect(_BaseFilter): + """Univariate feature selector with configurable strategy. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). For modes 'percentile' or 'kbest' it can return + a single array scores. + + mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile' + Feature selection mode. Note that the `'percentile'` and `'kbest'` + modes are supporting unsupervised feature selection (when `y` is `None`). + + param : "all", float or int, default=1e-5 + Parameter of the corresponding mode. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores, None if `score_func` returned scores only. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + mutual_info_classif : Mutual information for a discrete target. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. 
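+
+    Notes
+    -----
+    The meaning of `param` tracks the selected mode: a percentage of features
+    to keep for 'percentile', a number of features for 'k_best', and a
+    significance level alpha for 'fpr', 'fdr' and 'fwe'.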
+ + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20) + >>> X_new = transformer.fit_transform(X, y) + >>> X_new.shape + (569, 20) + """ + + _selection_modes: dict = { + "percentile": SelectPercentile, + "k_best": SelectKBest, + "fpr": SelectFpr, + "fdr": SelectFdr, + "fwe": SelectFwe, + } + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "mode": [StrOptions(set(_selection_modes.keys()))], + "param": [Interval(Real, 0, None, closed="left"), StrOptions({"all"})], + } + + def __init__(self, score_func=f_classif, *, mode="percentile", param=1e-5): + super().__init__(score_func=score_func) + self.mode = mode + self.param = param + + def _make_selector(self): + selector = self._selection_modes[self.mode](score_func=self.score_func) + + # Now perform some acrobatics to set the right named parameter in + # the selector + possible_params = selector._get_param_names() + possible_params.remove("score_func") + selector.set_params(**{possible_params[0]: self.param}) + + return selector + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} + + def _check_params(self, X, y): + self._make_selector()._check_params(X, y) + + def _get_support_mask(self): + check_is_fitted(self) + + selector = self._make_selector() + selector.pvalues_ = self.pvalues_ + selector.scores_ = self.scores_ + return selector._get_support_mask() diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..f97c75db1e34b1a5d6179403ebbaf83902c067ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py @@ -0,0 +1,136 @@ +# Author: Lars Buitinck +# License: 3-clause BSD +from numbers import Real + +import numpy as np + +from ..base import BaseEstimator, _fit_context +from ..utils._param_validation import Interval +from ..utils.sparsefuncs import mean_variance_axis, min_max_axis +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +class VarianceThreshold(SelectorMixin, BaseEstimator): + """Feature selector that removes all low-variance features. + + This feature selection algorithm looks only at the features (X), not the + desired outputs (y), and can thus be used for unsupervised learning. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + threshold : float, default=0 + Features with a training-set variance lower than this threshold will + be removed. The default is to keep all features with non-zero variance, + i.e. remove the features that have the same value in all samples. + + Attributes + ---------- + variances_ : array, shape (n_features,) + Variances of individual features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SelectFromModel: Meta-transformer for selecting features based on + importance weights. 
+ SelectPercentile : Select features according to a percentile of the highest + scores. + SequentialFeatureSelector : Transformer that performs Sequential Feature + Selection. + + Notes + ----- + Allows NaN in the input. + Raises ValueError if no feature in X meets the variance threshold. + + Examples + -------- + The following dataset has integer features, two of which are the same + in every sample. These are removed with the default setting for threshold:: + + >>> from sklearn.feature_selection import VarianceThreshold + >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]] + >>> selector = VarianceThreshold() + >>> selector.fit_transform(X) + array([[2, 0], + [1, 4], + [1, 1]]) + """ + + _parameter_constraints: dict = { + "threshold": [Interval(Real, 0, None, closed="left")] + } + + def __init__(self, threshold=0.0): + self.threshold = threshold + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn empirical variances from X. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Data from which to compute variances, where `n_samples` is + the number of samples and `n_features` is the number of features. + + y : any, default=None + Ignored. This parameter exists only for compatibility with + sklearn.pipeline.Pipeline. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + dtype=np.float64, + force_all_finite="allow-nan", + ) + + if hasattr(X, "toarray"): # sparse matrix + _, self.variances_ = mean_variance_axis(X, axis=0) + if self.threshold == 0: + mins, maxes = min_max_axis(X, axis=0) + peak_to_peaks = maxes - mins + else: + self.variances_ = np.nanvar(X, axis=0) + if self.threshold == 0: + peak_to_peaks = np.ptp(X, axis=0) + + if self.threshold == 0: + # Use peak-to-peak to avoid numeric precision issues + # for constant features + compare_arr = np.array([self.variances_, peak_to_peaks]) + self.variances_ = np.nanmin(compare_arr, axis=0) + + if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)): + msg = "No feature in X meets the variance threshold {0:.5f}" + if X.shape[0] == 1: + msg += " (X contains only one sample)" + raise ValueError(msg.format(self.threshold)) + + return self + + def _get_support_mask(self): + check_is_fitted(self) + + return self.variances_ > self.threshold + + def _more_tags(self): + return {"allow_nan": True} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..122f4eafc76d7a5d67b45ddaed6de5ee3e95204b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8418e209df12169416f65981ef4d8cc7962eb9d0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aefd1c57ca6e442bb0c12d1cc97775a08c58f75 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6faf1130cd221158f29230d85b79ab2e4638f7bd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..5e2bb27bafd1767cec33b1c4255b2116e3f8a9e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py @@ -0,0 +1,153 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.base import BaseEstimator +from sklearn.feature_selection._base import SelectorMixin +from sklearn.utils.fixes import CSC_CONTAINERS + + +class StepSelector(SelectorMixin, BaseEstimator): + """Retain every `step` features (beginning with 0). + + If `step < 1`, then no features are selected. 
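+
+    A quick, illustrative sanity check of the selection pattern:
+
+    >>> import numpy as np
+    >>> sel = StepSelector(step=2).fit(np.arange(20).reshape(2, 10))
+    >>> int(sel.get_support().sum())
+    5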
+ """ + + def __init__(self, step=2): + self.step = step + + def fit(self, X, y=None): + X = self._validate_data(X, accept_sparse="csc") + return self + + def _get_support_mask(self): + mask = np.zeros(self.n_features_in_, dtype=bool) + if self.step >= 1: + mask[:: self.step] = True + return mask + + +support = [True, False] * 5 +support_inds = [0, 2, 4, 6, 8] +X = np.arange(20).reshape(2, 10) +Xt = np.arange(0, 20, 2).reshape(2, 5) +Xinv = X.copy() +Xinv[:, 1::2] = 0 +y = [0, 1] +feature_names = list("ABCDEFGHIJ") +feature_names_t = feature_names[::2] +feature_names_inv = np.array(feature_names) +feature_names_inv[1::2] = "" + + +def test_transform_dense(): + sel = StepSelector() + Xt_actual = sel.fit(X, y).transform(X) + Xt_actual2 = StepSelector().fit_transform(X, y) + assert_array_equal(Xt, Xt_actual) + assert_array_equal(Xt, Xt_actual2) + + # Check dtype matches + assert np.int32 == sel.transform(X.astype(np.int32)).dtype + assert np.float32 == sel.transform(X.astype(np.float32)).dtype + + # Check 1d list and other dtype: + names_t_actual = sel.transform([feature_names]) + assert_array_equal(feature_names_t, names_t_actual.ravel()) + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.transform(np.array([[1], [2]])) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_transform_sparse(csc_container): + X_sp = csc_container(X) + sel = StepSelector() + Xt_actual = sel.fit(X_sp).transform(X_sp) + Xt_actual2 = sel.fit_transform(X_sp) + assert_array_equal(Xt, Xt_actual.toarray()) + assert_array_equal(Xt, Xt_actual2.toarray()) + + # Check dtype matches + assert np.int32 == sel.transform(X_sp.astype(np.int32)).dtype + assert np.float32 == sel.transform(X_sp.astype(np.float32)).dtype + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.transform(np.array([[1], [2]])) + + +def test_inverse_transform_dense(): + sel = StepSelector() + Xinv_actual = sel.fit(X, y).inverse_transform(Xt) + assert_array_equal(Xinv, Xinv_actual) + + # Check dtype matches + assert np.int32 == sel.inverse_transform(Xt.astype(np.int32)).dtype + assert np.float32 == sel.inverse_transform(Xt.astype(np.float32)).dtype + + # Check 1d list and other dtype: + names_inv_actual = sel.inverse_transform([feature_names_t]) + assert_array_equal(feature_names_inv, names_inv_actual.ravel()) + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.inverse_transform(np.array([[1], [2]])) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_inverse_transform_sparse(csc_container): + X_sp = csc_container(X) + Xt_sp = csc_container(Xt) + sel = StepSelector() + Xinv_actual = sel.fit(X_sp).inverse_transform(Xt_sp) + assert_array_equal(Xinv, Xinv_actual.toarray()) + + # Check dtype matches + assert np.int32 == sel.inverse_transform(Xt_sp.astype(np.int32)).dtype + assert np.float32 == sel.inverse_transform(Xt_sp.astype(np.float32)).dtype + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.inverse_transform(np.array([[1], [2]])) + + +def test_get_support(): + sel = StepSelector() + sel.fit(X, y) + assert_array_equal(support, sel.get_support()) + assert_array_equal(support_inds, sel.get_support(indices=True)) + + +def test_output_dataframe(): + """Check output dtypes for dataframes is consistent with the input dtypes.""" + pd = pytest.importorskip("pandas") + + X = pd.DataFrame( + { + "a": pd.Series([1.0, 2.4, 4.5], dtype=np.float32), + "b": pd.Series(["a", "b", "a"], dtype="category"), + "c": pd.Series(["j", "b", 
"b"], dtype="category"), + "d": pd.Series([3.0, 2.4, 1.2], dtype=np.float64), + } + ) + + for step in [2, 3]: + sel = StepSelector(step=step).set_output(transform="pandas") + sel.fit(X) + + output = sel.transform(X) + for name, dtype in output.dtypes.items(): + assert dtype == X.dtypes[name] + + # step=0 will select nothing + sel0 = StepSelector(step=0).set_output(transform="pandas") + sel0.fit(X, y) + + msg = "No features were selected" + with pytest.warns(UserWarning, match=msg): + output0 = sel0.transform(X) + + assert_array_equal(output0.index, X.index) + assert output0.shape == (X.shape[0], 0) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py new file mode 100644 index 0000000000000000000000000000000000000000..c50def36f1b6c281e6c96019355b901bf4326a38 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py @@ -0,0 +1,93 @@ +""" +Tests for chi2, currently the only feature selection function designed +specifically to work with sparse matrices. +""" + +import warnings + +import numpy as np +import pytest +import scipy.stats + +from sklearn.feature_selection import SelectKBest, chi2 +from sklearn.feature_selection._univariate_selection import _chisquare +from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +# Feature 0 is highly informative for class 1; +# feature 1 is the same everywhere; +# feature 2 is a bit informative for class 2. +X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]] +y = [0, 1, 2, 2] + + +def mkchi2(k): + """Make k-best chi2 selector""" + return SelectKBest(chi2, k=k) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_chi2(csr_container): + # Test Chi2 feature extraction + + chi2 = mkchi2(k=1).fit(X, y) + chi2 = mkchi2(k=1).fit(X, y) + assert_array_equal(chi2.get_support(indices=True), [0]) + assert_array_equal(chi2.transform(X), np.array(X)[:, [0]]) + + chi2 = mkchi2(k=2).fit(X, y) + assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) + + Xsp = csr_container(X, dtype=np.float64) + chi2 = mkchi2(k=2).fit(Xsp, y) + assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) + Xtrans = chi2.transform(Xsp) + assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2]) + + # == doesn't work on scipy.sparse matrices + Xtrans = Xtrans.toarray() + Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray() + assert_array_almost_equal(Xtrans, Xtrans2) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_chi2_coo(coo_container): + # Check that chi2 works with a COO matrix + # (as returned by CountVectorizer, DictVectorizer) + Xcoo = coo_container(X) + mkchi2(k=2).fit_transform(Xcoo, y) + # if we got here without an exception, we're safe + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_chi2_negative(csr_container): + # Check for proper error on negative numbers in the input X. 
+ X, y = [[0, 1], [-1e-20, 1]], [0, 1] + for X in (X, np.array(X), csr_container(X)): + with pytest.raises(ValueError): + chi2(X, y) + + +def test_chi2_unused_feature(): + # Unused feature should evaluate to NaN + # and should issue no runtime warning + with warnings.catch_warnings(record=True) as warned: + warnings.simplefilter("always") + chi, p = chi2([[1, 0], [0, 0]], [1, 0]) + for w in warned: + if "divide by zero" in repr(w): + raise AssertionError("Found unexpected warning %s" % w) + assert_array_equal(chi, [1, np.nan]) + assert_array_equal(p[1], np.nan) + + +def test_chisquare(): + # Test replacement for scipy.stats.chisquare against the original. + obs = np.array([[2.0, 2.0], [1.0, 1.0]]) + exp = np.array([[1.5, 1.5], [1.5, 1.5]]) + # call SciPy first because our version overwrites obs + chi_scp, p_scp = scipy.stats.chisquare(obs, exp) + chi_our, p_our = _chisquare(obs, exp) + + assert_array_almost_equal(chi_scp, chi_our) + assert_array_almost_equal(p_scp, p_our) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py new file mode 100644 index 0000000000000000000000000000000000000000..3815a88c374e8611dee49e78fe90bd2653efc969 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py @@ -0,0 +1,1017 @@ +""" +Todo: cross-check the F-value with stats model +""" +import itertools +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import sparse, stats + +from sklearn.datasets import load_iris, make_classification, make_regression +from sklearn.feature_selection import ( + GenericUnivariateSelect, + SelectFdr, + SelectFpr, + SelectFwe, + SelectKBest, + SelectPercentile, + chi2, + f_classif, + f_oneway, + f_regression, + mutual_info_classif, + mutual_info_regression, + r_regression, +) +from sklearn.utils import safe_mask +from sklearn.utils._testing import ( + _convert_container, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +############################################################################## +# Test the score functions + + +def test_f_oneway_vs_scipy_stats(): + # Test that our f_oneway gives the same result as scipy.stats + rng = np.random.RandomState(0) + X1 = rng.randn(10, 3) + X2 = 1 + rng.randn(10, 3) + f, pv = stats.f_oneway(X1, X2) + f2, pv2 = f_oneway(X1, X2) + assert np.allclose(f, f2) + assert np.allclose(pv, pv2) + + +def test_f_oneway_ints(): + # Smoke test f_oneway on integers: that it does raise casting errors + # with recent numpys + rng = np.random.RandomState(0) + X = rng.randint(10, size=(10, 10)) + y = np.arange(10) + fint, pint = f_oneway(X, y) + + # test that is gives the same result as with float + f, p = f_oneway(X.astype(float), y) + assert_array_almost_equal(f, fint, decimal=4) + assert_array_almost_equal(p, pint, decimal=4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_f_classif(csr_container): + # Test whether the F test yields meaningful results + # on a simple simulated classification problem + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + F, pv = f_classif(X, y) + F_sparse, 
pv_sparse = f_classif(csr_container(X), y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + assert_array_almost_equal(F_sparse, F) + assert_array_almost_equal(pv_sparse, pv) + + +@pytest.mark.parametrize("center", [True, False]) +def test_r_regression(center): + X, y = make_regression( + n_samples=2000, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + corr_coeffs = r_regression(X, y, center=center) + assert (-1 < corr_coeffs).all() + assert (corr_coeffs < 1).all() + + sparse_X = _convert_container(X, "sparse") + + sparse_corr_coeffs = r_regression(sparse_X, y, center=center) + assert_allclose(sparse_corr_coeffs, corr_coeffs) + + # Testing against numpy for reference + Z = np.hstack((X, y[:, np.newaxis])) + correlation_matrix = np.corrcoef(Z, rowvar=False) + np_corr_coeffs = correlation_matrix[:-1, -1] + assert_array_almost_equal(np_corr_coeffs, corr_coeffs, decimal=3) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_f_regression(csr_container): + # Test whether the F test yields meaningful results + # on a simple simulated regression problem + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + F, pv = f_regression(X, y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + + # with centering, compare with sparse + F, pv = f_regression(X, y, center=True) + F_sparse, pv_sparse = f_regression(csr_container(X), y, center=True) + assert_allclose(F_sparse, F) + assert_allclose(pv_sparse, pv) + + # again without centering, compare with sparse + F, pv = f_regression(X, y, center=False) + F_sparse, pv_sparse = f_regression(csr_container(X), y, center=False) + assert_allclose(F_sparse, F) + assert_allclose(pv_sparse, pv) + + +def test_f_regression_input_dtype(): + # Test whether f_regression returns the same value + # for any numeric data_type + rng = np.random.RandomState(0) + X = rng.rand(10, 20) + y = np.arange(10).astype(int) + + F1, pv1 = f_regression(X, y) + F2, pv2 = f_regression(X, y.astype(float)) + assert_allclose(F1, F2, 5) + assert_allclose(pv1, pv2, 5) + + +def test_f_regression_center(): + # Test whether f_regression preserves dof according to 'center' argument + # We use two centered variates so we have a simple relationship between + # F-score with variates centering and F-score without variates centering. 
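+    # With X already centered (zero mean), the correlation between X and Y is
+    # identical with and without centering; only the residual degrees of
+    # freedom differ (n - 2 with centering, n - 1 without), hence
+    # F_centered * (n - 1) / (n - 2) == F_uncentered, which is exactly what
+    # the assertion below checks.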
+ # Create toy example + X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean + n_samples = X.size + Y = np.ones(n_samples) + Y[::2] *= -1.0 + Y[0] = 0.0 # have Y mean being null + + F1, _ = f_regression(X, Y, center=True) + F2, _ = f_regression(X, Y, center=False) + assert_allclose(F1 * (n_samples - 1.0) / (n_samples - 2.0), F2) + assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS + + +@pytest.mark.parametrize( + "X, y, expected_corr_coef, force_finite", + [ + ( + # A feature in X is constant - forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([0.0, 0.32075]), + True, + ), + ( + # The target y is constant - forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([0.0, 0.0]), + True, + ), + ( + # A feature in X is constant - not forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([np.nan, 0.32075]), + False, + ), + ( + # The target y is constant - not forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([np.nan, np.nan]), + False, + ), + ], +) +def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite): + """Check the behaviour of `force_finite` for some corner cases with `r_regression`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15672 + """ + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + corr_coef = r_regression(X, y, force_finite=force_finite) + np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef) + + +@pytest.mark.parametrize( + "X, y, expected_f_statistic, expected_p_values, force_finite", + [ + ( + # A feature in X is constant - forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([0.0, 0.2293578]), + np.array([1.0, 0.67924985]), + True, + ), + ( + # The target y is constant - forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([0.0, 0.0]), + np.array([1.0, 1.0]), + True, + ), + ( + # Feature in X correlated with y - forcing finite + np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.finfo(np.float64).max, 0.845433]), + np.array([0.0, 0.454913]), + True, + ), + ( + # Feature in X anti-correlated with y - forcing finite + np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.finfo(np.float64).max, 0.845433]), + np.array([0.0, 0.454913]), + True, + ), + ( + # A feature in X is constant - not forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([np.nan, 0.2293578]), + np.array([np.nan, 0.67924985]), + False, + ), + ( + # The target y is constant - not forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([np.nan, np.nan]), + np.array([np.nan, np.nan]), + False, + ), + ( + # Feature in X correlated with y - not forcing finite + np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.inf, 0.845433]), + np.array([0.0, 0.454913]), + False, + ), + ( + # Feature in X anti-correlated with y - not forcing finite + np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.inf, 0.845433]), + np.array([0.0, 0.454913]), + False, + ), + ], +) +def test_f_regression_corner_case( + X, y, expected_f_statistic, expected_p_values, force_finite +): + """Check the behaviour of `force_finite` for 
some corner cases with `f_regression`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15672 + """ + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + f_statistic, p_values = f_regression(X, y, force_finite=force_finite) + np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic) + np.testing.assert_array_almost_equal(p_values, expected_p_values) + + +def test_f_classif_multi_class(): + # Test whether the F test yields meaningful results + # on a simple simulated classification problem + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + F, pv = f_classif(X, y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + + +def test_select_percentile_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the percentile heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = SelectPercentile(f_classif, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_select_percentile_classif_sparse(csr_container): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the percentile heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + X = csr_container(X) + univariate_filter = SelectPercentile(f_classif, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r.toarray(), X_r2.toarray()) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + X_r2inv = univariate_filter.inverse_transform(X_r2) + assert sparse.issparse(X_r2inv) + support_mask = safe_mask(X_r2inv, support) + assert X_r2inv.shape == X.shape + assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) + # Check other columns are empty + assert X_r2inv.nnz == X_r.nnz + + +############################################################################## +# Test univariate selection in classification settings + + +def test_select_kbest_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the k best heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) 
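+
+    # With shuffle=False, the 3 informative and 2 redundant columns come
+    # first, so only the 5 leading features carry class information and
+    # k=5 is expected to recover exactly those (the gtruth mask below).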
+ + univariate_filter = SelectKBest(f_classif, k=5) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="k_best", param=5) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +def test_select_kbest_all(): + # Test whether k="all" correctly returns all features. + X, y = make_classification( + n_samples=20, n_features=10, shuffle=False, random_state=0 + ) + + univariate_filter = SelectKBest(f_classif, k="all") + X_r = univariate_filter.fit(X, y).transform(X) + assert_array_equal(X, X_r) + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/24949 + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="k_best", param="all") + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + + +@pytest.mark.parametrize("dtype_in", [np.float32, np.float64]) +def test_select_kbest_zero(dtype_in): + # Test whether k=0 correctly returns no features. + X, y = make_classification( + n_samples=20, n_features=10, shuffle=False, random_state=0 + ) + X = X.astype(dtype_in) + + univariate_filter = SelectKBest(f_classif, k=0) + univariate_filter.fit(X, y) + support = univariate_filter.get_support() + gtruth = np.zeros(10, dtype=bool) + assert_array_equal(support, gtruth) + with pytest.warns(UserWarning, match="No features were selected"): + X_selected = univariate_filter.transform(X) + assert X_selected.shape == (20, 0) + assert X_selected.dtype == dtype_in + + +def test_select_heuristics_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the fdr, fwe and fpr heuristics + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = SelectFwe(f_classif, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + gtruth = np.zeros(20) + gtruth[:5] = 1 + for mode in ["fdr", "fpr", "fwe"]: + X_r2 = ( + GenericUnivariateSelect(f_classif, mode=mode, param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + assert_allclose(support, gtruth) + + +############################################################################## +# Test univariate selection in regression settings + + +def assert_best_scores_kept(score_filter): + scores = score_filter.scores_ + support = score_filter.get_support() + assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum() :]) + + +def test_select_percentile_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the percentile heuristic + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectPercentile(f_regression, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + X_2 = X.copy() + X_2[:, np.logical_not(support)] = 0 + 
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r)) + # Check inverse_transform respects dtype + assert_array_equal( + X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool)) + ) + + +def test_select_percentile_regression_full(): + # Test whether the relative univariate feature selection + # selects all features when '100%' is asked. + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectPercentile(f_regression, percentile=100) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="percentile", param=100) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.ones(20) + assert_array_equal(support, gtruth) + + +def test_select_kbest_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the k best heuristic + X, y = make_regression( + n_samples=200, + n_features=20, + n_informative=5, + shuffle=False, + random_state=0, + noise=10, + ) + + univariate_filter = SelectKBest(f_regression, k=5) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="k_best", param=5) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +def test_select_heuristics_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the fpr, fdr or fwe heuristics + X, y = make_regression( + n_samples=200, + n_features=20, + n_informative=5, + shuffle=False, + random_state=0, + noise=10, + ) + + univariate_filter = SelectFpr(f_regression, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + gtruth = np.zeros(20) + gtruth[:5] = 1 + for mode in ["fdr", "fpr", "fwe"]: + X_r2 = ( + GenericUnivariateSelect(f_regression, mode=mode, param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + assert_array_equal(support[:5], np.ones((5,), dtype=bool)) + assert np.sum(support[5:] == 1) < 3 + + +def test_boundary_case_ch2(): + # Test boundary case, and always aim to select 1 feature. 
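+    # With class priors 1/3 (y=1) and 2/3 (y=0), the expected per-class sums
+    # for feature 0 are 50/3 and 100/3 against observed 10 and 40, giving
+    # chi2 = (10 - 50/3)**2 / (50/3) + (40 - 100/3)**2 / (100/3) = 4.0;
+    # the same computation for feature 1 yields ~0.714, so every heuristic
+    # below should keep feature 0 only.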
+ X = np.array([[10, 20], [20, 20], [20, 30]]) + y = np.array([[1], [0], [0]]) + scores, pvalues = chi2(X, y) + assert_array_almost_equal(scores, np.array([4.0, 0.71428571])) + assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472])) + + filter_fdr = SelectFdr(chi2, alpha=0.1) + filter_fdr.fit(X, y) + support_fdr = filter_fdr.get_support() + assert_array_equal(support_fdr, np.array([True, False])) + + filter_kbest = SelectKBest(chi2, k=1) + filter_kbest.fit(X, y) + support_kbest = filter_kbest.get_support() + assert_array_equal(support_kbest, np.array([True, False])) + + filter_percentile = SelectPercentile(chi2, percentile=50) + filter_percentile.fit(X, y) + support_percentile = filter_percentile.get_support() + assert_array_equal(support_percentile, np.array([True, False])) + + filter_fpr = SelectFpr(chi2, alpha=0.1) + filter_fpr.fit(X, y) + support_fpr = filter_fpr.get_support() + assert_array_equal(support_fpr, np.array([True, False])) + + filter_fwe = SelectFwe(chi2, alpha=0.1) + filter_fwe.fit(X, y) + support_fwe = filter_fwe.get_support() + assert_array_equal(support_fwe, np.array([True, False])) + + +@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1]) +@pytest.mark.parametrize("n_informative", [1, 5, 10]) +def test_select_fdr_regression(alpha, n_informative): + # Test that fdr heuristic actually has low FDR. + def single_fdr(alpha, n_informative, random_state): + X, y = make_regression( + n_samples=150, + n_features=20, + n_informative=n_informative, + shuffle=False, + random_state=random_state, + noise=10, + ) + + with warnings.catch_warnings(record=True): + # Warnings can be raised when no features are selected + # (low alpha or very noisy data) + univariate_filter = SelectFdr(f_regression, alpha=alpha) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="fdr", param=alpha) + .fit(X, y) + .transform(X) + ) + + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + num_false_positives = np.sum(support[n_informative:] == 1) + num_true_positives = np.sum(support[:n_informative] == 1) + + if num_false_positives == 0: + return 0.0 + false_discovery_rate = num_false_positives / ( + num_true_positives + num_false_positives + ) + return false_discovery_rate + + # As per Benjamini-Hochberg, the expected false discovery rate + # should be lower than alpha: + # FDR = E(FP / (TP + FP)) <= alpha + false_discovery_rate = np.mean( + [single_fdr(alpha, n_informative, random_state) for random_state in range(100)] + ) + assert alpha >= false_discovery_rate + + # Make sure that the empirical false discovery rate increases + # with alpha: + if false_discovery_rate != 0: + assert false_discovery_rate > alpha / 10 + + +def test_select_fwe_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the fwe heuristic + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectFwe(f_regression, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="fwe", param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support[:5], np.ones((5,), dtype=bool)) + assert np.sum(support[5:] == 1) < 2 + + +def test_selectkbest_tiebreaking(): + # Test whether SelectKBest actually selects k 
features in case of ties. + # Prior to 0.11, SelectKBest would return more features than requested. + Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] + y = [1] + dummy_score = lambda X, y: (X[0], X[0]) + for X in Xs: + sel = SelectKBest(dummy_score, k=1) + X1 = ignore_warnings(sel.fit_transform)([X], y) + assert X1.shape[1] == 1 + assert_best_scores_kept(sel) + + sel = SelectKBest(dummy_score, k=2) + X2 = ignore_warnings(sel.fit_transform)([X], y) + assert X2.shape[1] == 2 + assert_best_scores_kept(sel) + + +def test_selectpercentile_tiebreaking(): + # Test if SelectPercentile selects the right n_features in case of ties. + Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] + y = [1] + dummy_score = lambda X, y: (X[0], X[0]) + for X in Xs: + sel = SelectPercentile(dummy_score, percentile=34) + X1 = ignore_warnings(sel.fit_transform)([X], y) + assert X1.shape[1] == 1 + assert_best_scores_kept(sel) + + sel = SelectPercentile(dummy_score, percentile=67) + X2 = ignore_warnings(sel.fit_transform)([X], y) + assert X2.shape[1] == 2 + assert_best_scores_kept(sel) + + +def test_tied_pvalues(): + # Test whether k-best and percentiles work with tied pvalues from chi2. + # chi2 will return the same p-values for the following features, but it + # will return different scores. + X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) + y = [0, 1] + + for perm in itertools.permutations((0, 1, 2)): + X = X0[:, perm] + Xt = SelectKBest(chi2, k=2).fit_transform(X, y) + assert Xt.shape == (2, 2) + assert 9998 not in Xt + + Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) + assert Xt.shape == (2, 2) + assert 9998 not in Xt + + +def test_scorefunc_multilabel(): + # Test whether k-best and percentiles works with multilabels with chi2. + + X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]]) + y = [[1, 1], [0, 1], [1, 0]] + + Xt = SelectKBest(chi2, k=2).fit_transform(X, y) + assert Xt.shape == (3, 2) + assert 0 not in Xt + + Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) + assert Xt.shape == (3, 2) + assert 0 not in Xt + + +def test_tied_scores(): + # Test for stable sorting in k-best with tied scores. + X_train = np.array([[0, 0, 0], [1, 1, 1]]) + y_train = [0, 1] + + for n_features in [1, 2, 3]: + sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) + X_test = sel.transform([[0, 1, 2]]) + assert_array_equal(X_test[0], np.arange(3)[-n_features:]) + + +def test_nans(): + # Assert that SelectKBest and SelectPercentile can handle NaNs. + # First feature has zero variance to confuse f_classif (ANOVA) and + # make it return a NaN. + X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] + y = [1, 0, 1] + + for select in ( + SelectKBest(f_classif, k=2), + SelectPercentile(f_classif, percentile=67), + ): + ignore_warnings(select.fit)(X, y) + assert_array_equal(select.get_support(indices=True), np.array([1, 2])) + + +def test_invalid_k(): + X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] + y = [1, 0, 1] + + msg = "k=4 is greater than n_features=3. All the features will be returned." + with pytest.warns(UserWarning, match=msg): + SelectKBest(k=4).fit(X, y) + with pytest.warns(UserWarning, match=msg): + GenericUnivariateSelect(mode="k_best", param=4).fit(X, y) + + +def test_f_classif_constant_feature(): + # Test that f_classif warns if a feature is constant throughout. 
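+    # A constant column has zero between-class and zero within-class
+    # variance, so the ANOVA F-statistic is a 0/0 form; f_classif is
+    # expected to flag this with a UserWarning instead of failing silently.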
+
+    X, y = make_classification(n_samples=10, n_features=5)
+    X[:, 0] = 2.0
+    with pytest.warns(UserWarning):
+        f_classif(X, y)
+
+
+def test_no_feature_selected():
+    rng = np.random.RandomState(0)
+
+    # Generate random uncorrelated data: a strict univariate test should
+    # reject all the features
+    X = rng.rand(40, 10)
+    y = rng.randint(0, 4, size=40)
+    strict_selectors = [
+        SelectFwe(alpha=0.01).fit(X, y),
+        SelectFdr(alpha=0.01).fit(X, y),
+        SelectFpr(alpha=0.01).fit(X, y),
+        SelectPercentile(percentile=0).fit(X, y),
+        SelectKBest(k=0).fit(X, y),
+    ]
+    for selector in strict_selectors:
+        assert_array_equal(selector.get_support(), np.zeros(10))
+        with pytest.warns(UserWarning, match="No features were selected"):
+            X_selected = selector.transform(X)
+        assert X_selected.shape == (40, 0)
+
+
+def test_mutual_info_classif():
+    X, y = make_classification(
+        n_samples=100,
+        n_features=5,
+        n_informative=1,
+        n_redundant=1,
+        n_repeated=0,
+        n_classes=2,
+        n_clusters_per_class=1,
+        flip_y=0.0,
+        class_sep=10,
+        shuffle=False,
+        random_state=0,
+    )
+
+    # Test in KBest mode.
+    univariate_filter = SelectKBest(mutual_info_classif, k=2)
+    X_r = univariate_filter.fit(X, y).transform(X)
+    X_r2 = (
+        GenericUnivariateSelect(mutual_info_classif, mode="k_best", param=2)
+        .fit(X, y)
+        .transform(X)
+    )
+    assert_array_equal(X_r, X_r2)
+    support = univariate_filter.get_support()
+    gtruth = np.zeros(5)
+    gtruth[:2] = 1
+    assert_array_equal(support, gtruth)
+
+    # Test in Percentile mode.
+    univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
+    X_r = univariate_filter.fit(X, y).transform(X)
+    X_r2 = (
+        GenericUnivariateSelect(mutual_info_classif, mode="percentile", param=40)
+        .fit(X, y)
+        .transform(X)
+    )
+    assert_array_equal(X_r, X_r2)
+    support = univariate_filter.get_support()
+    gtruth = np.zeros(5)
+    gtruth[:2] = 1
+    assert_array_equal(support, gtruth)
+
+
+def test_mutual_info_regression():
+    X, y = make_regression(
+        n_samples=100,
+        n_features=10,
+        n_informative=2,
+        shuffle=False,
+        random_state=0,
+        noise=10,
+    )
+
+    # Test in KBest mode.
+    univariate_filter = SelectKBest(mutual_info_regression, k=2)
+    X_r = univariate_filter.fit(X, y).transform(X)
+    assert_best_scores_kept(univariate_filter)
+    X_r2 = (
+        GenericUnivariateSelect(mutual_info_regression, mode="k_best", param=2)
+        .fit(X, y)
+        .transform(X)
+    )
+    assert_array_equal(X_r, X_r2)
+    support = univariate_filter.get_support()
+    gtruth = np.zeros(10)
+    gtruth[:2] = 1
+    assert_array_equal(support, gtruth)
+
+    # Test in Percentile mode.
+    univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
+    X_r = univariate_filter.fit(X, y).transform(X)
+    X_r2 = (
+        GenericUnivariateSelect(mutual_info_regression, mode="percentile", param=20)
+        .fit(X, y)
+        .transform(X)
+    )
+    assert_array_equal(X_r, X_r2)
+    support = univariate_filter.get_support()
+    gtruth = np.zeros(10)
+    gtruth[:2] = 1
+    assert_array_equal(support, gtruth)
+
+
+def test_dataframe_output_dtypes():
+    """Check that the output dataframe dtypes are the same as the input.
+
+    Non-regression test for gh-24860.
+ """ + pd = pytest.importorskip("pandas") + + X, y = load_iris(return_X_y=True, as_frame=True) + X = X.astype( + { + "petal length (cm)": np.float32, + "petal width (cm)": np.float64, + } + ) + X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10) + + column_order = X.columns + + def selector(X, y): + ranking = { + "sepal length (cm)": 1, + "sepal width (cm)": 2, + "petal length (cm)": 3, + "petal width (cm)": 4, + "petal_width_binned": 5, + } + return np.asarray([ranking[name] for name in column_order]) + + univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas") + output = univariate_filter.fit_transform(X, y) + + assert_array_equal( + output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"] + ) + for name, dtype in output.dtypes.items(): + assert dtype == X.dtypes[name] + + +@pytest.mark.parametrize( + "selector", + [ + SelectKBest(k=4), + SelectPercentile(percentile=80), + GenericUnivariateSelect(mode="k_best", param=4), + GenericUnivariateSelect(mode="percentile", param=80), + ], +) +def test_unsupervised_filter(selector): + """Check support for unsupervised feature selection for the filter that could + require only `X`. + """ + rng = np.random.RandomState(0) + X = rng.randn(10, 5) + + def score_func(X, y=None): + return np.array([1, 1, 1, 1, 0]) + + selector.set_params(score_func=score_func) + selector.fit(X) + X_trans = selector.transform(X) + assert_allclose(X_trans, X[:, :4]) + X_trans = selector.fit_transform(X) + assert_allclose(X_trans, X[:, :4]) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py new file mode 100644 index 0000000000000000000000000000000000000000..3573b7a078294f6284920c5f387fce5f9625906b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py @@ -0,0 +1,684 @@ +import re +import warnings +from unittest.mock import Mock + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator +from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression +from sklearn.datasets import make_friedman1 +from sklearn.decomposition import PCA +from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier +from sklearn.exceptions import NotFittedError +from sklearn.feature_selection import SelectFromModel +from sklearn.linear_model import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + LinearRegression, + LogisticRegression, + PassiveAggressiveClassifier, + SGDClassifier, +) +from sklearn.pipeline import make_pipeline +from sklearn.svm import LinearSVC +from sklearn.utils._testing import ( + MinimalClassifier, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + skip_if_32bit, +) + + +class NaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": True} + + +class NoNaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": False} + + +class NaNTagRandomForest(RandomForestClassifier): + def _more_tags(self): + return {"allow_nan": True} + + +iris = datasets.load_iris() +data, y = iris.data, iris.target +rng = np.random.RandomState(0) + + +def test_invalid_input(): + clf = SGDClassifier( + alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None + ) + for threshold in ["gobbledigook", ".5 * gobbledigook"]: + model = SelectFromModel(clf, threshold=threshold) + model.fit(data, y) + with 
pytest.raises(ValueError): + model.transform(data) + + +def test_input_estimator_unchanged(): + # Test that SelectFromModel fits on a clone of the estimator. + est = RandomForestClassifier() + transformer = SelectFromModel(estimator=est) + transformer.fit(data, y) + assert transformer.estimator is est + + +@pytest.mark.parametrize( + "max_features, err_type, err_msg", + [ + ( + data.shape[1] + 1, + ValueError, + "max_features ==", + ), + ( + lambda X: 1.5, + TypeError, + "max_features must be an instance of int, not float.", + ), + ( + lambda X: data.shape[1] + 1, + ValueError, + "max_features ==", + ), + ( + lambda X: -1, + ValueError, + "max_features ==", + ), + ], +) +def test_max_features_error(max_features, err_type, err_msg): + err_msg = re.escape(err_msg) + clf = RandomForestClassifier(n_estimators=5, random_state=0) + + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + with pytest.raises(err_type, match=err_msg): + transformer.fit(data, y) + + +@pytest.mark.parametrize("max_features", [0, 2, data.shape[1], None]) +def test_inferred_max_features_integer(max_features): + """Check max_features_ and output shape for integer max_features.""" + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(data, y) + if max_features is not None: + assert transformer.max_features_ == max_features + assert X_trans.shape[1] == transformer.max_features_ + else: + assert not hasattr(transformer, "max_features_") + assert X_trans.shape[1] == data.shape[1] + + +@pytest.mark.parametrize( + "max_features", + [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], +) +def test_inferred_max_features_callable(max_features): + """Check max_features_ and output shape for callable max_features.""" + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(data, y) + assert transformer.max_features_ == max_features(data) + assert X_trans.shape[1] == transformer.max_features_ + + +@pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2]) +def test_max_features_array_like(max_features): + X = [ + [0.87, -1.34, 0.31], + [-2.79, -0.02, -0.85], + [-1.34, -0.48, -2.55], + [1.92, 1.48, 0.65], + ] + y = [0, 1, 0, 1] + + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(X, y) + assert X_trans.shape[1] == transformer.max_features_ + + +@pytest.mark.parametrize( + "max_features", + [lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1], +) +def test_max_features_callable_data(max_features): + """Tests that the callable passed to `fit` is called on X.""" + clf = RandomForestClassifier(n_estimators=50, random_state=0) + m = Mock(side_effect=max_features) + transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf) + transformer.fit_transform(data, y) + m.assert_called_with(data) + + +class FixedImportanceEstimator(BaseEstimator): + def __init__(self, importances): + self.importances = importances + + def fit(self, X, y=None): + self.feature_importances_ = np.array(self.importances) + + +def test_max_features(): + # Test max_features parameter using various values + X, y = 
datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + max_features = X.shape[1] + est = RandomForestClassifier(n_estimators=50, random_state=0) + + transformer1 = SelectFromModel(estimator=est, threshold=-np.inf) + transformer2 = SelectFromModel( + estimator=est, max_features=max_features, threshold=-np.inf + ) + X_new1 = transformer1.fit_transform(X, y) + X_new2 = transformer2.fit_transform(X, y) + assert_allclose(X_new1, X_new2) + + # Test max_features against actual model. + transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42)) + X_new1 = transformer1.fit_transform(X, y) + scores1 = np.abs(transformer1.estimator_.coef_) + candidate_indices1 = np.argsort(-scores1, kind="mergesort") + + for n_features in range(1, X_new1.shape[1] + 1): + transformer2 = SelectFromModel( + estimator=Lasso(alpha=0.025, random_state=42), + max_features=n_features, + threshold=-np.inf, + ) + X_new2 = transformer2.fit_transform(X, y) + scores2 = np.abs(transformer2.estimator_.coef_) + candidate_indices2 = np.argsort(-scores2, kind="mergesort") + assert_allclose( + X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]] + ) + assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_) + + +def test_max_features_tiebreak(): + # Test if max_features can break tie among feature importance + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + max_features = X.shape[1] + + feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1]) + for n_features in range(1, max_features + 1): + transformer = SelectFromModel( + FixedImportanceEstimator(feature_importances), + max_features=n_features, + threshold=-np.inf, + ) + X_new = transformer.fit_transform(X, y) + selected_feature_indices = np.where(transformer._get_support_mask())[0] + assert_array_equal(selected_feature_indices, np.arange(n_features)) + assert X_new.shape[1] == n_features + + +def test_threshold_and_max_features(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + est = RandomForestClassifier(n_estimators=50, random_state=0) + + transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf) + X_new1 = transformer1.fit_transform(X, y) + + transformer2 = SelectFromModel(estimator=est, threshold=0.04) + X_new2 = transformer2.fit_transform(X, y) + + transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04) + X_new3 = transformer3.fit_transform(X, y) + assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1]) + selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :]) + assert_allclose(X_new3, X[:, selected_indices[0]]) + + +@skip_if_32bit +def test_feature_importances(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + est = RandomForestClassifier(n_estimators=50, random_state=0) + for threshold, func in zip(["mean", "median"], [np.mean, np.median]): + transformer = SelectFromModel(estimator=est, threshold=threshold) + transformer.fit(X, y) + assert hasattr(transformer.estimator_, "feature_importances_") + + X_new = transformer.transform(X) + assert X_new.shape[1] < X.shape[1] + 
importances = transformer.estimator_.feature_importances_ + + feature_mask = np.abs(importances) > func(importances) + assert_array_almost_equal(X_new, X[:, feature_mask]) + + +def test_sample_weight(): + # Ensure sample weights are passed to underlying estimator + X, y = datasets.make_classification( + n_samples=100, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # Check with sample weights + sample_weight = np.ones(y.shape) + sample_weight[y == 1] *= 100 + + est = LogisticRegression(random_state=0, fit_intercept=False) + transformer = SelectFromModel(estimator=est) + transformer.fit(X, y, sample_weight=None) + mask = transformer._get_support_mask() + transformer.fit(X, y, sample_weight=sample_weight) + weighted_mask = transformer._get_support_mask() + assert not np.all(weighted_mask == mask) + transformer.fit(X, y, sample_weight=3 * sample_weight) + reweighted_mask = transformer._get_support_mask() + assert np.all(weighted_mask == reweighted_mask) + + +@pytest.mark.parametrize( + "estimator", + [ + Lasso(alpha=0.1, random_state=42), + LassoCV(random_state=42), + ElasticNet(l1_ratio=1, random_state=42), + ElasticNetCV(l1_ratio=[1], random_state=42), + ], +) +def test_coef_default_threshold(estimator): + X, y = datasets.make_classification( + n_samples=100, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # For the Lasso and related models, the threshold defaults to 1e-5 + transformer = SelectFromModel(estimator=estimator) + transformer.fit(X, y) + X_new = transformer.transform(X) + mask = np.abs(transformer.estimator_.coef_) > 1e-5 + assert_array_almost_equal(X_new, X[:, mask]) + + +@skip_if_32bit +def test_2d_coef(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + n_classes=4, + ) + + est = LogisticRegression() + for threshold, func in zip(["mean", "median"], [np.mean, np.median]): + for order in [1, 2, np.inf]: + # Fit SelectFromModel a multi-class problem + transformer = SelectFromModel( + estimator=LogisticRegression(), threshold=threshold, norm_order=order + ) + transformer.fit(X, y) + assert hasattr(transformer.estimator_, "coef_") + X_new = transformer.transform(X) + assert X_new.shape[1] < X.shape[1] + + # Manually check that the norm is correctly performed + est.fit(X, y) + importances = np.linalg.norm(est.coef_, axis=0, ord=order) + feature_mask = importances > func(importances) + assert_array_almost_equal(X_new, X[:, feature_mask]) + + +def test_partial_fit(): + est = PassiveAggressiveClassifier( + random_state=0, shuffle=False, max_iter=5, tol=None + ) + transformer = SelectFromModel(estimator=est) + transformer.partial_fit(data, y, classes=np.unique(y)) + old_model = transformer.estimator_ + transformer.partial_fit(data, y, classes=np.unique(y)) + new_model = transformer.estimator_ + assert old_model is new_model + + X_transform = transformer.transform(data) + transformer.fit(np.vstack((data, data)), np.concatenate((y, y))) + assert_array_almost_equal(X_transform, transformer.transform(data)) + + # check that if est doesn't have partial_fit, neither does SelectFromModel + transformer = SelectFromModel(estimator=RandomForestClassifier()) + assert not hasattr(transformer, "partial_fit") + + +def test_calling_fit_reinitializes(): + est = LinearSVC(dual="auto", random_state=0) + transformer = SelectFromModel(estimator=est) + 
transformer.fit(data, y)
+    transformer.set_params(estimator__C=100)
+    transformer.fit(data, y)
+    assert transformer.estimator_.C == 100
+
+
+def test_prefit():
+    # Test all possible combinations of the prefit parameter.
+
+    # Passing a prefit parameter with the selected model
+    # and fitting an unfit model with prefit=False should give the same results.
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
+    model = SelectFromModel(clf)
+    model.fit(data, y)
+    X_transform = model.transform(data)
+    clf.fit(data, y)
+    model = SelectFromModel(clf, prefit=True)
+    assert_array_almost_equal(model.transform(data), X_transform)
+    model.fit(data, y)
+    assert model.estimator_ is not clf
+
+    # Check that the model is refitted if prefit=False and a fitted model is
+    # passed
+    model = SelectFromModel(clf, prefit=False)
+    model.fit(data, y)
+    assert_array_almost_equal(model.transform(data), X_transform)
+
+    # Check that passing an unfitted estimator with `prefit=True` raises a
+    # `NotFittedError`
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
+    model = SelectFromModel(clf, prefit=True)
+    err_msg = "When `prefit=True`, `estimator` is expected to be a fitted estimator."
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.fit(data, y)
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.partial_fit(data, y)
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.transform(data)
+
+    # Check that the internal parameters of the prefitted model are not changed
+    # when calling `fit` or `partial_fit` with `prefit=True`
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, tol=None).fit(data, y)
+    model = SelectFromModel(clf, prefit=True)
+    model.fit(data, y)
+    assert_allclose(model.estimator_.coef_, clf.coef_)
+    model.partial_fit(data, y)
+    assert_allclose(model.estimator_.coef_, clf.coef_)
+
+
+def test_prefit_max_features():
+    """Check the interaction between `prefit` and `max_features`."""
+    # case 1: an error should be raised at `transform` if `fit` was not called to
+    # validate the attributes
+    estimator = RandomForestClassifier(n_estimators=5, random_state=0)
+    estimator.fit(data, y)
+    model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1])
+
+    err_msg = (
+        "When `prefit=True` and `max_features` is a callable, call `fit` "
+        "before calling `transform`."
+    )
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.transform(data)
+
+    # case 2: `max_features` is not validated and different from an integer
+    # FIXME: we cannot validate the upper bound of the attribute at transform
+    # and we should force calling `fit` if we intend to force the attribute
+    # to have such an upper bound.
+    max_features = 2.5
+    model.set_params(max_features=max_features)
+    with pytest.raises(ValueError, match="`max_features` must be an integer"):
+        model.transform(data)
+
+
+def test_prefit_get_feature_names_out():
+    """Check the interaction between prefit and the feature names."""
+    clf = RandomForestClassifier(n_estimators=2, random_state=0)
+    clf.fit(data, y)
+    model = SelectFromModel(clf, prefit=True, max_features=1)
+
+    name = type(model).__name__
+    err_msg = (
+        f"This {name} instance is not fitted yet. Call 'fit' with "
+        "appropriate arguments before using this estimator."
+    )
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.get_feature_names_out()
+
+    model.fit(data, y)
+    feature_names = model.get_feature_names_out()
+    assert feature_names == ["x3"]
+
+
+def test_threshold_string():
+    est = RandomForestClassifier(n_estimators=50, random_state=0)
+    model = SelectFromModel(est, threshold="0.5*mean")
+    model.fit(data, y)
+    X_transform = model.transform(data)
+
+    # Calculate the threshold from the estimator directly.
+    est.fit(data, y)
+    threshold = 0.5 * np.mean(est.feature_importances_)
+    mask = est.feature_importances_ > threshold
+    assert_array_almost_equal(X_transform, data[:, mask])
+
+
+def test_threshold_without_refitting():
+    # Test that the threshold can be set without refitting the model.
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
+    model = SelectFromModel(clf, threshold="0.1 * mean")
+    model.fit(data, y)
+    X_transform = model.transform(data)
+
+    # Set a higher threshold to filter out more features.
+    model.threshold = "1.0 * mean"
+    assert X_transform.shape[1] > model.transform(data).shape[1]
+
+
+def test_fit_accepts_nan_inf():
+    # Test that fit doesn't check for np.inf and np.nan values.
+    clf = HistGradientBoostingClassifier(random_state=0)
+
+    model = SelectFromModel(estimator=clf)
+
+    nan_data = data.copy()
+    nan_data[0] = np.nan
+    nan_data[1] = np.inf
+
+    model.fit(nan_data, y)
+
+
+def test_transform_accepts_nan_inf():
+    # Test that transform doesn't check for np.inf and np.nan values.
+    clf = NaNTagRandomForest(n_estimators=100, random_state=0)
+    nan_data = data.copy()
+
+    model = SelectFromModel(estimator=clf)
+    model.fit(nan_data, y)
+
+    nan_data[0] = np.nan
+    nan_data[1] = np.inf
+
+    model.transform(nan_data)
+
+
+def test_allow_nan_tag_comes_from_estimator():
+    allow_nan_est = NaNTag()
+    model = SelectFromModel(estimator=allow_nan_est)
+    assert model._get_tags()["allow_nan"] is True
+
+    no_nan_est = NoNaNTag()
+    model = SelectFromModel(estimator=no_nan_est)
+    assert model._get_tags()["allow_nan"] is False
+
+
+def _pca_importances(pca_estimator):
+    return np.abs(pca_estimator.explained_variance_)
+
+
+@pytest.mark.parametrize(
+    "estimator, importance_getter",
+    [
+        (
+            make_pipeline(PCA(random_state=0), LogisticRegression()),
+            "named_steps.logisticregression.coef_",
+        ),
+        (PCA(random_state=0), _pca_importances),
+    ],
+)
+def test_importance_getter(estimator, importance_getter):
+    selector = SelectFromModel(
+        estimator, threshold="mean", importance_getter=importance_getter
+    )
+    selector.fit(data, y)
+    assert selector.transform(data).shape[1] == 1
+
+
+@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
+def test_select_from_model_pls(PLSEstimator):
+    """Check the behaviour of SelectFromModel with PLS estimators.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/12410
+    """
+    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
+    estimator = PLSEstimator(n_components=1)
+    model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y)
+    assert model.score(X, y) > 0.5
+
+
+def test_estimator_does_not_support_feature_names():
+    """SelectFromModel works with estimators that do not support feature_names_in_.
+
+    Non-regression test for #21949.
+ """ + pytest.importorskip("pandas") + X, y = datasets.load_iris(as_frame=True, return_X_y=True) + all_feature_names = set(X.columns) + + def importance_getter(estimator): + return np.arange(X.shape[1]) + + selector = SelectFromModel( + MinimalClassifier(), importance_getter=importance_getter + ).fit(X, y) + + # selector learns the feature names itself + assert_array_equal(selector.feature_names_in_, X.columns) + + feature_names_out = set(selector.get_feature_names_out()) + assert feature_names_out < all_feature_names + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + + selector.transform(X.iloc[1:3]) + + +@pytest.mark.parametrize( + "error, err_msg, max_features", + ( + [ValueError, "max_features == 10, must be <= 4", 10], + [ValueError, "max_features == 5, must be <= 4", lambda x: x.shape[1] + 1], + ), +) +def test_partial_fit_validate_max_features(error, err_msg, max_features): + """Test that partial_fit from SelectFromModel validates `max_features`.""" + X, y = datasets.make_classification( + n_samples=100, + n_features=4, + random_state=0, + ) + + with pytest.raises(error, match=err_msg): + SelectFromModel( + estimator=SGDClassifier(), max_features=max_features + ).partial_fit(X, y, classes=[0, 1]) + + +@pytest.mark.parametrize("as_frame", [True, False]) +def test_partial_fit_validate_feature_names(as_frame): + """Test that partial_fit from SelectFromModel validates `feature_names_in_`.""" + pytest.importorskip("pandas") + X, y = datasets.load_iris(as_frame=as_frame, return_X_y=True) + + selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit( + X, y, classes=[0, 1, 2] + ) + if as_frame: + assert_array_equal(selector.feature_names_in_, X.columns) + else: + assert not hasattr(selector, "feature_names_in_") + + +def test_from_model_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the estimator + does not implement the `partial_fit` method, which is decorated with + `available_if`. 
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/28108
+    """
+    # `LinearRegression` does not implement 'partial_fit' and should raise an
+    # AttributeError
+    from_model = SelectFromModel(estimator=LinearRegression())
+
+    outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'"
+    inner_msg = "'LinearRegression' object has no attribute 'partial_fit'"
+    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
+        from_model.fit(data, y).partial_fit(data)
+    assert isinstance(exec_info.value.__cause__, AttributeError)
+    assert inner_msg in str(exec_info.value.__cause__)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..26367544baa539d8daa7b6508f4ae23cbf4da31c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py
@@ -0,0 +1,254 @@
+import numpy as np
+import pytest
+
+from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
+from sklearn.feature_selection._mutual_info import _compute_mi
+from sklearn.utils import check_random_state
+from sklearn.utils._testing import (
+    assert_allclose,
+    assert_array_equal,
+)
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+
+def test_compute_mi_dd():
+    # In the discrete case, computations are straightforward and can be done
+    # by hand on given vectors.
+    x = np.array([0, 1, 1, 0, 0])
+    y = np.array([1, 0, 0, 0, 1])
+
+    H_x = H_y = -(3 / 5) * np.log(3 / 5) - (2 / 5) * np.log(2 / 5)
+    H_xy = -1 / 5 * np.log(1 / 5) - 2 / 5 * np.log(2 / 5) - 2 / 5 * np.log(2 / 5)
+    I_xy = H_x + H_y - H_xy
+
+    assert_allclose(_compute_mi(x, y, x_discrete=True, y_discrete=True), I_xy)
+
+
+def test_compute_mi_cc(global_dtype):
+    # For two continuous variables a good approach is to test on bivariate
+    # normal distribution, where mutual information is known.
+
+    # Mean of the distribution, irrelevant for mutual information.
+    mean = np.zeros(2)
+
+    # Set up the covariance matrix with correlation coefficient equal to 0.5.
+    sigma_1 = 1
+    sigma_2 = 10
+    corr = 0.5
+    cov = np.array(
+        [
+            [sigma_1**2, corr * sigma_1 * sigma_2],
+            [corr * sigma_1 * sigma_2, sigma_2**2],
+        ]
+    )
+
+    # True theoretical mutual information.
+    I_theory = np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov))
+
+    rng = check_random_state(0)
+    Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False)
+
+    x, y = Z[:, 0], Z[:, 1]
+
+    # Theory and computed values won't be very close, so we check with a
+    # large relative tolerance.
+    for n_neighbors in [3, 5, 7]:
+        I_computed = _compute_mi(
+            x, y, x_discrete=False, y_discrete=False, n_neighbors=n_neighbors
+        )
+        assert_allclose(I_computed, I_theory, rtol=1e-1)
+
+
+def test_compute_mi_cd(global_dtype):
+    # To test, define a joint distribution as follows:
+    # p(x, y) = p(x) p(y | x)
+    # X ~ Bernoulli(p)
+    # (Y | x = 0) ~ Uniform(-1, 1)
+    # (Y | x = 1) ~ Uniform(0, 2)
+
+    # Use the following formula for mutual information:
+    # I(X; Y) = H(Y) - H(Y | X)
+    # Two entropies can be computed by hand:
+    # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2 * ln(p/2) - 1/2 * ln(1/2)
+    # H(Y | X) = ln(2)
+
+    # Now we need to implement sampling from our distribution, which is
+    # done easily using conditional distribution logic.
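+    # The resulting marginal density of Y is piecewise constant: (1 - p)/2 on
+    # (-1, 0), 1/2 on (0, 1) and p/2 on (1, 2), which is where the H(Y)
+    # expression above (and I_theory below) comes from.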
+
+    n_samples = 1000
+    rng = check_random_state(0)
+
+    for p in [0.3, 0.5, 0.7]:
+        x = rng.uniform(size=n_samples) > p
+
+        y = np.empty(n_samples, global_dtype)
+        mask = x == 0
+        y[mask] = rng.uniform(-1, 1, size=np.sum(mask))
+        y[~mask] = rng.uniform(0, 2, size=np.sum(~mask))
+
+        I_theory = -0.5 * (
+            (1 - p) * np.log(0.5 * (1 - p)) + p * np.log(0.5 * p) + np.log(0.5)
+        ) - np.log(2)
+
+        # Check with the same large relative tolerance as above.
+        for n_neighbors in [3, 5, 7]:
+            I_computed = _compute_mi(
+                x, y, x_discrete=True, y_discrete=False, n_neighbors=n_neighbors
+            )
+            assert_allclose(I_computed, I_theory, rtol=1e-1)
+
+
+def test_compute_mi_cd_unique_label(global_dtype):
+    # Test that adding a unique label doesn't change MI.
+    n_samples = 100
+    x = np.random.uniform(size=n_samples) > 0.5
+
+    y = np.empty(n_samples, global_dtype)
+    mask = x == 0
+    y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
+    y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
+
+    mi_1 = _compute_mi(x, y, x_discrete=True, y_discrete=False)
+
+    x = np.hstack((x, 2))
+    y = np.hstack((y, 10))
+    mi_2 = _compute_mi(x, y, x_discrete=True, y_discrete=False)
+
+    assert_allclose(mi_1, mi_2)
+
+
+# We are going to test that feature ordering by MI matches our expectations.
+def test_mutual_info_classif_discrete(global_dtype):
+    X = np.array(
+        [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype
+    )
+    y = np.array([0, 1, 2, 2, 1])
+
+    # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
+    # informative.
+    mi = mutual_info_classif(X, y, discrete_features=True)
+    assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))
+
+
+def test_mutual_info_regression(global_dtype):
+    # We generate samples from a multivariate normal distribution, using a
+    # transformation of initially uncorrelated variables. The zeroth variable
+    # after the transformation is selected as the target vector; it has the
+    # strongest correlation with variable 2 and the weakest correlation with
+    # variable 1.
+    T = np.array([[1, 0.5, 2, 1], [0, 1, 0.1, 0.0], [0, 0.1, 1, 0.1], [0, 0.1, 0.1, 1]])
+    cov = T.dot(T.T)
+    mean = np.zeros(4)
+
+    rng = check_random_state(0)
+    Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False)
+    X = Z[:, 1:]
+    y = Z[:, 0]
+
+    mi = mutual_info_regression(X, y, random_state=0)
+    assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
+    # XXX: should mutual_info_regression be fixed to avoid
+    # up-casting float32 inputs to float64?
+    assert mi.dtype == np.float64
+
+
+def test_mutual_info_classif_mixed(global_dtype):
+    # Here the target is discrete and there are two continuous and one
+    # discrete feature. The idea of this test is clear from the code.
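+    # Concretely: X[:, 1] is X[:, 0] plus noise, X[:, 2] is binarized (hence
+    # discrete), and y is driven by X[:, 0] and X[:, 2], so the expected MI
+    # ordering is X[:, 2], then X[:, 0], then X[:, 1].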
+    rng = check_random_state(0)
+    X = rng.rand(1000, 3).astype(global_dtype, copy=False)
+    X[:, 1] += X[:, 0]
+    y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
+    X[:, 2] = X[:, 2] > 0.5
+
+    mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3, random_state=0)
+    assert_array_equal(np.argsort(-mi), [2, 0, 1])
+    for n_neighbors in [5, 7, 9]:
+        mi_nn = mutual_info_classif(
+            X, y, discrete_features=[2], n_neighbors=n_neighbors, random_state=0
+        )
+        # Check that the continuous values have a higher MI with greater
+        # n_neighbors
+        assert mi_nn[0] > mi[0]
+        assert mi_nn[1] > mi[1]
+        # n_neighbors should not have any effect on the discrete value:
+        # the MI should be the same
+        assert mi_nn[2] == mi[2]
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_mutual_info_options(global_dtype, csr_container):
+    X = np.array(
+        [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype
+    )
+    y = np.array([0, 1, 2, 2, 1], dtype=global_dtype)
+    X_csr = csr_container(X)
+
+    for mutual_info in (mutual_info_regression, mutual_info_classif):
+        with pytest.raises(ValueError):
+            mutual_info(X_csr, y, discrete_features=False)
+        with pytest.raises(ValueError):
+            mutual_info(X, y, discrete_features="manual")
+        with pytest.raises(ValueError):
+            mutual_info(X_csr, y, discrete_features=[True, False, True])
+        with pytest.raises(IndexError):
+            mutual_info(X, y, discrete_features=[True, False, True, False])
+        with pytest.raises(IndexError):
+            mutual_info(X, y, discrete_features=[1, 4])
+
+        mi_1 = mutual_info(X, y, discrete_features="auto", random_state=0)
+        mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)
+        mi_3 = mutual_info(X_csr, y, discrete_features="auto", random_state=0)
+        mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0)
+        mi_5 = mutual_info(X, y, discrete_features=[True, False, True], random_state=0)
+        mi_6 = mutual_info(X, y, discrete_features=[0, 2], random_state=0)
+
+        assert_allclose(mi_1, mi_2)
+        assert_allclose(mi_3, mi_4)
+        assert_allclose(mi_5, mi_6)
+
+        assert not np.allclose(mi_1, mi_3)
+
+
+@pytest.mark.parametrize("correlated", [True, False])
+def test_mutual_information_symmetry_classif_regression(correlated, global_random_seed):
+    """Check that `mutual_info_classif` and `mutual_info_regression` are
+    symmetric when the target `y` and the feature in `X` are swapped.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/23720
+    """
+    rng = np.random.RandomState(global_random_seed)
+    n = 100
+    d = rng.randint(10, size=n)
+
+    if correlated:
+        c = d.astype(np.float64)
+    else:
+        c = rng.normal(0, 1, size=n)
+
+    mi_classif = mutual_info_classif(
+        c[:, None], d, discrete_features=[False], random_state=global_random_seed
+    )
+
+    mi_regression = mutual_info_regression(
+        d[:, None], c, discrete_features=[True], random_state=global_random_seed
+    )
+
+    assert mi_classif == pytest.approx(mi_regression)
+
+
+def test_mutual_info_regression_X_int_dtype(global_random_seed):
+    """Check that results agree when X is integer dtype and float dtype.
+
+    Non-regression test for Issue #26696.
+ """ + rng = np.random.RandomState(global_random_seed) + X = rng.randint(100, size=(100, 10)) + X_float = X.astype(np.float64, copy=True) + y = rng.randint(100, size=100) + + expected = mutual_info_regression(X_float, y, random_state=global_random_seed) + result = mutual_info_regression(X, y, random_state=global_random_seed) + assert_allclose(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py new file mode 100644 index 0000000000000000000000000000000000000000..e3edb0e7b5d213dc4b9445a3cf971a1bc4d28398 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py @@ -0,0 +1,615 @@ +""" +Testing Recursive feature elimination +""" + +from operator import attrgetter + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal + +from sklearn.base import BaseEstimator, ClassifierMixin +from sklearn.compose import TransformedTargetRegressor +from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression +from sklearn.datasets import load_iris, make_friedman1 +from sklearn.ensemble import RandomForestClassifier +from sklearn.feature_selection import RFE, RFECV +from sklearn.impute import SimpleImputer +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.metrics import get_scorer, make_scorer, zero_one_loss +from sklearn.model_selection import GroupKFold, cross_val_score +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC, SVR, LinearSVR +from sklearn.utils import check_random_state +from sklearn.utils._testing import ignore_warnings +from sklearn.utils.fixes import CSR_CONTAINERS + + +class MockClassifier: + """ + Dummy classifier to test recursive feature elimination + """ + + def __init__(self, foo_param=0): + self.foo_param = foo_param + + def fit(self, X, y): + assert len(X) == len(y) + self.coef_ = np.ones(X.shape[1], dtype=np.float64) + return self + + def predict(self, T): + return T.shape[0] + + predict_proba = predict + decision_function = predict + transform = predict + + def score(self, X=None, y=None): + return 0.0 + + def get_params(self, deep=True): + return {"foo_param": self.foo_param} + + def set_params(self, **params): + return self + + def _more_tags(self): + return {"allow_nan": True} + + +def test_rfe_features_importance(): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2) + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + assert len(rfe.ranking_) == X.shape[1] + + clf_svc = SVC(kernel="linear") + rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1) + rfe_svc.fit(X, y) + + # Check if the supports are equal + assert_array_equal(rfe.get_support(), rfe_svc.get_support()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_rfe(csr_container): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. 
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + X_sparse = csr_container(X) + y = iris.target + + # dense model + clf = SVC(kernel="linear") + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + X_r = rfe.transform(X) + clf.fit(X_r, y) + assert len(rfe.ranking_) == X.shape[1] + + # sparse model + clf_sparse = SVC(kernel="linear") + rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) + rfe_sparse.fit(X_sparse, y) + X_r_sparse = rfe_sparse.transform(X_sparse) + + assert X_r.shape == iris.data.shape + assert_array_almost_equal(X_r[:10], iris.data[:10]) + + assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) + assert rfe.score(X, y) == clf.score(iris.data, iris.target) + assert_array_almost_equal(X_r, X_r_sparse.toarray()) + + +def test_RFE_fit_score_params(): + # Make sure RFE passes the metadata down to fit and score methods of the + # underlying estimator + class TestEstimator(BaseEstimator, ClassifierMixin): + def fit(self, X, y, prop=None): + if prop is None: + raise ValueError("fit: prop cannot be None") + self.svc_ = SVC(kernel="linear").fit(X, y) + self.coef_ = self.svc_.coef_ + return self + + def score(self, X, y, prop=None): + if prop is None: + raise ValueError("score: prop cannot be None") + return self.svc_.score(X, y) + + X, y = load_iris(return_X_y=True) + with pytest.raises(ValueError, match="fit: prop cannot be None"): + RFE(estimator=TestEstimator()).fit(X, y) + with pytest.raises(ValueError, match="score: prop cannot be None"): + RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y) + + RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y, prop="foo") + + +def test_rfe_percent_n_features(): + # test that the results are the same + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + # there are 10 features in the data. We select 40%. + clf = SVC(kernel="linear") + rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe_num.fit(X, y) + + rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1) + rfe_perc.fit(X, y) + + assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_) + assert_array_equal(rfe_perc.support_, rfe_num.support_) + + +def test_rfe_mockclassifier(): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + # dense model + clf = MockClassifier() + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + X_r = rfe.transform(X) + clf.fit(X_r, y) + assert len(rfe.ranking_) == X.shape[1] + assert X_r.shape == iris.data.shape + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_rfecv(csr_container): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. 
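+    # Unlike plain RFE, RFECV also selects how many features to keep via
+    # cross-validation, so all 6 noisy features should be dropped.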
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = list(iris.target)  # regression test: list should be supported
+
+    # Test using the score function
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1)
+    rfecv.fit(X, y)
+    # non-regression test for missing worst feature:
+
+    for key in rfecv.cv_results_.keys():
+        assert len(rfecv.cv_results_[key]) == X.shape[1]
+
+    assert len(rfecv.ranking_) == X.shape[1]
+    X_r = rfecv.transform(X)
+
+    # All the noisy variables were filtered out
+    assert_array_equal(X_r, iris.data)
+
+    # same in sparse
+    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1)
+    X_sparse = csr_container(X)
+    rfecv_sparse.fit(X_sparse, y)
+    X_r_sparse = rfecv_sparse.transform(X_sparse)
+    assert_array_equal(X_r_sparse.toarray(), iris.data)
+
+    # Test using a customized loss function
+    scoring = make_scorer(zero_one_loss, greater_is_better=False)
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring)
+    ignore_warnings(rfecv.fit)(X, y)
+    X_r = rfecv.transform(X)
+    assert_array_equal(X_r, iris.data)
+
+    # Test using a scorer
+    scorer = get_scorer("accuracy")
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer)
+    rfecv.fit(X, y)
+    X_r = rfecv.transform(X)
+    assert_array_equal(X_r, iris.data)
+
+    # Test fix on cv_results_
+    def test_scorer(estimator, X, y):
+        return 1.0
+
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer)
+    rfecv.fit(X, y)
+
+    # In the event of cross validation score ties, the expected behavior of
+    # RFECV is to return the FEWEST features that maximize the CV score.
+    # Because test_scorer always returns 1.0 in this example, RFECV should
+    # reduce the dimensionality to a single feature (i.e. n_features_ = 1)
+    assert rfecv.n_features_ == 1
+
+    # Same as the first two tests, but with step=2
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2)
+    rfecv.fit(X, y)
+
+    for key in rfecv.cv_results_.keys():
+        assert len(rfecv.cv_results_[key]) == 6
+
+    assert len(rfecv.ranking_) == X.shape[1]
+    X_r = rfecv.transform(X)
+    assert_array_equal(X_r, iris.data)
+
+    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2)
+    X_sparse = csr_container(X)
+    rfecv_sparse.fit(X_sparse, y)
+    X_r_sparse = rfecv_sparse.transform(X_sparse)
+    assert_array_equal(X_r_sparse.toarray(), iris.data)
+
+    # Verifying that steps < 1 don't blow up.
+    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=0.2)
+    X_sparse = csr_container(X)
+    rfecv_sparse.fit(X_sparse, y)
+    X_r_sparse = rfecv_sparse.transform(X_sparse)
+    assert_array_equal(X_r_sparse.toarray(), iris.data)
+
+
+def test_rfecv_mockclassifier():
+    generator = check_random_state(0)
+    iris = load_iris()
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = list(iris.target)  # regression test: list should be supported
+
+    # Test using the score function
+    rfecv = RFECV(estimator=MockClassifier(), step=1)
+    rfecv.fit(X, y)
+    # non-regression test for missing worst feature:
+
+    for key in rfecv.cv_results_.keys():
+        assert len(rfecv.cv_results_[key]) == X.shape[1]
+
+    assert len(rfecv.ranking_) == X.shape[1]
+
+
+def test_rfecv_verbose_output():
+    # Check that verbose=1 produces output.
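+    # Redirect stdout so the verbose output can be inspected.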
+ import sys + from io import StringIO + + sys.stdout = StringIO() + + generator = check_random_state(0) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) + + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1) + rfecv.fit(X, y) + + verbose_output = sys.stdout + verbose_output.seek(0) + assert len(verbose_output.readline()) > 0 + + +def test_rfecv_cv_results_size(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) # regression test: list should be supported + + # Non-regression test for varying combinations of step and + # min_features_to_select. + for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]: + rfecv = RFECV( + estimator=MockClassifier(), + step=step, + min_features_to_select=min_features_to_select, + ) + rfecv.fit(X, y) + + score_len = np.ceil((X.shape[1] - min_features_to_select) / step) + 1 + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == score_len + + assert len(rfecv.ranking_) == X.shape[1] + assert rfecv.n_features_ >= min_features_to_select + + +def test_rfe_estimator_tags(): + rfe = RFE(SVC(kernel="linear")) + assert rfe._estimator_type == "classifier" + # make sure that cross-validation is stratified + iris = load_iris() + score = cross_val_score(rfe, iris.data, iris.target) + assert score.min() > 0.7 + + +def test_rfe_min_step(global_random_seed): + n_features = 10 + X, y = make_friedman1( + n_samples=50, n_features=n_features, random_state=global_random_seed + ) + n_samples, n_features = X.shape + estimator = SVR(kernel="linear") + + # Test when floor(step * n_features) <= 0 + selector = RFE(estimator, step=0.01) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + # Test when step is between (0,1) and floor(step * n_features) > 0 + selector = RFE(estimator, step=0.20) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + # Test when step is an integer + selector = RFE(estimator, step=5) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + +def test_number_of_subsets_of_features(global_random_seed): + # In RFE, 'number_of_subsets_of_features' + # = the number of iterations in '_fit' + # = max(ranking_) + # = 1 + (n_features + step - n_features_to_select - 1) // step + # After optimization #4534, this number + # = 1 + np.ceil((n_features - n_features_to_select) / float(step)) + # This test case is to test their equivalence, refer to #4534 and #3824 + + def formula1(n_features, n_features_to_select, step): + return 1 + ((n_features + step - n_features_to_select - 1) // step) + + def formula2(n_features, n_features_to_select, step): + return 1 + np.ceil((n_features - n_features_to_select) / float(step)) + + # RFE + # Case 1, n_features - n_features_to_select is divisible by step + # Case 2, n_features - n_features_to_select is not divisible by step + n_features_list = [11, 11] + n_features_to_select_list = [3, 3] + step_list = [2, 3] + for n_features, n_features_to_select, step in zip( + n_features_list, n_features_to_select_list, step_list + ): + generator = check_random_state(global_random_seed) + X = generator.normal(size=(100, n_features)) + y = generator.rand(100).round() + rfe = RFE( + estimator=SVC(kernel="linear"), + n_features_to_select=n_features_to_select, + step=step, + ) + rfe.fit(X, y) + # this number also equals to the maximum of ranking_ + 
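+        # Worked example: with n_features=11, n_features_to_select=3 and
+        # step=2 both formulas give 1 + ceil((11 - 3) / 2) = 5; with
+        # step=3 they give 1 + ceil(8 / 3) = 4.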
assert np.max(rfe.ranking_) == formula1(n_features, n_features_to_select, step) + assert np.max(rfe.ranking_) == formula2(n_features, n_features_to_select, step) + + # In RFECV, 'fit' calls 'RFE._fit' + # 'number_of_subsets_of_features' of RFE + # = the size of each score in 'cv_results_' of RFECV + # = the number of iterations of the for loop before optimization #4534 + + # RFECV, n_features_to_select = 1 + # Case 1, n_features - 1 is divisible by step + # Case 2, n_features - 1 is not divisible by step + + n_features_to_select = 1 + n_features_list = [11, 10] + step_list = [2, 2] + for n_features, step in zip(n_features_list, step_list): + generator = check_random_state(global_random_seed) + X = generator.normal(size=(100, n_features)) + y = generator.rand(100).round() + rfecv = RFECV(estimator=SVC(kernel="linear"), step=step) + rfecv.fit(X, y) + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == formula1( + n_features, n_features_to_select, step + ) + assert len(rfecv.cv_results_[key]) == formula2( + n_features, n_features_to_select, step + ) + + +def test_rfe_cv_n_jobs(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + rfecv = RFECV(estimator=SVC(kernel="linear")) + rfecv.fit(X, y) + rfecv_ranking = rfecv.ranking_ + + rfecv_cv_results_ = rfecv.cv_results_ + + rfecv.set_params(n_jobs=2) + rfecv.fit(X, y) + assert_array_almost_equal(rfecv.ranking_, rfecv_ranking) + + assert rfecv_cv_results_.keys() == rfecv.cv_results_.keys() + for key in rfecv_cv_results_.keys(): + assert rfecv_cv_results_[key] == pytest.approx(rfecv.cv_results_[key]) + + +def test_rfe_cv_groups(): + generator = check_random_state(0) + iris = load_iris() + number_groups = 4 + groups = np.floor(np.linspace(0, number_groups, len(iris.target))) + X = iris.data + y = (iris.target > 0).astype(int) + + est_groups = RFECV( + estimator=RandomForestClassifier(random_state=generator), + step=1, + scoring="accuracy", + cv=GroupKFold(n_splits=2), + ) + est_groups.fit(X, y, groups=groups) + assert est_groups.n_features_ > 0 + + +@pytest.mark.parametrize( + "importance_getter", [attrgetter("regressor_.coef_"), "regressor_.coef_"] +) +@pytest.mark.parametrize("selector, expected_n_features", [(RFE, 5), (RFECV, 4)]) +def test_rfe_wrapped_estimator(importance_getter, selector, expected_n_features): + # Non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/15312 + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = LinearSVR(dual="auto", random_state=0) + + log_estimator = TransformedTargetRegressor( + regressor=estimator, func=np.log, inverse_func=np.exp + ) + + selector = selector(log_estimator, importance_getter=importance_getter) + sel = selector.fit(X, y) + assert sel.support_.sum() == expected_n_features + + +@pytest.mark.parametrize( + "importance_getter, err_type", + [ + ("auto", ValueError), + ("random", AttributeError), + (lambda x: x.importance, AttributeError), + ], +) +@pytest.mark.parametrize("Selector", [RFE, RFECV]) +def test_rfe_importance_getter_validation(importance_getter, err_type, Selector): + X, y = make_friedman1(n_samples=50, n_features=10, random_state=42) + estimator = LinearSVR(dual="auto") + log_estimator = TransformedTargetRegressor( + regressor=estimator, func=np.log, inverse_func=np.exp + ) + + with pytest.raises(err_type): + model = Selector(log_estimator, importance_getter=importance_getter) 
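+        # Construction succeeds; the invalid importance_getter is only
+        # used (and fails) once `fit` is called.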
+ model.fit(X, y) + + +@pytest.mark.parametrize("cv", [None, 5]) +def test_rfe_allow_nan_inf_in_x(cv): + iris = load_iris() + X = iris.data + y = iris.target + + # add nan and inf value to X + X[0][0] = np.nan + X[0][1] = np.inf + + clf = MockClassifier() + if cv is not None: + rfe = RFECV(estimator=clf, cv=cv) + else: + rfe = RFE(estimator=clf) + rfe.fit(X, y) + rfe.transform(X) + + +def test_w_pipeline_2d_coef_(): + pipeline = make_pipeline(StandardScaler(), LogisticRegression()) + + data, y = load_iris(return_X_y=True) + sfm = RFE( + pipeline, + n_features_to_select=2, + importance_getter="named_steps.logisticregression.coef_", + ) + + sfm.fit(data, y) + assert sfm.transform(data).shape[1] == 2 + + +def test_rfecv_std_and_mean(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + rfecv = RFECV(estimator=SVC(kernel="linear")) + rfecv.fit(X, y) + n_split_keys = len(rfecv.cv_results_) - 2 + split_keys = [f"split{i}_test_score" for i in range(n_split_keys)] + + cv_scores = np.asarray([rfecv.cv_results_[key] for key in split_keys]) + expected_mean = np.mean(cv_scores, axis=0) + expected_std = np.std(cv_scores, axis=0) + + assert_allclose(rfecv.cv_results_["mean_test_score"], expected_mean) + assert_allclose(rfecv.cv_results_["std_test_score"], expected_std) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +def test_multioutput(ClsRFE): + X = np.random.normal(size=(10, 3)) + y = np.random.randint(2, size=(10, 2)) + clf = RandomForestClassifier(n_estimators=5) + rfe_test = ClsRFE(clf) + rfe_test.fit(X, y) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +def test_pipeline_with_nans(ClsRFE): + """Check that RFE works with pipeline that accept nans. + + Non-regression test for gh-21743. + """ + X, y = load_iris(return_X_y=True) + X[0, 0] = np.nan + + pipe = make_pipeline( + SimpleImputer(), + StandardScaler(), + LogisticRegression(), + ) + + fs = ClsRFE( + estimator=pipe, + importance_getter="named_steps.logisticregression.coef_", + ) + fs.fit(X, y) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression]) +def test_rfe_pls(ClsRFE, PLSEstimator): + """Check the behaviour of RFE with PLS estimators. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12410 + """ + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = PLSEstimator(n_components=1) + selector = ClsRFE(estimator, step=1).fit(X, y) + assert selector.score(X, y) > 0.5 + + +def test_rfe_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the estimator + does not implement the `decision_function` method, which is decorated with + `available_if`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + iris = load_iris() + + # `LinearRegression` does not implement 'decision_function' and should raise an + # AttributeError + rfe = RFE(estimator=LinearRegression()) + + outer_msg = "This 'RFE' has no attribute 'decision_function'" + inner_msg = "'LinearRegression' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + rfe.fit(iris.data, iris.target).decision_function(iris.data) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..82d65c55a019512ecef189a881fd9316bd813d70 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py @@ -0,0 +1,323 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.cluster import KMeans +from sklearn.datasets import make_blobs, make_classification, make_regression +from sklearn.ensemble import HistGradientBoostingRegressor +from sklearn.feature_selection import SequentialFeatureSelector +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import LeaveOneGroupOut, cross_val_score +from sklearn.neighbors import KNeighborsClassifier +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_bad_n_features_to_select(): + n_features = 5 + X, y = make_regression(n_features=n_features) + sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=n_features) + with pytest.raises(ValueError, match="n_features_to_select must be < n_features"): + sfs.fit(X, y) + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto")) +def test_n_features_to_select(direction, n_features_to_select): + # Make sure n_features_to_select is respected + + n_features = 10 + X, y = make_regression(n_features=n_features, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + + if n_features_to_select == "auto": + n_features_to_select = n_features // 2 + + assert sfs.get_support(indices=True).shape[0] == n_features_to_select + assert sfs.n_features_to_select_ == n_features_to_select + assert sfs.transform(X).shape[1] == n_features_to_select + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +def test_n_features_to_select_auto(direction): + """Check the behaviour of `n_features_to_select="auto"` with different + values for the parameter `tol`. 
+ """ + + n_features = 10 + tol = 1e-3 + X, y = make_regression(n_features=n_features, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + tol=tol, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + + max_features_to_select = n_features - 1 + + assert sfs.get_support(indices=True).shape[0] <= max_features_to_select + assert sfs.n_features_to_select_ <= max_features_to_select + assert sfs.transform(X).shape[1] <= max_features_to_select + assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_ + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +def test_n_features_to_select_stopping_criterion(direction): + """Check the behaviour stopping criterion for feature selection + depending on the values of `n_features_to_select` and `tol`. + + When `direction` is `'forward'`, select a new features at random + among those not currently selected in selector.support_, + build a new version of the data that includes all the features + in selector.support_ + this newly selected feature. + And check that the cross-validation score of the model trained on + this new dataset variant is lower than the model with + the selected forward selected features or at least does not improve + by more than the tol margin. + + When `direction` is `'backward'`, instead of adding a new feature + to selector.support_, try to remove one of those selected features at random + And check that the cross-validation score is either decreasing or + not improving by more than the tol margin. + """ + + X, y = make_regression(n_features=50, n_informative=10, random_state=0) + + tol = 1e-3 + + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + tol=tol, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + selected_X = sfs.transform(X) + + rng = np.random.RandomState(0) + + added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True))) + added_X = np.hstack( + [ + selected_X, + (X[:, rng.choice(added_candidates)])[:, np.newaxis], + ] + ) + + removed_candidate = rng.choice(list(range(sfs.n_features_to_select_))) + removed_X = np.delete(selected_X, removed_candidate, axis=1) + + plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean() + sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean() + added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean() + removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean() + + assert sfs_cv_score >= plain_cv_score + + if direction == "forward": + assert (sfs_cv_score - added_cv_score) <= tol + assert (sfs_cv_score - removed_cv_score) >= tol + else: + assert (added_cv_score - sfs_cv_score) <= tol + assert (removed_cv_score - sfs_cv_score) <= tol + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize( + "n_features_to_select, expected", + ( + (0.1, 1), + (1.0, 10), + (0.5, 5), + ), +) +def test_n_features_to_select_float(direction, n_features_to_select, expected): + # Test passing a float as n_features_to_select + X, y = make_regression(n_features=10) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + assert sfs.n_features_to_select_ == expected + + +@pytest.mark.parametrize("seed", range(10)) +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize( + "n_features_to_select, expected_selected_features", 
+ [ + (2, [0, 2]), # f1 is dropped since it has no predictive power + (1, [2]), # f2 is more predictive than f0 so it's kept + ], +) +def test_sanity(seed, direction, n_features_to_select, expected_selected_features): + # Basic sanity check: 3 features, only f0 and f2 are correlated with the + # target, f2 having a stronger correlation than f0. We expect f1 to be + # dropped, and f2 to always be selected. + + rng = np.random.RandomState(seed) + n_samples = 100 + X = rng.randn(n_samples, 3) + y = 3 * X[:, 0] - 10 * X[:, 2] + + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + assert_array_equal(sfs.get_support(indices=True), expected_selected_features) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_support(csr_container): + # Make sure sparse data is supported + + X, y = make_regression(n_features=10) + X = csr_container(X) + sfs = SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ) + sfs.fit(X, y) + sfs.transform(X) + + +def test_nan_support(): + # Make sure nans are OK if the underlying estimator supports nans + + rng = np.random.RandomState(0) + n_samples, n_features = 40, 4 + X, y = make_regression(n_samples, n_features, random_state=0) + nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool) + X[nan_mask] = np.nan + sfs = SequentialFeatureSelector( + HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2 + ) + sfs.fit(X, y) + sfs.transform(X) + + with pytest.raises(ValueError, match="Input X contains NaN"): + # LinearRegression does not support nans + SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ).fit(X, y) + + +def test_pipeline_support(): + # Make sure that pipelines can be passed into SFS and that SFS can be + # passed into a pipeline + + n_samples, n_features = 50, 3 + X, y = make_regression(n_samples, n_features, random_state=0) + + # pipeline in SFS + pipe = make_pipeline(StandardScaler(), LinearRegression()) + sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2) + sfs.fit(X, y) + sfs.transform(X) + + # SFS in pipeline + sfs = SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ) + pipe = make_pipeline(StandardScaler(), sfs) + pipe.fit(X, y) + pipe.transform(X) + + +@pytest.mark.parametrize("n_features_to_select", (2, 3)) +def test_unsupervised_model_fit(n_features_to_select): + # Make sure that models without classification labels are not being + # validated + + X, y = make_blobs(n_features=4) + sfs = SequentialFeatureSelector( + KMeans(n_init=1), + n_features_to_select=n_features_to_select, + ) + sfs.fit(X) + assert sfs.transform(X).shape[1] == n_features_to_select + + +@pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3)) +def test_no_y_validation_model_fit(y): + # Make sure that other non-conventional y labels are not accepted + + X, clusters = make_blobs(n_features=6) + sfs = SequentialFeatureSelector( + KMeans(), + n_features_to_select=3, + ) + + with pytest.raises((TypeError, ValueError)): + sfs.fit(X, y) + + +def test_forward_neg_tol_error(): + """Check that we raise an error when tol<0 and direction='forward'""" + X, y = make_regression(n_features=10, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + direction="forward", + tol=-1e-3, + ) + + with pytest.raises(ValueError, match="tol must be positive"): + 
sfs.fit(X, y)
+
+
+def test_backward_neg_tol():
+    """Check that SequentialFeatureSelector works with a negative tol.
+
+    non-regression test for #25525
+    """
+    X, y = make_regression(n_features=10, random_state=0)
+    lr = LinearRegression()
+    initial_score = lr.fit(X, y).score(X, y)
+
+    sfs = SequentialFeatureSelector(
+        lr,
+        n_features_to_select="auto",
+        direction="backward",
+        tol=-1e-3,
+    )
+    Xr = sfs.fit_transform(X, y)
+    new_score = lr.fit(Xr, y).score(Xr, y)
+
+    assert 0 < sfs.get_support().sum() < X.shape[1]
+    assert new_score < initial_score
+
+
+def test_cv_generator_support():
+    """Check that no exception is raised when cv is a generator.
+
+    non-regression test for #25957
+    """
+    X, y = make_classification(random_state=0)
+
+    groups = np.zeros_like(y, dtype=int)
+    groups[y.size // 2 :] = 1
+
+    cv = LeaveOneGroupOut()
+    splits = cv.split(X, y, groups=groups)
+
+    knc = KNeighborsClassifier(n_neighbors=5)
+
+    sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits)
+    sfs.fit(X, y)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py
new file mode 100644
index 0000000000000000000000000000000000000000..45e66cb338a4b7a5a410db669a13f6f9213451dc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py
@@ -0,0 +1,72 @@
+import numpy as np
+import pytest
+
+from sklearn.feature_selection import VarianceThreshold
+from sklearn.utils._testing import assert_array_equal
+from sklearn.utils.fixes import BSR_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
+
+data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]]
+
+data2 = [[-0.13725701]] * 10
+
+
+@pytest.mark.parametrize(
+    "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
+)
+def test_zero_variance(sparse_container):
+    # Test VarianceThreshold with default setting, zero variance.
+    X = data if sparse_container is None else sparse_container(data)
+    sel = VarianceThreshold().fit(X)
+    assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True))
+
+
+def test_zero_variance_value_error():
+    # Test VarianceThreshold with default setting, zero variance, error cases.
+    with pytest.raises(ValueError):
+        VarianceThreshold().fit([[0, 1, 2, 3]])
+    with pytest.raises(ValueError):
+        VarianceThreshold().fit([[0, 1], [0, 1]])
+
+
+@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
+def test_variance_threshold(sparse_container):
+    # Test VarianceThreshold with custom variance.
+    X = data if sparse_container is None else sparse_container(data)
+    X = VarianceThreshold(threshold=0.4).fit_transform(X)
+    assert (len(data), 1) == X.shape
+
+
+@pytest.mark.skipif(
+    np.var(data2) == 0,
+    reason=(
+        "This test is not valid for this platform, "
+        "as it relies on numerical instabilities."
+    ),
+)
+@pytest.mark.parametrize(
+    "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
+)
+def test_zero_variance_floating_point_error(sparse_container):
+    # Test that VarianceThreshold(0.0).fit eliminates features that have
+    # the same value in every sample, even when floating point errors
+    # cause np.var not to be 0 for the feature.
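+    # data2 holds ten copies of the same float; its true variance is 0, but
+    # np.var can return a tiny positive value due to rounding.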
+ # See #13691 + X = data2 if sparse_container is None else sparse_container(data2) + msg = "No feature in X meets the variance threshold 0.00000" + with pytest.raises(ValueError, match=msg): + VarianceThreshold().fit(X) + + +@pytest.mark.parametrize( + "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_variance_nan(sparse_container): + arr = np.array(data, dtype=np.float64) + # add single NaN and feature should still be included + arr[0, 0] = np.nan + # make all values in feature NaN and feature should be rejected + arr[:, 1] = np.nan + + X = arr if sparse_container is None else sparse_container(arr) + sel = VarianceThreshold().fit(X) + assert_array_equal([0, 3, 4], sel.get_support(indices=True)) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e305bc2a657dc042d63dfd42fb8aa9734365ccbf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__init__.py @@ -0,0 +1,24 @@ +"""Transformers for missing value imputation""" +import typing + +from ._base import MissingIndicator, SimpleImputer +from ._knn import KNNImputer + +if typing.TYPE_CHECKING: + # Avoid errors in type checkers (e.g. mypy) for experimental estimators. + # TODO: remove this check once the estimator is no longer experimental. + from ._iterative import IterativeImputer # noqa + +__all__ = ["MissingIndicator", "SimpleImputer", "KNNImputer"] + + +# TODO: remove this check once the estimator is no longer experimental. +def __getattr__(name): + if name == "IterativeImputer": + raise ImportError( + f"{name} is experimental and the API might change without any " + "deprecation cycle. 
To use it, you need to explicitly import " + "enable_iterative_imputer:\n" + "from sklearn.experimental import enable_iterative_imputer" + ) + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f6c87dd6dd0a1c4205fed51812413e61e0f186c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49ef95daf08c0b354603718395d8255195762127 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7eedd76168ad00b8cc81419f54c22490a7bcd85c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db714a1cffaa602b61922307414da9717d3c606e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..35aefda68d8f8342234d17a06144bee3711f1d44 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_base.py @@ -0,0 +1,1075 @@ +# Authors: Nicolas Tresegnie +# Sergey Feldman +# License: BSD 3 clause + +import numbers +import warnings +from collections import Counter +from functools import partial + +import numpy as np +import numpy.ma as ma +from scipy import sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import _is_pandas_na, is_scalar_nan +from ..utils._mask import _get_mask +from ..utils._param_validation import MissingValues, StrOptions +from ..utils.fixes import _mode +from ..utils.sparsefuncs import _get_median +from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted + + +def _check_inputs_dtype(X, missing_values): + if _is_pandas_na(missing_values): + # Allow using `pd.NA` as missing values to impute numerical arrays. + return + if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real): + raise ValueError( + "'X' and 'missing_values' types are expected to be" + " both numerical. 
Got X.dtype={} and "
+            " type(missing_values)={}.".format(X.dtype, type(missing_values))
+        )
+
+
+def _most_frequent(array, extra_value, n_repeat):
+    """Compute the most frequent value in a 1d array extended with
+    [extra_value] * n_repeat, where extra_value is assumed to be not part
+    of the array."""
+    # Compute the most frequent value in array only
+    if array.size > 0:
+        if array.dtype == object:
+            # scipy.stats.mode is slow with object dtype array.
+            # Python Counter is more efficient
+            counter = Counter(array)
+            most_frequent_count = counter.most_common(1)[0][1]
+            # tie breaking similarly to scipy.stats.mode
+            most_frequent_value = min(
+                value
+                for value, count in counter.items()
+                if count == most_frequent_count
+            )
+        else:
+            mode = _mode(array)
+            most_frequent_value = mode[0][0]
+            most_frequent_count = mode[1][0]
+    else:
+        most_frequent_value = 0
+        most_frequent_count = 0
+
+    # Compare to array + [extra_value] * n_repeat
+    if most_frequent_count == 0 and n_repeat == 0:
+        return np.nan
+    elif most_frequent_count < n_repeat:
+        return extra_value
+    elif most_frequent_count > n_repeat:
+        return most_frequent_value
+    elif most_frequent_count == n_repeat:
+        # tie breaking similarly to scipy.stats.mode
+        return min(most_frequent_value, extra_value)
+
+
+class _BaseImputer(TransformerMixin, BaseEstimator):
+    """Base class for all imputers.
+
+    It automatically adds support for `add_indicator`.
+    """
+
+    _parameter_constraints: dict = {
+        "missing_values": [MissingValues()],
+        "add_indicator": ["boolean"],
+        "keep_empty_features": ["boolean"],
+    }
+
+    def __init__(
+        self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False
+    ):
+        self.missing_values = missing_values
+        self.add_indicator = add_indicator
+        self.keep_empty_features = keep_empty_features
+
+    def _fit_indicator(self, X):
+        """Fit a MissingIndicator."""
+        if self.add_indicator:
+            self.indicator_ = MissingIndicator(
+                missing_values=self.missing_values, error_on_new=False
+            )
+            self.indicator_._fit(X, precomputed=True)
+        else:
+            self.indicator_ = None
+
+    def _transform_indicator(self, X):
+        """Compute the indicator mask.
+
+        Note that X must be the original data as passed to the imputer before
+        any imputation, since imputation may be done inplace in some cases.
+        """
+        if self.add_indicator:
+            if not hasattr(self, "indicator_"):
+                raise ValueError(
+                    "Make sure to call _fit_indicator before _transform_indicator"
+                )
+            return self.indicator_.transform(X)
+
+    def _concatenate_indicator(self, X_imputed, X_indicator):
+        """Concatenate indicator mask with the imputed data."""
+        if not self.add_indicator:
+            return X_imputed
+
+        if sp.issparse(X_imputed):
+            # sp.hstack may result in different formats between sparse arrays and
+            # matrices; specify the format to keep consistent behavior
+            hstack = partial(sp.hstack, format=X_imputed.format)
+        else:
+            hstack = np.hstack
+
+        if X_indicator is None:
+            raise ValueError(
+                "Data from the missing indicator are not provided. Call "
+                "_fit_indicator and _transform_indicator in the imputer "
+                "implementation."
+ ) + + return hstack((X_imputed, X_indicator)) + + def _concatenate_indicator_feature_names_out(self, names, input_features): + if not self.add_indicator: + return names + + indicator_names = self.indicator_.get_feature_names_out(input_features) + return np.concatenate([names, indicator_names]) + + def _more_tags(self): + return {"allow_nan": is_scalar_nan(self.missing_values)} + + +class SimpleImputer(_BaseImputer): + """Univariate imputer for completing missing values with simple strategies. + + Replace missing values using a descriptive statistic (e.g. mean, median, or + most frequent) along each column, or using a constant value. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer` + estimator which is now removed. + + Parameters + ---------- + missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan + The placeholder for the missing values. All occurrences of + `missing_values` will be imputed. For pandas' dataframes with + nullable integer dtypes with missing values, `missing_values` + can be set to either `np.nan` or `pd.NA`. + + strategy : str, default='mean' + The imputation strategy. + + - If "mean", then replace missing values using the mean along + each column. Can only be used with numeric data. + - If "median", then replace missing values using the median along + each column. Can only be used with numeric data. + - If "most_frequent", then replace missing using the most frequent + value along each column. Can be used with strings or numeric data. + If there is more than one such value, only the smallest is returned. + - If "constant", then replace missing values with fill_value. Can be + used with strings or numeric data. + + .. versionadded:: 0.20 + strategy="constant" for fixed value imputation. + + fill_value : str or numerical value, default=None + When strategy == "constant", `fill_value` is used to replace all + occurrences of missing_values. For string or object data types, + `fill_value` must be a string. + If `None`, `fill_value` will be 0 when imputing numerical + data and "missing_value" for strings or object data types. + + copy : bool, default=True + If True, a copy of X will be created. If False, imputation will + be done in-place whenever possible. Note that, in the following cases, + a new copy will always be made, even if `copy=False`: + + - If `X` is not an array of floating values; + - If `X` is encoded as a CSR matrix; + - If `add_indicator=True`. + + add_indicator : bool, default=False + If True, a :class:`MissingIndicator` transform will stack onto output + of the imputer's transform. This allows a predictive estimator + to account for missingness despite imputation. If a feature has no + missing values at fit/train time, the feature won't appear on + the missing indicator even if there are missing values at + transform/test time. + + keep_empty_features : bool, default=False + If True, features that consist exclusively of missing values when + `fit` is called are returned in results when `transform` is called. + The imputed value is always `0` except when `strategy="constant"` + in which case `fill_value` will be used instead. + + .. versionadded:: 1.2 + + Attributes + ---------- + statistics_ : array of shape (n_features,) + The imputation fill value for each feature. + Computing statistics can result in `np.nan` values. + During :meth:`transform`, features corresponding to `np.nan` + statistics will be discarded. 
+ + indicator_ : :class:`~sklearn.impute.MissingIndicator` + Indicator used to add binary indicators for missing values. + `None` if `add_indicator=False`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + IterativeImputer : Multivariate imputer that estimates values to impute for + each feature with missing values from all the others. + KNNImputer : Multivariate imputer that estimates missing features using + nearest samples. + + Notes + ----- + Columns which only contained missing values at :meth:`fit` are discarded + upon :meth:`transform` if strategy is not `"constant"`. + + In a prediction context, simple imputation usually performs poorly when + associated with a weak learner. However, with a powerful learner, it can + lead to as good or better performance than complex imputation such as + :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.impute import SimpleImputer + >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean') + >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) + SimpleImputer() + >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] + >>> print(imp_mean.transform(X)) + [[ 7. 2. 3. ] + [ 4. 3.5 6. ] + [10. 3.5 9. ]] + + For a more detailed example see + :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`. + """ + + _parameter_constraints: dict = { + **_BaseImputer._parameter_constraints, + "strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})], + "fill_value": "no_validation", # any object is valid + "copy": ["boolean"], + } + + def __init__( + self, + *, + missing_values=np.nan, + strategy="mean", + fill_value=None, + copy=True, + add_indicator=False, + keep_empty_features=False, + ): + super().__init__( + missing_values=missing_values, + add_indicator=add_indicator, + keep_empty_features=keep_empty_features, + ) + self.strategy = strategy + self.fill_value = fill_value + self.copy = copy + + def _validate_input(self, X, in_fit): + if self.strategy in ("most_frequent", "constant"): + # If input is a list of strings, dtype = object. 
+ # Otherwise ValueError is raised in SimpleImputer + # with strategy='most_frequent' or 'constant' + # because the list is converted to Unicode numpy array + if isinstance(X, list) and any( + isinstance(elem, str) for row in X for elem in row + ): + dtype = object + else: + dtype = None + else: + dtype = FLOAT_DTYPES + + if not in_fit and self._fit_dtype.kind == "O": + # Use object dtype if fitted on object dtypes + dtype = self._fit_dtype + + if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values): + force_all_finite = "allow-nan" + else: + force_all_finite = True + + try: + X = self._validate_data( + X, + reset=in_fit, + accept_sparse="csc", + dtype=dtype, + force_all_finite=force_all_finite, + copy=self.copy, + ) + except ValueError as ve: + if "could not convert" in str(ve): + new_ve = ValueError( + "Cannot use {} strategy with non-numeric data:\n{}".format( + self.strategy, ve + ) + ) + raise new_ve from None + else: + raise ve + + if in_fit: + # Use the dtype seen in `fit` for non-`fit` conversion + self._fit_dtype = X.dtype + + _check_inputs_dtype(X, self.missing_values) + if X.dtype.kind not in ("i", "u", "f", "O"): + raise ValueError( + "SimpleImputer does not support data with dtype " + "{0}. Please provide either a numeric array (with" + " a floating point or integer dtype) or " + "categorical data represented either as an array " + "with integer dtype or an array of string values " + "with an object dtype.".format(X.dtype) + ) + + if sp.issparse(X) and self.missing_values == 0: + # missing_values = 0 not allowed with sparse data as it would + # force densification + raise ValueError( + "Imputation not possible when missing_values " + "== 0 and input is sparse. Provide a dense " + "array instead." + ) + + if self.strategy == "constant": + if in_fit and self.fill_value is not None: + fill_value_dtype = type(self.fill_value) + err_msg = ( + f"fill_value={self.fill_value!r} (of type {fill_value_dtype!r}) " + f"cannot be cast to the input data that is {X.dtype!r}. Make sure " + "that both dtypes are of the same kind." + ) + elif not in_fit: + fill_value_dtype = self.statistics_.dtype + err_msg = ( + f"The dtype of the filling value (i.e. {fill_value_dtype!r}) " + f"cannot be cast to the input data that is {X.dtype!r}. Make sure " + "that the dtypes of the input data is of the same kind between " + "fit and transform." + ) + else: + # By default, fill_value=None, and the replacement is always + # compatible with the input data + fill_value_dtype = X.dtype + + # Make sure we can safely cast fill_value dtype to the input data dtype + if not np.can_cast(fill_value_dtype, X.dtype, casting="same_kind"): + raise ValueError(err_msg) + + return X + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the imputer on `X`. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + X = self._validate_input(X, in_fit=True) + + # default fill_value is 0 for numerical input and "missing_value" + # otherwise + if self.fill_value is None: + if X.dtype.kind in ("i", "u", "f"): + fill_value = 0 + else: + fill_value = "missing_value" + else: + fill_value = self.fill_value + + if sp.issparse(X): + self.statistics_ = self._sparse_fit( + X, self.strategy, self.missing_values, fill_value + ) + else: + self.statistics_ = self._dense_fit( + X, self.strategy, self.missing_values, fill_value + ) + + return self + + def _sparse_fit(self, X, strategy, missing_values, fill_value): + """Fit the transformer on sparse data.""" + missing_mask = _get_mask(X, missing_values) + mask_data = missing_mask.data + n_implicit_zeros = X.shape[0] - np.diff(X.indptr) + + statistics = np.empty(X.shape[1]) + + if strategy == "constant": + # for constant strategy, self.statistics_ is used to store + # fill_value in each column + statistics.fill(fill_value) + else: + for i in range(X.shape[1]): + column = X.data[X.indptr[i] : X.indptr[i + 1]] + mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]] + column = column[~mask_column] + + # combine explicit and implicit zeros + mask_zeros = _get_mask(column, 0) + column = column[~mask_zeros] + n_explicit_zeros = mask_zeros.sum() + n_zeros = n_implicit_zeros[i] + n_explicit_zeros + + if len(column) == 0 and self.keep_empty_features: + # in case we want to keep columns with only missing values. + statistics[i] = 0 + else: + if strategy == "mean": + s = column.size + n_zeros + statistics[i] = np.nan if s == 0 else column.sum() / s + + elif strategy == "median": + statistics[i] = _get_median(column, n_zeros) + + elif strategy == "most_frequent": + statistics[i] = _most_frequent(column, 0, n_zeros) + + super()._fit_indicator(missing_mask) + + return statistics + + def _dense_fit(self, X, strategy, missing_values, fill_value): + """Fit the transformer on dense data.""" + missing_mask = _get_mask(X, missing_values) + masked_X = ma.masked_array(X, mask=missing_mask) + + super()._fit_indicator(missing_mask) + + # Mean + if strategy == "mean": + mean_masked = np.ma.mean(masked_X, axis=0) + # Avoid the warning "Warning: converting a masked element to nan." + mean = np.ma.getdata(mean_masked) + mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan + + return mean + + # Median + elif strategy == "median": + median_masked = np.ma.median(masked_X, axis=0) + # Avoid the warning "Warning: converting a masked element to nan." + median = np.ma.getdata(median_masked) + median[np.ma.getmaskarray(median_masked)] = ( + 0 if self.keep_empty_features else np.nan + ) + + return median + + # Most frequent + elif strategy == "most_frequent": + # Avoid use of scipy.stats.mstats.mode due to the required + # additional overhead and slow benchmarking performance. + # See Issue 14325 and PR 14399 for full discussion. 
+
+            # To be able to access the elements by columns
+            X = X.transpose()
+            mask = missing_mask.transpose()
+
+            if X.dtype.kind == "O":
+                most_frequent = np.empty(X.shape[0], dtype=object)
+            else:
+                most_frequent = np.empty(X.shape[0])
+
+            for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
+                row_mask = np.logical_not(row_mask).astype(bool)
+                row = row[row_mask]
+                if len(row) == 0 and self.keep_empty_features:
+                    most_frequent[i] = 0
+                else:
+                    most_frequent[i] = _most_frequent(row, np.nan, 0)
+
+            return most_frequent
+
+        # Constant
+        elif strategy == "constant":
+            # for constant strategy, self.statistics_ is used to store
+            # fill_value in each column
+            return np.full(X.shape[1], fill_value, dtype=X.dtype)
+
+    def transform(self, X):
+        """Impute all missing values in `X`.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            The input data to complete.
+
+        Returns
+        -------
+        X_imputed : {ndarray, sparse matrix} of shape \
+                (n_samples, n_features_out)
+            `X` with imputed values.
+        """
+        check_is_fitted(self)
+
+        X = self._validate_input(X, in_fit=False)
+        statistics = self.statistics_
+
+        if X.shape[1] != statistics.shape[0]:
+            raise ValueError(
+                "X has %d features per sample, expected %d"
+                % (X.shape[1], self.statistics_.shape[0])
+            )
+
+        # compute mask before eliminating invalid features
+        missing_mask = _get_mask(X, self.missing_values)
+
+        # Decide whether to keep missing features
+        if self.strategy == "constant" or self.keep_empty_features:
+            valid_statistics = statistics
+            valid_statistics_indexes = None
+        else:
+            # same as np.isnan but also works for object dtypes
+            invalid_mask = _get_mask(statistics, np.nan)
+            valid_mask = np.logical_not(invalid_mask)
+            valid_statistics = statistics[valid_mask]
+            valid_statistics_indexes = np.flatnonzero(valid_mask)
+
+            if invalid_mask.any():
+                invalid_features = np.arange(X.shape[1])[invalid_mask]
+                # use feature names warning if features are provided
+                if hasattr(self, "feature_names_in_"):
+                    invalid_features = self.feature_names_in_[invalid_features]
+                warnings.warn(
+                    "Skipping features without any observed values:"
+                    f" {invalid_features}. At least one non-missing value is needed"
+                    f" for imputation with strategy='{self.strategy}'."
+                )
+                X = X[:, valid_statistics_indexes]
+
+        # Do actual imputation
+        if sp.issparse(X):
+            if self.missing_values == 0:
+                raise ValueError(
+                    "Imputation not possible when missing_values "
+                    "== 0 and input is sparse. Provide a dense "
+                    "array instead."
+                )
+            else:
+                # if no invalid statistics are found, use the mask computed
+                # before, else recompute mask
+                if valid_statistics_indexes is None:
+                    mask = missing_mask.data
+                else:
+                    mask = _get_mask(X.data, self.missing_values)
+                indexes = np.repeat(
+                    np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
+                )[mask]
+
+                X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
+        else:
+            # use the mask computed before eliminating invalid features
+            if valid_statistics_indexes is None:
+                mask_valid_features = missing_mask
+            else:
+                mask_valid_features = missing_mask[:, valid_statistics_indexes]
+            n_missing = np.sum(mask_valid_features, axis=0)
+            values = np.repeat(valid_statistics, n_missing)
+            coordinates = np.where(mask_valid_features.transpose())[::-1]
+
+            X[coordinates] = values
+
+        X_indicator = super()._transform_indicator(missing_mask)
+
+        return super()._concatenate_indicator(X, X_indicator)
+
+    def inverse_transform(self, X):
+        """Convert the data back to the original representation.
+ + Inverts the `transform` operation performed on an array. + This operation can only be performed after :class:`SimpleImputer` is + instantiated with `add_indicator=True`. + + Note that `inverse_transform` can only invert the transform in + features that have binary indicators for missing values. If a feature + has no missing values at `fit` time, the feature won't have a binary + indicator, and the imputation done at `transform` time won't be + inverted. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : array-like of shape \ + (n_samples, n_features + n_features_missing_indicator) + The imputed data to be reverted to original data. It has to be + an augmented array of imputed data and the missing indicator mask. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + The original `X` with missing values as it was prior + to imputation. + """ + check_is_fitted(self) + + if not self.add_indicator: + raise ValueError( + "'inverse_transform' works only when " + "'SimpleImputer' is instantiated with " + "'add_indicator=True'. " + f"Got 'add_indicator={self.add_indicator}' " + "instead." + ) + + n_features_missing = len(self.indicator_.features_) + non_empty_feature_count = X.shape[1] - n_features_missing + array_imputed = X[:, :non_empty_feature_count].copy() + missing_mask = X[:, non_empty_feature_count:].astype(bool) + + n_features_original = len(self.statistics_) + shape_original = (X.shape[0], n_features_original) + X_original = np.zeros(shape_original) + X_original[:, self.indicator_.features_] = missing_mask + full_mask = X_original.astype(bool) + + imputed_idx, original_idx = 0, 0 + while imputed_idx < len(array_imputed.T): + if not np.all(X_original[:, original_idx]): + X_original[:, original_idx] = array_imputed.T[imputed_idx] + imputed_idx += 1 + original_idx += 1 + else: + original_idx += 1 + + X_original[full_mask] = self.missing_values + return X_original + + def _more_tags(self): + return { + "allow_nan": _is_pandas_na(self.missing_values) or is_scalar_nan( + self.missing_values + ) + } + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan)) + names = input_features[non_missing_mask] + return self._concatenate_indicator_feature_names_out(names, input_features) + + +class MissingIndicator(TransformerMixin, BaseEstimator): + """Binary indicators for missing values. + + Note that this component typically should not be used in a vanilla + :class:`~sklearn.pipeline.Pipeline` consisting of transformers and a + classifier, but rather could be added using a + :class:`~sklearn.pipeline.FeatureUnion` or + :class:`~sklearn.compose.ColumnTransformer`. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.20
+
+    Parameters
+    ----------
+    missing_values : int, float, str, np.nan or None, default=np.nan
+        The placeholder for the missing values. All occurrences of
+        `missing_values` will be imputed. For pandas' dataframes with
+        nullable integer dtypes with missing values, `missing_values`
+        should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
+
+    features : {'missing-only', 'all'}, default='missing-only'
+        Whether the imputer mask should represent all or a subset of
+        features.
+
+        - If `'missing-only'` (default), the imputer mask will only represent
+          features containing missing values during fit time.
+        - If `'all'`, the imputer mask will represent all features.
+
+    sparse : bool or 'auto', default='auto'
+        Whether the imputer mask format should be sparse or dense.
+
+        - If `'auto'` (default), the imputer mask will be of same type as
+          input.
+        - If `True`, the imputer mask will be a sparse matrix.
+        - If `False`, the imputer mask will be a numpy array.
+
+    error_on_new : bool, default=True
+        If `True`, :meth:`transform` will raise an error when there are
+        features with missing values that have no missing values in
+        :meth:`fit`. This is applicable only when `features='missing-only'`.
+
+    Attributes
+    ----------
+    features_ : ndarray of shape (n_missing_features,) or (n_features,)
+        The indices of the features that will be returned when calling
+        :meth:`transform`. They are computed during :meth:`fit`. If
+        `features='all'`, `features_` is equal to `range(n_features)`.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    SimpleImputer : Univariate imputation of missing values.
+    IterativeImputer : Multivariate imputation of missing values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.impute import MissingIndicator
+    >>> X1 = np.array([[np.nan, 1, 3],
+    ...                [4, 0, np.nan],
+    ...                [8, 1, 0]])
+    >>> X2 = np.array([[5, 1, np.nan],
+    ...                [np.nan, 2, 3],
+    ...                [2, 4, 0]])
+    >>> indicator = MissingIndicator()
+    >>> indicator.fit(X1)
+    MissingIndicator()
+    >>> X2_tr = indicator.transform(X2)
+    >>> X2_tr
+    array([[False,  True],
+           [ True, False],
+           [False, False]])
+    """
+
+    _parameter_constraints: dict = {
+        "missing_values": [MissingValues()],
+        "features": [StrOptions({"missing-only", "all"})],
+        "sparse": ["boolean", StrOptions({"auto"})],
+        "error_on_new": ["boolean"],
+    }
+
+    def __init__(
+        self,
+        *,
+        missing_values=np.nan,
+        features="missing-only",
+        sparse="auto",
+        error_on_new=True,
+    ):
+        self.missing_values = missing_values
+        self.features = features
+        self.sparse = sparse
+        self.error_on_new = error_on_new
+
+    def _get_missing_features_info(self, X):
+        """Compute the imputer mask and the indices of the features
+        containing missing values.
+
+        Parameters
+        ----------
+        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
+            The input data with missing values. Note that `X` has been
+            checked in :meth:`fit` and :meth:`transform` before calling
+            this function.
+
+        Returns
+        -------
+        imputer_mask : {ndarray, sparse matrix} of shape \
+                (n_samples, n_features)
+            The imputer mask of the original data.
+
+        features_with_missing : ndarray of shape (n_features_with_missing,)
+            The features containing missing values.
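Before the implementation below, a minimal dense-case sketch of the two return values (toy mask, illustrative names):

import numpy as np

mask = np.array([[True, False], [False, False]])  # True marks a missing entry
n_missing = mask.sum(axis=0)                      # per-feature counts: [1, 0]
print(np.flatnonzero(n_missing))                  # [0] -- 'missing-only' keeps feature 0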
+ """ + if not self._precomputed: + imputer_mask = _get_mask(X, self.missing_values) + else: + imputer_mask = X + + if sp.issparse(X): + imputer_mask.eliminate_zeros() + + if self.features == "missing-only": + n_missing = imputer_mask.getnnz(axis=0) + + if self.sparse is False: + imputer_mask = imputer_mask.toarray() + elif imputer_mask.format == "csr": + imputer_mask = imputer_mask.tocsc() + else: + if not self._precomputed: + imputer_mask = _get_mask(X, self.missing_values) + else: + imputer_mask = X + + if self.features == "missing-only": + n_missing = imputer_mask.sum(axis=0) + + if self.sparse is True: + imputer_mask = sp.csc_matrix(imputer_mask) + + if self.features == "all": + features_indices = np.arange(X.shape[1]) + else: + features_indices = np.flatnonzero(n_missing) + + return imputer_mask, features_indices + + def _validate_input(self, X, in_fit): + if not is_scalar_nan(self.missing_values): + force_all_finite = True + else: + force_all_finite = "allow-nan" + X = self._validate_data( + X, + reset=in_fit, + accept_sparse=("csc", "csr"), + dtype=None, + force_all_finite=force_all_finite, + ) + _check_inputs_dtype(X, self.missing_values) + if X.dtype.kind not in ("i", "u", "f", "O"): + raise ValueError( + "MissingIndicator does not support data with " + "dtype {0}. Please provide either a numeric array" + " (with a floating point or integer dtype) or " + "categorical data represented either as an array " + "with integer dtype or an array of string values " + "with an object dtype.".format(X.dtype) + ) + + if sp.issparse(X) and self.missing_values == 0: + # missing_values = 0 not allowed with sparse data as it would + # force densification + raise ValueError( + "Sparse input with missing_values=0 is " + "not supported. Provide a dense " + "array instead." + ) + + return X + + def _fit(self, X, y=None, precomputed=False): + """Fit the transformer on `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + If `precomputed=True`, then `X` is a mask of the input data. + + precomputed : bool + Whether the input data is a mask. + + Returns + ------- + imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \ + n_features) + The imputer mask of the original data. + """ + if precomputed: + if not (hasattr(X, "dtype") and X.dtype.kind == "b"): + raise ValueError("precomputed is True but the input data is not a mask") + self._precomputed = True + else: + self._precomputed = False + + # Need not validate X again as it would have already been validated + # in the Imputer calling MissingIndicator + if not self._precomputed: + X = self._validate_input(X, in_fit=True) + else: + # only create `n_features_in_` in the precomputed case + self._check_n_features(X, reset=True) + + self._n_features = X.shape[1] + + missing_features_info = self._get_missing_features_info(X) + self.features_ = missing_features_info[1] + + return missing_features_info[0] + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the transformer on `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + self._fit(X, y) + + return self + + def transform(self, X): + """Generate missing values indicator for `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data to complete. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \ + or (n_samples, n_features_with_missing) + The missing indicator for input data. The data type of `Xt` + will be boolean. + """ + check_is_fitted(self) + + # Need not validate X again as it would have already been validated + # in the Imputer calling MissingIndicator + if not self._precomputed: + X = self._validate_input(X, in_fit=False) + else: + if not (hasattr(X, "dtype") and X.dtype.kind == "b"): + raise ValueError("precomputed is True but the input data is not a mask") + + imputer_mask, features = self._get_missing_features_info(X) + + if self.features == "missing-only": + features_diff_fit_trans = np.setdiff1d(features, self.features_) + if self.error_on_new and features_diff_fit_trans.size > 0: + raise ValueError( + "The features {} have missing values " + "in transform but have no missing values " + "in fit.".format(features_diff_fit_trans) + ) + + if self.features_.size < self._n_features: + imputer_mask = imputer_mask[:, self.features_] + + return imputer_mask + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Generate missing values indicator for `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data to complete. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \ + or (n_samples, n_features_with_missing) + The missing indicator for input data. The data type of `Xt` + will be boolean. + """ + imputer_mask = self._fit(X, y) + + if self.features_.size < self._n_features: + imputer_mask = imputer_mask[:, self.features_] + + return imputer_mask + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
+ """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + prefix = self.__class__.__name__.lower() + return np.asarray( + [ + f"{prefix}_{feature_name}" + for feature_name in input_features[self.features_] + ], + dtype=object, + ) + + def _more_tags(self): + return { + "allow_nan": True, + "X_types": ["2darray", "string"], + "preserves_dtype": [], + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_iterative.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_iterative.py new file mode 100644 index 0000000000000000000000000000000000000000..11bca36773e6493b38359eb4bb6e3f6300a6aa5f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_iterative.py @@ -0,0 +1,906 @@ +import warnings +from collections import namedtuple +from numbers import Integral, Real +from time import time + +import numpy as np +from scipy import stats + +from ..base import _fit_context, clone +from ..exceptions import ConvergenceWarning +from ..preprocessing import normalize +from ..utils import ( + _safe_assign, + _safe_indexing, + check_array, + check_random_state, + is_scalar_nan, +) +from ..utils._mask import _get_mask +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted +from ._base import SimpleImputer, _BaseImputer, _check_inputs_dtype + +_ImputerTriplet = namedtuple( + "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"] +) + + +def _assign_where(X1, X2, cond): + """Assign X2 to X1 where cond is True. + + Parameters + ---------- + X1 : ndarray or dataframe of shape (n_samples, n_features) + Data. + + X2 : ndarray of shape (n_samples, n_features) + Data to be assigned. + + cond : ndarray of shape (n_samples, n_features) + Boolean mask to assign data. + """ + if hasattr(X1, "mask"): # pandas dataframes + X1.mask(cond=cond, other=X2, inplace=True) + else: # ndarrays + X1[cond] = X2[cond] + + +class IterativeImputer(_RoutingNotSupportedMixin, _BaseImputer): + """Multivariate imputer that estimates each feature from all the others. + + A strategy for imputing missing values by modeling each feature with + missing values as a function of other features in a round-robin fashion. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.21 + + .. note:: + + This estimator is still **experimental** for now: the predictions + and the API might change without any deprecation cycle. To use it, + you need to explicitly import `enable_iterative_imputer`:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_iterative_imputer # noqa + >>> # now you can import normally from sklearn.impute + >>> from sklearn.impute import IterativeImputer + + Parameters + ---------- + estimator : estimator object, default=BayesianRidge() + The estimator to use at each step of the round-robin imputation. + If `sample_posterior=True`, the estimator must support + `return_std` in its `predict` method. + + missing_values : int or np.nan, default=np.nan + The placeholder for the missing values. All occurrences of + `missing_values` will be imputed. For pandas' dataframes with + nullable integer dtypes with missing values, `missing_values` + should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. 
+ + sample_posterior : bool, default=False + Whether to sample from the (Gaussian) predictive posterior of the + fitted estimator for each imputation. Estimator must support + `return_std` in its `predict` method if set to `True`. Set to + `True` if using `IterativeImputer` for multiple imputations. + + max_iter : int, default=10 + Maximum number of imputation rounds to perform before returning the + imputations computed during the final round. A round is a single + imputation of each feature with missing values. The stopping criterion + is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`, + where `X_t` is `X` at iteration `t`. Note that early stopping is only + applied if `sample_posterior=False`. + + tol : float, default=1e-3 + Tolerance of the stopping condition. + + n_nearest_features : int, default=None + Number of other features to use to estimate the missing values of + each feature column. Nearness between features is measured using + the absolute correlation coefficient between each feature pair (after + initial imputation). To ensure coverage of features throughout the + imputation process, the neighbor features are not necessarily nearest, + but are drawn with probability proportional to correlation for each + imputed target feature. Can provide significant speed-up when the + number of features is huge. If `None`, all features will be used. + + initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \ + default='mean' + Which strategy to use to initialize the missing values. Same as the + `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`. + + fill_value : str or numerical value, default=None + When `strategy="constant"`, `fill_value` is used to replace all + occurrences of missing_values. For string or object data types, + `fill_value` must be a string. + If `None`, `fill_value` will be 0 when imputing numerical + data and "missing_value" for strings or object data types. + + .. versionadded:: 1.3 + + imputation_order : {'ascending', 'descending', 'roman', 'arabic', \ + 'random'}, default='ascending' + The order in which the features will be imputed. Possible values: + + - `'ascending'`: From features with fewest missing values to most. + - `'descending'`: From features with most missing values to fewest. + - `'roman'`: Left to right. + - `'arabic'`: Right to left. + - `'random'`: A random order for each round. + + skip_complete : bool, default=False + If `True` then features with missing values during :meth:`transform` + which did not have any missing values during :meth:`fit` will be + imputed with the initial imputation method only. Set to `True` if you + have many features with no missing values at both :meth:`fit` and + :meth:`transform` time to save compute. + + min_value : float or array-like of shape (n_features,), default=-np.inf + Minimum possible imputed value. Broadcast to shape `(n_features,)` if + scalar. If array-like, expects shape `(n_features,)`, one min value for + each feature. The default is `-np.inf`. + + .. versionchanged:: 0.23 + Added support for array-like. + + max_value : float or array-like of shape (n_features,), default=np.inf + Maximum possible imputed value. Broadcast to shape `(n_features,)` if + scalar. If array-like, expects shape `(n_features,)`, one max value for + each feature. The default is `np.inf`. + + .. versionchanged:: 0.23 + Added support for array-like. + + verbose : int, default=0 + Verbosity flag, controls the debug messages that are issued + as functions are evaluated. 
The higher, the more verbose. Can be 0, 1, + or 2. + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator to use. Randomizes + selection of estimator features if `n_nearest_features` is not `None`, + the `imputation_order` if `random`, and the sampling from posterior if + `sample_posterior=True`. Use an integer for determinism. + See :term:`the Glossary `. + + add_indicator : bool, default=False + If `True`, a :class:`MissingIndicator` transform will stack onto output + of the imputer's transform. This allows a predictive estimator + to account for missingness despite imputation. If a feature has no + missing values at fit/train time, the feature won't appear on + the missing indicator even if there are missing values at + transform/test time. + + keep_empty_features : bool, default=False + If True, features that consist exclusively of missing values when + `fit` is called are returned in results when `transform` is called. + The imputed value is always `0` except when + `initial_strategy="constant"` in which case `fill_value` will be + used instead. + + .. versionadded:: 1.2 + + Attributes + ---------- + initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer` + Imputer used to initialize the missing values. + + imputation_sequence_ : list of tuples + Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where + `feat_idx` is the current feature to be imputed, + `neighbor_feat_idx` is the array of other features used to impute the + current feature, and `estimator` is the trained estimator used for + the imputation. Length is `self.n_features_with_missing_ * + self.n_iter_`. + + n_iter_ : int + Number of iteration rounds that occurred. Will be less than + `self.max_iter` if early stopping criterion was reached. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_features_with_missing_ : int + Number of features with missing values. + + indicator_ : :class:`~sklearn.impute.MissingIndicator` + Indicator used to add binary indicators for missing values. + `None` if `add_indicator=False`. + + random_state_ : RandomState instance + RandomState instance that is generated either from a seed, the random + number generator or by `np.random`. + + See Also + -------- + SimpleImputer : Univariate imputer for completing missing values + with simple strategies. + KNNImputer : Multivariate imputer that estimates missing features using + nearest samples. + + Notes + ----- + To support imputation in inductive mode we store each feature's estimator + during the :meth:`fit` phase, and predict without refitting (in order) + during the :meth:`transform` phase. + + Features which contain all missing values at :meth:`fit` are discarded upon + :meth:`transform`. + + Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))` + where :math:`k` = `max_iter`, :math:`n` the number of samples and + :math:`p` the number of features. It thus becomes prohibitively costly when + the number of features increases. Setting + `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol` + can help to reduce its computational cost. + + Depending on the nature of missing values, simple imputers can be + preferable in a prediction context. + + References + ---------- + .. 
[1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice: + Multivariate Imputation by Chained Equations in R". Journal of + Statistical Software 45: 1-67. + `_ + + .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in + Multivariate Data Suitable for use with an Electronic Computer". + Journal of the Royal Statistical Society 22(2): 302-306. + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.experimental import enable_iterative_imputer + >>> from sklearn.impute import IterativeImputer + >>> imp_mean = IterativeImputer(random_state=0) + >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) + IterativeImputer(random_state=0) + >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] + >>> imp_mean.transform(X) + array([[ 6.9584..., 2. , 3. ], + [ 4. , 2.6000..., 6. ], + [10. , 4.9999..., 9. ]]) + + For a more detailed example see + :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py` or + :ref:`sphx_glr_auto_examples_impute_plot_iterative_imputer_variants_comparison.py`. + """ + + _parameter_constraints: dict = { + **_BaseImputer._parameter_constraints, + "estimator": [None, HasMethods(["fit", "predict"])], + "sample_posterior": ["boolean"], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")], + "initial_strategy": [ + StrOptions({"mean", "median", "most_frequent", "constant"}) + ], + "fill_value": "no_validation", # any object is valid + "imputation_order": [ + StrOptions({"ascending", "descending", "roman", "arabic", "random"}) + ], + "skip_complete": ["boolean"], + "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"], + "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator=None, + *, + missing_values=np.nan, + sample_posterior=False, + max_iter=10, + tol=1e-3, + n_nearest_features=None, + initial_strategy="mean", + fill_value=None, + imputation_order="ascending", + skip_complete=False, + min_value=-np.inf, + max_value=np.inf, + verbose=0, + random_state=None, + add_indicator=False, + keep_empty_features=False, + ): + super().__init__( + missing_values=missing_values, + add_indicator=add_indicator, + keep_empty_features=keep_empty_features, + ) + + self.estimator = estimator + self.sample_posterior = sample_posterior + self.max_iter = max_iter + self.tol = tol + self.n_nearest_features = n_nearest_features + self.initial_strategy = initial_strategy + self.fill_value = fill_value + self.imputation_order = imputation_order + self.skip_complete = skip_complete + self.min_value = min_value + self.max_value = max_value + self.verbose = verbose + self.random_state = random_state + + def _impute_one_feature( + self, + X_filled, + mask_missing_values, + feat_idx, + neighbor_feat_idx, + estimator=None, + fit_mode=True, + ): + """Impute a single feature from the others provided. + + This function predicts the missing values of one of the features using + the current estimates of all the other features. The `estimator` must + support `return_std=True` in its `predict` method for this function + to work. + + Parameters + ---------- + X_filled : ndarray + Input data with the most recent imputations. + + mask_missing_values : ndarray + Input data's missing indicator matrix. + + feat_idx : int + Index of the feature currently being imputed. 
+ + neighbor_feat_idx : ndarray + Indices of the features to be used in imputing `feat_idx`. + + estimator : object + The estimator to use at this step of the round-robin imputation. + If `sample_posterior=True`, the estimator must support + `return_std` in its `predict` method. + If None, it will be cloned from self._estimator. + + fit_mode : boolean, default=True + Whether to fit and predict with the estimator or just predict. + + Returns + ------- + X_filled : ndarray + Input data with `X_filled[missing_row_mask, feat_idx]` updated. + + estimator : estimator with sklearn API + The fitted estimator used to impute + `X_filled[missing_row_mask, feat_idx]`. + """ + if estimator is None and fit_mode is False: + raise ValueError( + "If fit_mode is False, then an already-fitted " + "estimator should be passed in." + ) + + if estimator is None: + estimator = clone(self._estimator) + + missing_row_mask = mask_missing_values[:, feat_idx] + if fit_mode: + X_train = _safe_indexing( + _safe_indexing(X_filled, neighbor_feat_idx, axis=1), + ~missing_row_mask, + axis=0, + ) + y_train = _safe_indexing( + _safe_indexing(X_filled, feat_idx, axis=1), + ~missing_row_mask, + axis=0, + ) + estimator.fit(X_train, y_train) + + # if no missing values, don't predict + if np.sum(missing_row_mask) == 0: + return X_filled, estimator + + # get posterior samples if there is at least one missing value + X_test = _safe_indexing( + _safe_indexing(X_filled, neighbor_feat_idx, axis=1), + missing_row_mask, + axis=0, + ) + if self.sample_posterior: + mus, sigmas = estimator.predict(X_test, return_std=True) + imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype) + # two types of problems: (1) non-positive sigmas + # (2) mus outside legal range of min_value and max_value + # (results in inf sample) + positive_sigmas = sigmas > 0 + imputed_values[~positive_sigmas] = mus[~positive_sigmas] + mus_too_low = mus < self._min_value[feat_idx] + imputed_values[mus_too_low] = self._min_value[feat_idx] + mus_too_high = mus > self._max_value[feat_idx] + imputed_values[mus_too_high] = self._max_value[feat_idx] + # the rest can be sampled without statistical issues + inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high + mus = mus[inrange_mask] + sigmas = sigmas[inrange_mask] + a = (self._min_value[feat_idx] - mus) / sigmas + b = (self._max_value[feat_idx] - mus) / sigmas + + truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas) + imputed_values[inrange_mask] = truncated_normal.rvs( + random_state=self.random_state_ + ) + else: + imputed_values = estimator.predict(X_test) + imputed_values = np.clip( + imputed_values, self._min_value[feat_idx], self._max_value[feat_idx] + ) + + # update the feature + _safe_assign( + X_filled, + imputed_values, + row_indexer=missing_row_mask, + column_indexer=feat_idx, + ) + return X_filled, estimator + + def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat): + """Get a list of other features to predict `feat_idx`. + + If `self.n_nearest_features` is less than or equal to the total + number of features, then use a probability proportional to the absolute + correlation between `feat_idx` and each other feature to randomly + choose a subsample of the other features (without replacement). + + Parameters + ---------- + n_features : int + Number of features in `X`. + + feat_idx : int + Index of the feature currently being imputed. + + abs_corr_mat : ndarray, shape (n_features, n_features) + Absolute correlation matrix of `X`. 
The diagonal has been zeroed + out and each feature has been normalized to sum to 1. Can be None. + + Returns + ------- + neighbor_feat_idx : array-like + The features to use to impute `feat_idx`. + """ + if self.n_nearest_features is not None and self.n_nearest_features < n_features: + p = abs_corr_mat[:, feat_idx] + neighbor_feat_idx = self.random_state_.choice( + np.arange(n_features), self.n_nearest_features, replace=False, p=p + ) + else: + inds_left = np.arange(feat_idx) + inds_right = np.arange(feat_idx + 1, n_features) + neighbor_feat_idx = np.concatenate((inds_left, inds_right)) + return neighbor_feat_idx + + def _get_ordered_idx(self, mask_missing_values): + """Decide in what order we will update the features. + + As a homage to the MICE R package, we will have 4 main options of + how to order the updates, and use a random order if anything else + is specified. + + Also, this function skips features which have no missing values. + + Parameters + ---------- + mask_missing_values : array-like, shape (n_samples, n_features) + Input data's missing indicator matrix, where `n_samples` is the + number of samples and `n_features` is the number of features. + + Returns + ------- + ordered_idx : ndarray, shape (n_features,) + The order in which to impute the features. + """ + frac_of_missing_values = mask_missing_values.mean(axis=0) + if self.skip_complete: + missing_values_idx = np.flatnonzero(frac_of_missing_values) + else: + missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0]) + if self.imputation_order == "roman": + ordered_idx = missing_values_idx + elif self.imputation_order == "arabic": + ordered_idx = missing_values_idx[::-1] + elif self.imputation_order == "ascending": + n = len(frac_of_missing_values) - len(missing_values_idx) + ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:] + elif self.imputation_order == "descending": + n = len(frac_of_missing_values) - len(missing_values_idx) + ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1] + elif self.imputation_order == "random": + ordered_idx = missing_values_idx + self.random_state_.shuffle(ordered_idx) + return ordered_idx + + def _get_abs_corr_mat(self, X_filled, tolerance=1e-6): + """Get absolute correlation matrix between features. + + Parameters + ---------- + X_filled : ndarray, shape (n_samples, n_features) + Input data with the most recent imputations. + + tolerance : float, default=1e-6 + `abs_corr_mat` can have nans, which will be replaced + with `tolerance`. + + Returns + ------- + abs_corr_mat : ndarray, shape (n_features, n_features) + Absolute correlation matrix of `X` at the beginning of the + current round. The diagonal has been zeroed out and each feature's + absolute correlations with all others have been normalized to sum + to 1. + """ + n_features = X_filled.shape[1] + if self.n_nearest_features is None or self.n_nearest_features >= n_features: + return None + with np.errstate(invalid="ignore"): + # if a feature in the neighborhood has only a single value + # (e.g., categorical feature), the std. dev. will be null and + # np.corrcoef will raise a warning due to a division by zero + abs_corr_mat = np.abs(np.corrcoef(X_filled.T)) + # np.corrcoef is not defined for features with zero std + abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance + # ensures exploration, i.e. 
at least some probability of sampling + np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat) + # features are not their own neighbors + np.fill_diagonal(abs_corr_mat, 0) + # needs to sum to 1 for np.random.choice sampling + abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False) + return abs_corr_mat + + def _initial_imputation(self, X, in_fit=False): + """Perform initial imputation for input `X`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + in_fit : bool, default=False + Whether function is called in :meth:`fit`. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + X_filled : ndarray of shape (n_samples, n_features) + Input data with the most recent imputations. + + mask_missing_values : ndarray of shape (n_samples, n_features) + Input data's missing indicator matrix, where `n_samples` is the + number of samples and `n_features` is the number of features, + masked by non-missing features. + + X_missing_mask : ndarray, shape (n_samples, n_features) + Input data's mask matrix indicating missing datapoints, where + `n_samples` is the number of samples and `n_features` is the + number of features. + """ + if is_scalar_nan(self.missing_values): + force_all_finite = "allow-nan" + else: + force_all_finite = True + + X = self._validate_data( + X, + dtype=FLOAT_DTYPES, + order="F", + reset=in_fit, + force_all_finite=force_all_finite, + ) + _check_inputs_dtype(X, self.missing_values) + + X_missing_mask = _get_mask(X, self.missing_values) + mask_missing_values = X_missing_mask.copy() + if self.initial_imputer_ is None: + self.initial_imputer_ = SimpleImputer( + missing_values=self.missing_values, + strategy=self.initial_strategy, + fill_value=self.fill_value, + keep_empty_features=self.keep_empty_features, + ).set_output(transform="default") + X_filled = self.initial_imputer_.fit_transform(X) + else: + X_filled = self.initial_imputer_.transform(X) + + valid_mask = np.flatnonzero( + np.logical_not(np.isnan(self.initial_imputer_.statistics_)) + ) + + if not self.keep_empty_features: + # drop empty features + Xt = X[:, valid_mask] + mask_missing_values = mask_missing_values[:, valid_mask] + else: + # mark empty features as not missing and keep the original + # imputation + mask_missing_values[:, valid_mask] = True + Xt = X + + return Xt, X_filled, mask_missing_values, X_missing_mask + + @staticmethod + def _validate_limit(limit, limit_type, n_features): + """Validate the limits (min/max) of the feature values. + + Converts scalar min/max limits to vectors of shape `(n_features,)`. + + Parameters + ---------- + limit: scalar or array-like + The user-specified limit (i.e, min_value or max_value). + limit_type: {'max', 'min'} + Type of limit to validate. + n_features: int + Number of features in the dataset. + + Returns + ------- + limit: ndarray, shape(n_features,) + Array of limits, one for each feature. + """ + limit_bound = np.inf if limit_type == "max" else -np.inf + limit = limit_bound if limit is None else limit + if np.isscalar(limit): + limit = np.full(n_features, limit) + limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False) + if not limit.shape[0] == n_features: + raise ValueError( + f"'{limit_type}_value' should be of " + f"shape ({n_features},) when an array-like " + f"is provided. 
Got {limit.shape}, instead." + ) + return limit + + @_fit_context( + # IterativeImputer.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None): + """Fit the imputer on `X` and return the transformed `X`. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : array-like, shape (n_samples, n_features) + The imputed input data. + """ + self.random_state_ = getattr( + self, "random_state_", check_random_state(self.random_state) + ) + + if self.estimator is None: + from ..linear_model import BayesianRidge + + self._estimator = BayesianRidge() + else: + self._estimator = clone(self.estimator) + + self.imputation_sequence_ = [] + + self.initial_imputer_ = None + + X, Xt, mask_missing_values, complete_mask = self._initial_imputation( + X, in_fit=True + ) + + super()._fit_indicator(complete_mask) + X_indicator = super()._transform_indicator(complete_mask) + + if self.max_iter == 0 or np.all(mask_missing_values): + self.n_iter_ = 0 + return super()._concatenate_indicator(Xt, X_indicator) + + # Edge case: a single feature. We return the initial ... + if Xt.shape[1] == 1: + self.n_iter_ = 0 + return super()._concatenate_indicator(Xt, X_indicator) + + self._min_value = self._validate_limit(self.min_value, "min", X.shape[1]) + self._max_value = self._validate_limit(self.max_value, "max", X.shape[1]) + + if not np.all(np.greater(self._max_value, self._min_value)): + raise ValueError("One (or more) features have min_value >= max_value.") + + # order in which to impute + # note this is probably too slow for large feature data (d > 100000) + # and a better way would be good. 
+ # see: https://goo.gl/KyCNwj and subsequent comments + ordered_idx = self._get_ordered_idx(mask_missing_values) + self.n_features_with_missing_ = len(ordered_idx) + + abs_corr_mat = self._get_abs_corr_mat(Xt) + + n_samples, n_features = Xt.shape + if self.verbose > 0: + print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) + start_t = time() + if not self.sample_posterior: + Xt_previous = Xt.copy() + normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values])) + for self.n_iter_ in range(1, self.max_iter + 1): + if self.imputation_order == "random": + ordered_idx = self._get_ordered_idx(mask_missing_values) + + for feat_idx in ordered_idx: + neighbor_feat_idx = self._get_neighbor_feat_idx( + n_features, feat_idx, abs_corr_mat + ) + Xt, estimator = self._impute_one_feature( + Xt, + mask_missing_values, + feat_idx, + neighbor_feat_idx, + estimator=None, + fit_mode=True, + ) + estimator_triplet = _ImputerTriplet( + feat_idx, neighbor_feat_idx, estimator + ) + self.imputation_sequence_.append(estimator_triplet) + + if self.verbose > 1: + print( + "[IterativeImputer] Ending imputation round " + "%d/%d, elapsed time %0.2f" + % (self.n_iter_, self.max_iter, time() - start_t) + ) + + if not self.sample_posterior: + inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None) + if self.verbose > 0: + print( + "[IterativeImputer] Change: {}, scaled tolerance: {} ".format( + inf_norm, normalized_tol + ) + ) + if inf_norm < normalized_tol: + if self.verbose > 0: + print("[IterativeImputer] Early stopping criterion reached.") + break + Xt_previous = Xt.copy() + else: + if not self.sample_posterior: + warnings.warn( + "[IterativeImputer] Early stopping criterion not reached.", + ConvergenceWarning, + ) + _assign_where(Xt, X, cond=~mask_missing_values) + + return super()._concatenate_indicator(Xt, X_indicator) + + def transform(self, X): + """Impute all missing values in `X`. + + Note that this is stochastic, and that if `random_state` is not fixed, + repeated calls, or permuted input, results will differ. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data to complete. + + Returns + ------- + Xt : array-like, shape (n_samples, n_features) + The imputed input data. + """ + check_is_fitted(self) + + X, Xt, mask_missing_values, complete_mask = self._initial_imputation( + X, in_fit=False + ) + + X_indicator = super()._transform_indicator(complete_mask) + + if self.n_iter_ == 0 or np.all(mask_missing_values): + return super()._concatenate_indicator(Xt, X_indicator) + + imputations_per_round = len(self.imputation_sequence_) // self.n_iter_ + i_rnd = 0 + if self.verbose > 0: + print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) + start_t = time() + for it, estimator_triplet in enumerate(self.imputation_sequence_): + Xt, _ = self._impute_one_feature( + Xt, + mask_missing_values, + estimator_triplet.feat_idx, + estimator_triplet.neighbor_feat_idx, + estimator=estimator_triplet.estimator, + fit_mode=False, + ) + if not (it + 1) % imputations_per_round: + if self.verbose > 1: + print( + "[IterativeImputer] Ending imputation round " + "%d/%d, elapsed time %0.2f" + % (i_rnd + 1, self.n_iter_, time() - start_t) + ) + i_rnd += 1 + + _assign_where(Xt, X, cond=~mask_missing_values) + + return super()._concatenate_indicator(Xt, X_indicator) + + def fit(self, X, y=None): + """Fit the imputer on `X` and return self. 
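Looking back at the convergence test in `fit_transform` above, a small numeric sketch of the scaled-tolerance comparison. The toy arrays stand in for two consecutive rounds and for `X[~mask_missing_values]`; note that `np.linalg.norm(..., ord=np.inf)` on a 2-D array is the matrix infinity norm (maximum absolute row sum):

import numpy as np

Xt_prev = np.array([[1.0, 2.0], [3.0, 4.0]])
Xt = np.array([[1.0, 2.1], [3.0, 4.0]])
normalized_tol = 1e-3 * np.max(np.abs(Xt_prev))                 # tol scaled by data magnitude
inf_norm = np.linalg.norm(Xt - Xt_prev, ord=np.inf, axis=None)  # 0.1
print(inf_norm < normalized_tol)  # False -> keep iterating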
+
+        Parameters
+        ----------
+        X : array-like, shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            Fitted estimator.
+        """
+        self.fit_transform(X)
+        return self
+
+    def get_feature_names_out(self, input_features=None):
+        """Get output feature names for transformation.
+
+        Parameters
+        ----------
+        input_features : array-like of str or None, default=None
+            Input features.
+
+            - If `input_features` is `None`, then `feature_names_in_` is
+              used as feature names in. If `feature_names_in_` is not defined,
+              then the following input feature names are generated:
+              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
+            - If `input_features` is an array-like, then `input_features` must
+              match `feature_names_in_` if `feature_names_in_` is defined.
+
+        Returns
+        -------
+        feature_names_out : ndarray of str objects
+            Transformed feature names.
+        """
+        check_is_fitted(self, "n_features_in_")
+        input_features = _check_feature_names_in(self, input_features)
+        names = self.initial_imputer_.get_feature_names_out(input_features)
+        return self._concatenate_indicator_feature_names_out(names, input_features)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_knn.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_knn.py
new file mode 100644
index 0000000000000000000000000000000000000000..d20530bb67cb05017950a345455734cffd2f1008
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/_knn.py
@@ -0,0 +1,401 @@
+# Authors: Ashim Bhattarai
+#          Thomas J Fan
+# License: BSD 3 clause
+
+from numbers import Integral
+
+import numpy as np
+
+from ..base import _fit_context
+from ..metrics import pairwise_distances_chunked
+from ..metrics.pairwise import _NAN_METRICS
+from ..neighbors._base import _get_weights
+from ..utils import is_scalar_nan
+from ..utils._mask import _get_mask
+from ..utils._param_validation import Hidden, Interval, StrOptions
+from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
+from ._base import _BaseImputer
+
+
+class KNNImputer(_BaseImputer):
+    """Imputation for completing missing values using k-Nearest Neighbors.
+
+    Each sample's missing values are imputed using the mean value from
+    `n_neighbors` nearest neighbors found in the training set. Two samples are
+    close if the features that neither is missing are close.
+
+    Read more in the :ref:`User Guide `.
+
+    .. versionadded:: 0.22
+
+    Parameters
+    ----------
+    missing_values : int, float, str, np.nan or None, default=np.nan
+        The placeholder for the missing values. All occurrences of
+        `missing_values` will be imputed. For pandas' dataframes with
+        nullable integer dtypes with missing values, `missing_values`
+        should be set to np.nan, since `pd.NA` will be converted to np.nan.
+
+    n_neighbors : int, default=5
+        Number of neighboring samples to use for imputation.
+
+    weights : {'uniform', 'distance'} or callable, default='uniform'
+        Weight function used in prediction. Possible values:
+
+        - 'uniform' : uniform weights. All points in each neighborhood are
+          weighted equally.
+        - 'distance' : weight points by the inverse of their distance.
+          In this case, closer neighbors of a query point will have a
+          greater influence than neighbors which are further away.
+ - callable : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + metric : {'nan_euclidean'} or callable, default='nan_euclidean' + Distance metric for searching neighbors. Possible values: + + - 'nan_euclidean' + - callable : a user-defined function which conforms to the definition + of ``_pairwise_callable(X, Y, metric, **kwds)``. The function + accepts two arrays, X and Y, and a `missing_values` keyword in + `kwds` and returns a scalar distance value. + + copy : bool, default=True + If True, a copy of X will be created. If False, imputation will + be done in-place whenever possible. + + add_indicator : bool, default=False + If True, a :class:`MissingIndicator` transform will stack onto the + output of the imputer's transform. This allows a predictive estimator + to account for missingness despite imputation. If a feature has no + missing values at fit/train time, the feature won't appear on the + missing indicator even if there are missing values at transform/test + time. + + keep_empty_features : bool, default=False + If True, features that consist exclusively of missing values when + `fit` is called are returned in results when `transform` is called. + The imputed value is always `0`. + + .. versionadded:: 1.2 + + Attributes + ---------- + indicator_ : :class:`~sklearn.impute.MissingIndicator` + Indicator used to add binary indicators for missing values. + ``None`` if add_indicator is False. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SimpleImputer : Univariate imputer for completing missing values + with simple strategies. + IterativeImputer : Multivariate imputer that estimates values to impute for + each feature with missing values from all the others. + + References + ---------- + * `Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor + Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing + value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17 + no. 6, 2001 Pages 520-525. + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.impute import KNNImputer + >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]] + >>> imputer = KNNImputer(n_neighbors=2) + >>> imputer.fit_transform(X) + array([[1. , 2. , 4. ], + [3. , 4. , 3. ], + [5.5, 6. , 5. ], + [8. , 8. , 7. ]]) + + For a more detailed example see + :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`. 
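On the `'nan_euclidean'` default above: the metric drops coordinates where either sample is missing and rescales by the fraction of usable features, so distances stay comparable across rows with different missingness. A small check with the public helper:

import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances

a = np.array([[1.0, 2.0, np.nan]])
b = np.array([[3.0, 4.0, 3.0]])
# squared differences on the 2 shared coordinates sum to 8; rescaled by 3/2 -> sqrt(12)
print(nan_euclidean_distances(a, b))  # [[3.4641...]]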
+ """ + + _parameter_constraints: dict = { + **_BaseImputer._parameter_constraints, + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)], + "metric": [StrOptions(set(_NAN_METRICS)), callable], + "copy": ["boolean"], + } + + def __init__( + self, + *, + missing_values=np.nan, + n_neighbors=5, + weights="uniform", + metric="nan_euclidean", + copy=True, + add_indicator=False, + keep_empty_features=False, + ): + super().__init__( + missing_values=missing_values, + add_indicator=add_indicator, + keep_empty_features=keep_empty_features, + ) + self.n_neighbors = n_neighbors + self.weights = weights + self.metric = metric + self.copy = copy + + def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col): + """Helper function to impute a single column. + + Parameters + ---------- + dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors) + Distance matrix between the receivers and potential donors from + training set. There must be at least one non-nan distance between + a receiver and a potential donor. + + n_neighbors : int + Number of neighbors to consider. + + fit_X_col : ndarray of shape (n_potential_donors,) + Column of potential donors from training set. + + mask_fit_X_col : ndarray of shape (n_potential_donors,) + Missing mask for fit_X_col. + + Returns + ------- + imputed_values: ndarray of shape (n_receivers,) + Imputed values for receiver. + """ + # Get donors + donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[ + :, :n_neighbors + ] + + # Get weight matrix from distance matrix + donors_dist = dist_pot_donors[ + np.arange(donors_idx.shape[0])[:, None], donors_idx + ] + + weight_matrix = _get_weights(donors_dist, self.weights) + + # fill nans with zeros + if weight_matrix is not None: + weight_matrix[np.isnan(weight_matrix)] = 0.0 + + # Retrieve donor values and calculate kNN average + donors = fit_X_col.take(donors_idx) + donors_mask = mask_fit_X_col.take(donors_idx) + donors = np.ma.array(donors, mask=donors_mask) + + return np.ma.average(donors, axis=1, weights=weight_matrix).data + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the imputer on X. + + Parameters + ---------- + X : array-like shape of (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + The fitted `KNNImputer` class instance. + """ + # Check data integrity and calling arguments + if not is_scalar_nan(self.missing_values): + force_all_finite = True + else: + force_all_finite = "allow-nan" + + X = self._validate_data( + X, + accept_sparse=False, + dtype=FLOAT_DTYPES, + force_all_finite=force_all_finite, + copy=self.copy, + ) + + self._fit_X = X + self._mask_fit_X = _get_mask(self._fit_X, self.missing_values) + self._valid_mask = ~np.all(self._mask_fit_X, axis=0) + + super()._fit_indicator(self._mask_fit_X) + + return self + + def transform(self, X): + """Impute all missing values in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data to complete. + + Returns + ------- + X : array-like of shape (n_samples, n_output_features) + The imputed dataset. `n_output_features` is the number of features + that is not always missing during `fit`. 
+ """ + + check_is_fitted(self) + if not is_scalar_nan(self.missing_values): + force_all_finite = True + else: + force_all_finite = "allow-nan" + X = self._validate_data( + X, + accept_sparse=False, + dtype=FLOAT_DTYPES, + force_all_finite=force_all_finite, + copy=self.copy, + reset=False, + ) + + mask = _get_mask(X, self.missing_values) + mask_fit_X = self._mask_fit_X + valid_mask = self._valid_mask + + X_indicator = super()._transform_indicator(mask) + + # Removes columns where the training data is all nan + if not np.any(mask): + # No missing values in X + if self.keep_empty_features: + Xc = X + Xc[:, ~valid_mask] = 0 + else: + Xc = X[:, valid_mask] + + # Even if there are no missing values in X, we still concatenate Xc + # with the missing value indicator matrix, X_indicator. + # This is to ensure that the output maintains consistency in terms + # of columns, regardless of whether missing values exist in X or not. + return super()._concatenate_indicator(Xc, X_indicator) + + row_missing_idx = np.flatnonzero(mask.any(axis=1)) + + non_missing_fix_X = np.logical_not(mask_fit_X) + + # Maps from indices from X to indices in dist matrix + dist_idx_map = np.zeros(X.shape[0], dtype=int) + dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0]) + + def process_chunk(dist_chunk, start): + row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)] + + # Find and impute missing by column + for col in range(X.shape[1]): + if not valid_mask[col]: + # column was all missing during training + continue + + col_mask = mask[row_missing_chunk, col] + if not np.any(col_mask): + # column has no missing values + continue + + (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col]) + + # receivers_idx are indices in X + receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)] + + # distances for samples that needed imputation for column + dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ + :, potential_donors_idx + ] + + # receivers with all nan distances impute with mean + all_nan_dist_mask = np.isnan(dist_subset).all(axis=1) + all_nan_receivers_idx = receivers_idx[all_nan_dist_mask] + + if all_nan_receivers_idx.size: + col_mean = np.ma.array( + self._fit_X[:, col], mask=mask_fit_X[:, col] + ).mean() + X[all_nan_receivers_idx, col] = col_mean + + if len(all_nan_receivers_idx) == len(receivers_idx): + # all receivers imputed with mean + continue + + # receivers with at least one defined distance + receivers_idx = receivers_idx[~all_nan_dist_mask] + dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ + :, potential_donors_idx + ] + + n_neighbors = min(self.n_neighbors, len(potential_donors_idx)) + value = self._calc_impute( + dist_subset, + n_neighbors, + self._fit_X[potential_donors_idx, col], + mask_fit_X[potential_donors_idx, col], + ) + X[receivers_idx, col] = value + + # process in fixed-memory chunks + gen = pairwise_distances_chunked( + X[row_missing_idx, :], + self._fit_X, + metric=self.metric, + missing_values=self.missing_values, + force_all_finite=force_all_finite, + reduce_func=process_chunk, + ) + for chunk in gen: + # process_chunk modifies X in place. No return value. + pass + + if self.keep_empty_features: + Xc = X + Xc[:, ~valid_mask] = 0 + else: + Xc = X[:, valid_mask] + + return super()._concatenate_indicator(Xc, X_indicator) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. 
+ + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + names = input_features[self._valid_mask] + return self._concatenate_indicator_feature_names_out(names, input_features) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28093f616e036f59952f6789c2d6277d652b3f38 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3417592df029e438bd58cddfe065f464a6cbe75b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66b40b32894e75348fc51d0d3fd16b92020e6ac1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c96d30719b0aec717a919646a04ea82208406334 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a23dfea4fcc20eef6d2d8ef8782ea2b18d3120f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py new file mode 100644 index 
0000000000000000000000000000000000000000..0c1bd83f7ca9ea8adde76940e2f7fdd86d89ea5c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py @@ -0,0 +1,107 @@ +import numpy as np +import pytest + +from sklearn.impute._base import _BaseImputer +from sklearn.impute._iterative import _assign_where +from sklearn.utils._mask import _get_mask +from sklearn.utils._testing import _convert_container, assert_allclose + + +@pytest.fixture +def data(): + X = np.random.randn(10, 2) + X[::2] = np.nan + return X + + +class NoFitIndicatorImputer(_BaseImputer): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + return self._concatenate_indicator(X, self._transform_indicator(X)) + + +class NoTransformIndicatorImputer(_BaseImputer): + def fit(self, X, y=None): + mask = _get_mask(X, value_to_mask=np.nan) + super()._fit_indicator(mask) + return self + + def transform(self, X, y=None): + return self._concatenate_indicator(X, None) + + +class NoPrecomputedMaskFit(_BaseImputer): + def fit(self, X, y=None): + self._fit_indicator(X) + return self + + def transform(self, X): + return self._concatenate_indicator(X, self._transform_indicator(X)) + + +class NoPrecomputedMaskTransform(_BaseImputer): + def fit(self, X, y=None): + mask = _get_mask(X, value_to_mask=np.nan) + self._fit_indicator(mask) + return self + + def transform(self, X): + return self._concatenate_indicator(X, self._transform_indicator(X)) + + +def test_base_imputer_not_fit(data): + imputer = NoFitIndicatorImputer(add_indicator=True) + err_msg = "Make sure to call _fit_indicator before _transform_indicator" + with pytest.raises(ValueError, match=err_msg): + imputer.fit(data).transform(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +def test_base_imputer_not_transform(data): + imputer = NoTransformIndicatorImputer(add_indicator=True) + err_msg = ( + "Call _fit_indicator and _transform_indicator in the imputer implementation" + ) + with pytest.raises(ValueError, match=err_msg): + imputer.fit(data).transform(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +def test_base_no_precomputed_mask_fit(data): + imputer = NoPrecomputedMaskFit(add_indicator=True) + err_msg = "precomputed is True but the input data is not a mask" + with pytest.raises(ValueError, match=err_msg): + imputer.fit(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +def test_base_no_precomputed_mask_transform(data): + imputer = NoPrecomputedMaskTransform(add_indicator=True) + err_msg = "precomputed is True but the input data is not a mask" + imputer.fit(data) + with pytest.raises(ValueError, match=err_msg): + imputer.transform(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +@pytest.mark.parametrize("X1_type", ["array", "dataframe"]) +def test_assign_where(X1_type): + """Check the behaviour of the private helper `_assign_where`.""" + rng = np.random.RandomState(0) + + n_samples, n_features = 10, 5 + X1 = _convert_container(rng.randn(n_samples, n_features), constructor_name=X1_type) + X2 = rng.randn(n_samples, n_features) + mask = rng.randint(0, 2, size=(n_samples, n_features)).astype(bool) + + _assign_where(X1, X2, mask) + + if X1_type == "dataframe": + X1 = X1.to_numpy() + assert_allclose(X1[mask], X2[mask]) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py
b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..4d41b44fb0252666952c70caed372e1f2a048bf0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py @@ -0,0 +1,220 @@ +import numpy as np +import pytest + +from sklearn.experimental import enable_iterative_imputer # noqa +from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def imputers(): + return [IterativeImputer(tol=0.1), KNNImputer(), SimpleImputer()] + + +def sparse_imputers(): + return [SimpleImputer()] + + +# ConvergenceWarning will be raised by the IterativeImputer +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +def test_imputation_missing_value_in_test_array(imputer): + # Non-regression test for issue #13968: a missing value in the test set + # should not raise an error, and the imputed output should be finite + train = [[1], [2]] + test = [[3], [np.nan]] + imputer.set_params(add_indicator=True) + imputer.fit(train).transform(test) + + +# ConvergenceWarning will be raised by the IterativeImputer +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("marker", [np.nan, -1, 0]) +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +def test_imputers_add_indicator(marker, imputer): + X = np.array( + [ + [marker, 1, 5, marker, 1], + [2, marker, 1, marker, 2], + [6, 3, marker, marker, 3], + [1, 2, 9, marker, 4], + ] + ) + X_true_indicator = np.array( + [ + [1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) + imputer.set_params(missing_values=marker, add_indicator=True) + + X_trans = imputer.fit_transform(X) + assert_allclose(X_trans[:, -4:], X_true_indicator) + assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) + + imputer.set_params(add_indicator=False) + X_trans_no_indicator = imputer.fit_transform(X) + assert_allclose(X_trans[:, :-4], X_trans_no_indicator) + + +# ConvergenceWarning will be raised by the IterativeImputer +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("marker", [np.nan, -1]) +@pytest.mark.parametrize( + "imputer", sparse_imputers(), ids=lambda x: x.__class__.__name__ +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_imputers_add_indicator_sparse(imputer, marker, csr_container): + X = csr_container( + [ + [marker, 1, 5, marker, 1], + [2, marker, 1, marker, 2], + [6, 3, marker, marker, 3], + [1, 2, 9, marker, 4], + ] + ) + X_true_indicator = csr_container( + [ + [1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) + imputer.set_params(missing_values=marker, add_indicator=True) + + X_trans = imputer.fit_transform(X) + assert_allclose_dense_sparse(X_trans[:, -4:], X_true_indicator) + assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) + + imputer.set_params(add_indicator=False) + X_trans_no_indicator = imputer.fit_transform(X) + assert_allclose_dense_sparse(X_trans[:, :-4], X_trans_no_indicator) + + +# ConvergenceWarning will be raised by the IterativeImputer
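+# Note on the test below (assumed validation behaviour, not stated in the
+# original file): nullable integer columns such as pandas "Int16" store
+# missing entries as pd.NA; during input validation they are converted to a
+# float ndarray in which pd.NA becomes np.nan, hence missing_values=np.nan.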
+@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +@pytest.mark.parametrize("add_indicator", [True, False]) +def test_imputers_pandas_na_integer_array_support(imputer, add_indicator): + # Test pandas IntegerArray with pd.NA + pd = pytest.importorskip("pandas") + marker = np.nan + imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker) + + X = np.array( + [ + [marker, 1, 5, marker, 1], + [2, marker, 1, marker, 2], + [6, 3, marker, marker, 3], + [1, 2, 9, marker, 4], + ] + ) + # fit on numpy array + X_trans_expected = imputer.fit_transform(X) + + # Creates dataframe with IntegerArrays with pd.NA + X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c", "d", "e"]) + + # fit on pandas dataframe with IntegerArrays + X_trans = imputer.fit_transform(X_df) + + assert_allclose(X_trans_expected, X_trans) + + +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +@pytest.mark.parametrize("add_indicator", [True, False]) +def test_imputers_feature_names_out_pandas(imputer, add_indicator): + """Check feature names out for imputers.""" + pd = pytest.importorskip("pandas") + marker = np.nan + imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker) + + X = np.array( + [ + [marker, 1, 5, 3, marker, 1], + [2, marker, 1, 4, marker, 2], + [6, 3, 7, marker, marker, 3], + [1, 2, 9, 8, marker, 4], + ] + ) + X_df = pd.DataFrame(X, columns=["a", "b", "c", "d", "e", "f"]) + imputer.fit(X_df) + + names = imputer.get_feature_names_out() + + if add_indicator: + expected_names = [ + "a", + "b", + "c", + "d", + "f", + "missingindicator_a", + "missingindicator_b", + "missingindicator_d", + "missingindicator_e", + ] + assert_array_equal(expected_names, names) + else: + expected_names = ["a", "b", "c", "d", "f"] + assert_array_equal(expected_names, names) + + +@pytest.mark.parametrize("keep_empty_features", [True, False]) +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +def test_keep_empty_features(imputer, keep_empty_features): + """Check that the imputer keeps features with only missing values.""" + X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]]) + imputer = imputer.set_params( + add_indicator=False, keep_empty_features=keep_empty_features + ) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + if keep_empty_features: + assert X_imputed.shape == X.shape + else: + assert X_imputed.shape == (X.shape[0], X.shape[1] - 1) + + +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +@pytest.mark.parametrize("missing_value_test", [np.nan, 1]) +def test_imputation_adds_missing_indicator_if_add_indicator_is_true( + imputer, missing_value_test +): + """Check that missing indicator always exists when add_indicator=True. + + Non-regression test for gh-26590. + """ + X_train = np.array([[0, np.nan], [1, 2]]) + + # Test data where missing_value_test variable can be set to np.nan or 1. 
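+    # With missing_value_test=np.nan the indicator column flags row 0; with
+    # missing_value_test=1 nothing in X_test matches the np.nan marker used
+    # during fit, so the indicator column is all zeros (see the assertions
+    # at the end of this test).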
+ X_test = np.array([[0, missing_value_test], [1, 2]]) + + imputer.set_params(add_indicator=True) + imputer.fit(X_train) + + X_test_imputed_with_indicator = imputer.transform(X_test) + assert X_test_imputed_with_indicator.shape == (2, 3) + + imputer.set_params(add_indicator=False) + imputer.fit(X_train) + X_test_imputed_without_indicator = imputer.transform(X_test) + assert X_test_imputed_without_indicator.shape == (2, 2) + + assert_allclose( + X_test_imputed_with_indicator[:, :-1], X_test_imputed_without_indicator + ) + if np.isnan(missing_value_test): + expected_missing_indicator = [1, 0] + else: + expected_missing_indicator = [0, 0] + + assert_allclose(X_test_imputed_with_indicator[:, -1], expected_missing_indicator) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py new file mode 100644 index 0000000000000000000000000000000000000000..9322536ebcf473f1a031da965b5b613b08d4f281 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py @@ -0,0 +1,1754 @@ +import io +import re +import warnings +from itertools import product + +import numpy as np +import pytest +from scipy import sparse +from scipy.stats import kstest + +from sklearn import tree +from sklearn.datasets import load_diabetes +from sklearn.dummy import DummyRegressor +from sklearn.exceptions import ConvergenceWarning + +# make IterativeImputer available +from sklearn.experimental import enable_iterative_imputer # noqa +from sklearn.impute import IterativeImputer, KNNImputer, MissingIndicator, SimpleImputer +from sklearn.impute._base import _most_frequent +from sklearn.linear_model import ARDRegression, BayesianRidge, RidgeCV +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline, make_union +from sklearn.random_projection import _sparse_random_matrix +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_allclose_dense_sparse, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + BSR_CONTAINERS, + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + LIL_CONTAINERS, +) + + +def _assert_array_equal_and_same_dtype(x, y): + assert_array_equal(x, y) + assert x.dtype == y.dtype + + +def _assert_allclose_and_same_dtype(x, y): + assert_allclose(x, y) + assert x.dtype == y.dtype + + +def _check_statistics( + X, X_true, strategy, statistics, missing_values, sparse_container +): + """Utility function for testing imputation for a given strategy. 
+ + Test with dense and sparse arrays + + Check that: + - the statistics (mean, median, mode) are correct + - the missing values are imputed correctly""" + + err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % ( + strategy, + missing_values, + ) + + assert_ae = assert_array_equal + + if X.dtype.kind == "f" or X_true.dtype.kind == "f": + assert_ae = assert_array_almost_equal + + # Normal matrix + imputer = SimpleImputer(missing_values=missing_values, strategy=strategy) + X_trans = imputer.fit(X).transform(X.copy()) + assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False)) + assert_ae(X_trans, X_true, err_msg=err_msg.format(False)) + + # Sparse matrix + imputer = SimpleImputer(missing_values=missing_values, strategy=strategy) + imputer.fit(sparse_container(X)) + X_trans = imputer.transform(sparse_container(X.copy())) + + if sparse.issparse(X_trans): + X_trans = X_trans.toarray() + + assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True)) + assert_ae(X_trans, X_true, err_msg=err_msg.format(True)) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_imputation_shape(strategy, csr_container): + # Verify the shapes of the imputed matrix for different strategies. + X = np.random.randn(10, 2) + X[::2] = np.nan + + imputer = SimpleImputer(strategy=strategy) + X_imputed = imputer.fit_transform(csr_container(X)) + assert X_imputed.shape == (10, 2) + X_imputed = imputer.fit_transform(X) + assert X_imputed.shape == (10, 2) + + iterative_imputer = IterativeImputer(initial_strategy=strategy) + X_imputed = iterative_imputer.fit_transform(X) + assert X_imputed.shape == (10, 2) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +def test_imputation_deletion_warning(strategy): + X = np.ones((3, 5)) + X[:, 0] = np.nan + imputer = SimpleImputer(strategy=strategy).fit(X) + + with pytest.warns(UserWarning, match="Skipping"): + imputer.transform(X) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +def test_imputation_deletion_warning_feature_names(strategy): + pd = pytest.importorskip("pandas") + + missing_values = np.nan + feature_names = np.array(["a", "b", "c", "d"], dtype=object) + X = pd.DataFrame( + [ + [missing_values, missing_values, 1, missing_values], + [4, missing_values, 2, 10], + ], + columns=feature_names, + ) + + imputer = SimpleImputer(strategy=strategy).fit(X) + + # check SimpleImputer returning feature name attribute correctly + assert_array_equal(imputer.feature_names_in_, feature_names) + + # ensure that skipped feature warning includes feature name + with pytest.warns( + UserWarning, match=r"Skipping features without any observed values: \['b'\]" + ): + imputer.transform(X) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_error_sparse_0(strategy, csc_container): + # check that error are raised when missing_values = 0 and input is sparse + X = np.ones((3, 5)) + X[0] = 0 + X = csc_container(X) + + imputer = SimpleImputer(strategy=strategy, missing_values=0) + with pytest.raises(ValueError, match="Provide a dense array"): + imputer.fit(X) + + imputer.fit(X.toarray()) + with pytest.raises(ValueError, match="Provide a dense array"): + imputer.transform(X) + + +def safe_median(arr, *args, **kwargs): + # np.median([]) raises a TypeError for numpy >= 1.10.1 + 
length = arr.size if hasattr(arr, "size") else len(arr) + return np.nan if length == 0 else np.median(arr, *args, **kwargs) + + +def safe_mean(arr, *args, **kwargs): + # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1 + length = arr.size if hasattr(arr, "size") else len(arr) + return np.nan if length == 0 else np.mean(arr, *args, **kwargs) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_mean_median(csc_container): + # Test imputation using the mean and median strategies, when + # missing_values != 0. + rng = np.random.RandomState(0) + + dim = 10 + dec = 10 + shape = (dim * dim, dim + dec) + + zeros = np.zeros(shape[0]) + values = np.arange(1, shape[0] + 1) + values[4::2] = -values[4::2] + + tests = [ + ("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))), + ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))), + ] + + for strategy, test_missing_values, true_value_fun in tests: + X = np.empty(shape) + X_true = np.empty(shape) + true_statistics = np.empty(shape[1]) + + # Create a matrix X with columns + # - with only zeros, + # - with only missing values + # - with zeros, missing values and values + # And a matrix X_true containing all true values + for j in range(shape[1]): + nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) + nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) + nb_values = shape[0] - nb_zeros - nb_missing_values + + z = zeros[:nb_zeros] + p = np.repeat(test_missing_values, nb_missing_values) + v = values[rng.permutation(len(values))[:nb_values]] + + true_statistics[j] = true_value_fun(z, v, p) + + # Create the columns + X[:, j] = np.hstack((v, z, p)) + + if 0 == test_missing_values: + # XXX unreached code as of v0.22 + X_true[:, j] = np.hstack( + (v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros)) + ) + else: + X_true[:, j] = np.hstack( + (v, z, np.repeat(true_statistics[j], nb_missing_values)) + ) + + # Shuffle them the same way + np.random.RandomState(j).shuffle(X[:, j]) + np.random.RandomState(j).shuffle(X_true[:, j]) + + # Mean doesn't support columns containing NaNs, median does + if strategy == "median": + cols_to_keep = ~np.isnan(X_true).any(axis=0) + else: + cols_to_keep = ~np.isnan(X_true).all(axis=0) + + X_true = X_true[:, cols_to_keep] + + _check_statistics( + X, X_true, strategy, true_statistics, test_missing_values, csc_container + ) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_median_special_cases(csc_container): + # Test median imputation with sparse boundary cases + X = np.array( + [ + [0, np.nan, np.nan], # odd: implicit zero + [5, np.nan, np.nan], # odd: explicit nonzero + [0, 0, np.nan], # even: average two zeros + [-5, 0, np.nan], # even: avg zero and neg + [0, 5, np.nan], # even: avg zero and pos + [4, 5, np.nan], # even: avg nonzeros + [-4, -5, np.nan], # even: avg negatives + [-1, 2, np.nan], # even: crossing neg and pos + ] + ).transpose() + + X_imputed_median = np.array( + [ + [0, 0, 0], + [5, 5, 5], + [0, 0, 0], + [-5, 0, -2.5], + [0, 5, 2.5], + [4, 5, 4.5], + [-4, -5, -4.5], + [-1, 2, 0.5], + ] + ).transpose() + statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5] + + _check_statistics( + X, X_imputed_median, "median", statistics_median, np.nan, csc_container + ) + + +@pytest.mark.parametrize("strategy", ["mean", "median"]) +@pytest.mark.parametrize("dtype", [None, object, str]) +def test_imputation_mean_median_error_invalid_type(strategy, dtype): + X = np.array([["a", "b", 3], [4, "e", 6], 
["g", "h", 9]], dtype=dtype) + msg = "non-numeric data:\ncould not convert string to float:" + with pytest.raises(ValueError, match=msg): + imputer = SimpleImputer(strategy=strategy) + imputer.fit_transform(X) + + +@pytest.mark.parametrize("strategy", ["mean", "median"]) +@pytest.mark.parametrize("type", ["list", "dataframe"]) +def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type): + X = [["a", "b", 3], [4, "e", 6], ["g", "h", 9]] + if type == "dataframe": + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X) + msg = "non-numeric data:\ncould not convert string to float:" + with pytest.raises(ValueError, match=msg): + imputer = SimpleImputer(strategy=strategy) + imputer.fit_transform(X) + + +@pytest.mark.parametrize("strategy", ["constant", "most_frequent"]) +@pytest.mark.parametrize("dtype", [str, np.dtype("U"), np.dtype("S")]) +def test_imputation_const_mostf_error_invalid_types(strategy, dtype): + # Test imputation on non-numeric data using "most_frequent" and "constant" + # strategy + X = np.array( + [ + [np.nan, np.nan, "a", "f"], + [np.nan, "c", np.nan, "d"], + [np.nan, "b", "d", np.nan], + [np.nan, "c", "d", "h"], + ], + dtype=dtype, + ) + + err_msg = "SimpleImputer does not support data" + with pytest.raises(ValueError, match=err_msg): + imputer = SimpleImputer(strategy=strategy) + imputer.fit(X).transform(X) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_most_frequent(csc_container): + # Test imputation using the most-frequent strategy. + X = np.array( + [ + [-1, -1, 0, 5], + [-1, 2, -1, 3], + [-1, 1, 3, -1], + [-1, 2, 3, 7], + ] + ) + + X_true = np.array( + [ + [2, 0, 5], + [2, 3, 3], + [1, 3, 3], + [2, 3, 7], + ] + ) + + # scipy.stats.mode, used in SimpleImputer, doesn't return the first most + # frequent as promised in the doc but the lowest most frequent. When this + # test will fail after an update of scipy, SimpleImputer will need to be + # updated to be consistent with the new (correct) behaviour + _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1, csc_container) + + +@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) +def test_imputation_most_frequent_objects(marker): + # Test imputation using the most-frequent strategy. 
+ X = np.array( + [ + [marker, marker, "a", "f"], + [marker, "c", marker, "d"], + [marker, "b", "d", marker], + [marker, "c", "d", "h"], + ], + dtype=object, + ) + + X_true = np.array( + [ + ["c", "a", "f"], + ["c", "d", "d"], + ["b", "d", "d"], + ["c", "d", "h"], + ], + dtype=object, + ) + + imputer = SimpleImputer(missing_values=marker, strategy="most_frequent") + X_trans = imputer.fit(X).transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("dtype", [object, "category"]) +def test_imputation_most_frequent_pandas(dtype): + # Test imputation using the most frequent strategy on pandas df + pd = pytest.importorskip("pandas") + + f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,") + + df = pd.read_csv(f, dtype=dtype) + + X_true = np.array( + [["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"]], + dtype=object, + ) + + imputer = SimpleImputer(strategy="most_frequent") + X_trans = imputer.fit_transform(df) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1.0, np.nan)]) +def test_imputation_constant_error_invalid_type(X_data, missing_value): + # Verify that exceptions are raised on invalid fill_value type + X = np.full((3, 5), X_data, dtype=float) + X[0, 0] = missing_value + + fill_value = "x" + err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast" + with pytest.raises(ValueError, match=re.escape(err_msg)): + imputer = SimpleImputer( + missing_values=missing_value, strategy="constant", fill_value=fill_value + ) + imputer.fit_transform(X) + + +def test_imputation_constant_integer(): + # Test imputation using the constant strategy on integers + X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]]) + + X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]]) + + imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0) + X_trans = imputer.fit_transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("array_constructor", CSR_CONTAINERS + [np.asarray]) +def test_imputation_constant_float(array_constructor): + # Test imputation using the constant strategy on floats + X = np.array( + [ + [np.nan, 1.1, 0, np.nan], + [1.2, np.nan, 1.3, np.nan], + [0, 0, np.nan, np.nan], + [1.4, 1.5, 0, np.nan], + ] + ) + + X_true = np.array( + [[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]] + ) + + X = array_constructor(X) + + X_true = array_constructor(X_true) + + imputer = SimpleImputer(strategy="constant", fill_value=-1) + X_trans = imputer.fit_transform(X) + + assert_allclose_dense_sparse(X_trans, X_true) + + +@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) +def test_imputation_constant_object(marker): + # Test imputation using the constant strategy on objects + X = np.array( + [ + [marker, "a", "b", marker], + ["c", marker, "d", marker], + ["e", "f", marker, marker], + ["g", "h", "i", marker], + ], + dtype=object, + ) + + X_true = np.array( + [ + ["missing", "a", "b", "missing"], + ["c", "missing", "d", "missing"], + ["e", "f", "missing", "missing"], + ["g", "h", "i", "missing"], + ], + dtype=object, + ) + + imputer = SimpleImputer( + missing_values=marker, strategy="constant", fill_value="missing" + ) + X_trans = imputer.fit_transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("dtype", [object, "category"]) +def test_imputation_constant_pandas(dtype): + # Test imputation using the constant strategy on pandas df 
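+    # No fill_value is given, so strategy="constant" falls back to its
+    # default placeholder "missing_value" for non-numeric data.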
+ pd = pytest.importorskip("pandas") + + f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,") + + df = pd.read_csv(f, dtype=dtype) + + X_true = np.array( + [ + ["missing_value", "i", "x", "missing_value"], + ["a", "missing_value", "y", "missing_value"], + ["a", "j", "missing_value", "missing_value"], + ["b", "j", "x", "missing_value"], + ], + dtype=object, + ) + + imputer = SimpleImputer(strategy="constant") + X_trans = imputer.fit_transform(df) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]]) +def test_iterative_imputer_one_feature(X): + # check we exit early when there is a single feature + imputer = IterativeImputer().fit(X) + assert imputer.n_iter_ == 0 + imputer = IterativeImputer() + imputer.fit([[1], [2]]) + assert imputer.n_iter_ == 0 + imputer.fit([[1], [np.nan]]) + assert imputer.n_iter_ == 0 + + +def test_imputation_pipeline_grid_search(): + # Test imputation within a pipeline + gridsearch. + X = _sparse_random_matrix(100, 100, density=0.10) + missing_values = X.data[0] + + pipeline = Pipeline( + [ + ("imputer", SimpleImputer(missing_values=missing_values)), + ("tree", tree.DecisionTreeRegressor(random_state=0)), + ] + ) + + parameters = {"imputer__strategy": ["mean", "median", "most_frequent"]} + + Y = _sparse_random_matrix(100, 1, density=0.10).toarray() + gs = GridSearchCV(pipeline, parameters) + gs.fit(X, Y) + + +def test_imputation_copy(): + # Test imputation with copy + X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0) + + # copy=True, dense => copy + X = X_orig.copy().toarray() + imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True) + Xt = imputer.fit(X).transform(X) + Xt[0, 0] = -1 + assert not np.all(X == Xt) + + # copy=True, sparse csr => copy + X = X_orig.copy() + imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True) + Xt = imputer.fit(X).transform(X) + Xt.data[0] = -1 + assert not np.all(X.data == Xt.data) + + # copy=False, dense => no copy + X = X_orig.copy().toarray() + imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False) + Xt = imputer.fit(X).transform(X) + Xt[0, 0] = -1 + assert_array_almost_equal(X, Xt) + + # copy=False, sparse csc => no copy + X = X_orig.copy().tocsc() + imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) + Xt = imputer.fit(X).transform(X) + Xt.data[0] = -1 + assert_array_almost_equal(X.data, Xt.data) + + # copy=False, sparse csr => copy + X = X_orig.copy() + imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) + Xt = imputer.fit(X).transform(X) + Xt.data[0] = -1 + assert not np.all(X.data == Xt.data) + + # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is + # made, even if copy=False. 
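+
+
+# A minimal illustrative sketch (editor's addition, not part of the original
+# suite; the test name is hypothetical): it demonstrates the dense copy=True
+# behaviour exercised above on a tiny array where 0 marks missing entries.
+def test_imputation_copy_dense_demo():
+    X = np.array([[0.0, 1.0], [2.0, 3.0]])
+    imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
+    Xt = imputer.fit_transform(X)
+    # column 0 has a single observed value (2.0), so the marker becomes 2.0
+    assert Xt[0, 0] == 2.0
+    # with copy=True, mutating the output must leave the input untouched
+    Xt[0, 0] = -1
+    assert X[0, 0] == 0.0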
+ + +def test_iterative_imputer_zero_iters(): + rng = np.random.RandomState(0) + + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + missing_flag = X == 0 + X[missing_flag] = np.nan + + imputer = IterativeImputer(max_iter=0) + X_imputed = imputer.fit_transform(X) + # with max_iter=0, only initial imputation is performed + assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) + + # repeat but force n_iter_ to 0 + imputer = IterativeImputer(max_iter=5).fit(X) + # transformed should not be equal to initial imputation + assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X)) + + imputer.n_iter_ = 0 + # now they should be equal as only initial imputation is done + assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X)) + + +def test_iterative_imputer_verbose(): + rng = np.random.RandomState(0) + + n = 100 + d = 3 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1) + imputer.fit(X) + imputer.transform(X) + imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2) + imputer.fit(X) + imputer.transform(X) + + +def test_iterative_imputer_all_missing(): + n = 100 + d = 3 + X = np.zeros((n, d)) + imputer = IterativeImputer(missing_values=0, max_iter=1) + X_imputed = imputer.fit_transform(X) + assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) + + +@pytest.mark.parametrize( + "imputation_order", ["random", "roman", "ascending", "descending", "arabic"] +) +def test_iterative_imputer_imputation_order(imputation_order): + rng = np.random.RandomState(0) + n = 100 + d = 10 + max_iter = 2 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + X[:, 0] = 1 # this column should not be discarded by IterativeImputer + + imputer = IterativeImputer( + missing_values=0, + max_iter=max_iter, + n_nearest_features=5, + sample_posterior=False, + skip_complete=True, + min_value=0, + max_value=1, + verbose=1, + imputation_order=imputation_order, + random_state=rng, + ) + imputer.fit_transform(X) + ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_] + + assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_ + + if imputation_order == "roman": + assert np.all(ordered_idx[: d - 1] == np.arange(1, d)) + elif imputation_order == "arabic": + assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1)) + elif imputation_order == "random": + ordered_idx_round_1 = ordered_idx[: d - 1] + ordered_idx_round_2 = ordered_idx[d - 1 :] + assert ordered_idx_round_1 != ordered_idx_round_2 + elif "ending" in imputation_order: + assert len(ordered_idx) == max_iter * (d - 1) + + +@pytest.mark.parametrize( + "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()] +) +def test_iterative_imputer_estimators(estimator): + rng = np.random.RandomState(0) + + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + + imputer = IterativeImputer( + missing_values=0, max_iter=1, estimator=estimator, random_state=rng + ) + imputer.fit_transform(X) + + # check that types are correct for estimators + hashes = [] + for triplet in imputer.imputation_sequence_: + expected_type = ( + type(estimator) if estimator is not None else type(BayesianRidge()) + ) + assert isinstance(triplet.estimator, expected_type) + hashes.append(id(triplet.estimator)) + + # check that each estimator is unique + assert len(set(hashes)) == 
len(hashes) + + +def test_iterative_imputer_clip(): + rng = np.random.RandomState(0) + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + + imputer = IterativeImputer( + missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng + ) + + Xt = imputer.fit_transform(X) + assert_allclose(np.min(Xt[X == 0]), 0.1) + assert_allclose(np.max(Xt[X == 0]), 0.2) + assert_allclose(Xt[X != 0], X[X != 0]) + + +def test_iterative_imputer_clip_truncnorm(): + rng = np.random.RandomState(0) + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + X[:, 0] = 1 + + imputer = IterativeImputer( + missing_values=0, + max_iter=2, + n_nearest_features=5, + sample_posterior=True, + min_value=0.1, + max_value=0.2, + verbose=1, + imputation_order="random", + random_state=rng, + ) + Xt = imputer.fit_transform(X) + assert_allclose(np.min(Xt[X == 0]), 0.1) + assert_allclose(np.max(Xt[X == 0]), 0.2) + assert_allclose(Xt[X != 0], X[X != 0]) + + +def test_iterative_imputer_truncated_normal_posterior(): + # test that the values that are imputed using `sample_posterior=True` + # with boundaries (`min_value` and `max_value` are not None) are drawn + # from a distribution that looks gaussian via the Kolmogorov Smirnov test. + # note that starting from the wrong random seed will make this test fail + # because random sampling doesn't occur at all when the imputation + # is outside of the (min_value, max_value) range + rng = np.random.RandomState(42) + + X = rng.normal(size=(5, 5)) + X[0][0] = np.nan + + imputer = IterativeImputer( + min_value=0, max_value=0.5, sample_posterior=True, random_state=rng + ) + + imputer.fit_transform(X) + # generate multiple imputations for the single missing value + imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)]) + + assert all(imputations >= 0) + assert all(imputations <= 0.5) + + mu, sigma = imputations.mean(), imputations.std() + # guard against a zero standard deviation before standardizing + if sigma == 0: + sigma += 1e-12 + ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm") + # we want to fail to reject the null hypothesis + # null hypothesis: distributions are the same + assert ks_statistic < 0.2 or p_value > 0.1, "The posterior does not appear to be normal" + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +def test_iterative_imputer_missing_at_transform(strategy): + rng = np.random.RandomState(0) + n = 100 + d = 10 + X_train = rng.randint(low=0, high=3, size=(n, d)) + X_test = rng.randint(low=0, high=3, size=(n, d)) + + X_train[:, 0] = 1 # definitely no missing values in 0th column + X_test[0, 0] = 0 # definitely missing value in 0th column + + imputer = IterativeImputer( + missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng + ).fit(X_train) + initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train) + + # if there were no missing values at time of fit, then imputer will + # only use the initial imputer for that feature at transform + assert_allclose( + imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0] + ) + + +def test_iterative_imputer_transform_stochasticity(): + rng1 = np.random.RandomState(0) + rng2 = np.random.RandomState(1) + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray() + + # when sample_posterior=True, two transforms shouldn't be equal + imputer = IterativeImputer( + missing_values=0, max_iter=1,
sample_posterior=True, random_state=rng1 + ) + imputer.fit(X) + + X_fitted_1 = imputer.transform(X) + X_fitted_2 = imputer.transform(X) + + # sufficient to assert that the means are not the same + assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2)) + + # when sample_posterior=False, and n_nearest_features=None + # and imputation_order is not random + # the two transforms should be identical even if rng are different + imputer1 = IterativeImputer( + missing_values=0, + max_iter=1, + sample_posterior=False, + n_nearest_features=None, + imputation_order="ascending", + random_state=rng1, + ) + + imputer2 = IterativeImputer( + missing_values=0, + max_iter=1, + sample_posterior=False, + n_nearest_features=None, + imputation_order="ascending", + random_state=rng2, + ) + imputer1.fit(X) + imputer2.fit(X) + + X_fitted_1a = imputer1.transform(X) + X_fitted_1b = imputer1.transform(X) + X_fitted_2 = imputer2.transform(X) + + assert_allclose(X_fitted_1a, X_fitted_1b) + assert_allclose(X_fitted_1a, X_fitted_2) + + +def test_iterative_imputer_no_missing(): + rng = np.random.RandomState(0) + X = rng.rand(100, 100) + X[:, 0] = np.nan + m1 = IterativeImputer(max_iter=10, random_state=rng) + m2 = IterativeImputer(max_iter=10, random_state=rng) + pred1 = m1.fit(X).transform(X) + pred2 = m2.fit_transform(X) + # should exclude the first column entirely + assert_allclose(X[:, 1:], pred1) + # fit and fit_transform should both be identical + assert_allclose(pred1, pred2) + + +def test_iterative_imputer_rank_one(): + rng = np.random.RandomState(0) + d = 50 + A = rng.rand(d, 1) + B = rng.rand(1, d) + X = np.dot(A, B) + nan_mask = rng.rand(d, d) < 0.5 + X_missing = X.copy() + X_missing[nan_mask] = np.nan + + imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng) + X_filled = imputer.fit_transform(X_missing) + assert_allclose(X_filled, X, atol=0.02) + + +@pytest.mark.parametrize("rank", [3, 5]) +def test_iterative_imputer_transform_recovery(rank): + rng = np.random.RandomState(0) + n = 70 + d = 70 + A = rng.rand(n, rank) + B = rng.rand(rank, d) + X_filled = np.dot(A, B) + nan_mask = rng.rand(n, d) < 0.5 + X_missing = X_filled.copy() + X_missing[nan_mask] = np.nan + + # split up data in half + n = n // 2 + X_train = X_missing[:n] + X_test_filled = X_filled[n:] + X_test = X_missing[n:] + + imputer = IterativeImputer( + max_iter=5, imputation_order="descending", verbose=1, random_state=rng + ).fit(X_train) + X_test_est = imputer.transform(X_test) + assert_allclose(X_test_filled, X_test_est, atol=0.1) + + +def test_iterative_imputer_additive_matrix(): + rng = np.random.RandomState(0) + n = 100 + d = 10 + A = rng.randn(n, d) + B = rng.randn(n, d) + X_filled = np.zeros(A.shape) + for i in range(d): + for j in range(d): + X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2 + # a quarter is randomly missing + nan_mask = rng.rand(n, d) < 0.25 + X_missing = X_filled.copy() + X_missing[nan_mask] = np.nan + + # split up data + n = n // 2 + X_train = X_missing[:n] + X_test_filled = X_filled[n:] + X_test = X_missing[n:] + + imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train) + X_test_est = imputer.transform(X_test) + assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01) + + +def test_iterative_imputer_early_stopping(): + rng = np.random.RandomState(0) + n = 50 + d = 5 + A = rng.rand(n, 1) + B = rng.rand(1, d) + X = np.dot(A, B) + nan_mask = rng.rand(n, d) < 0.5 + X_missing = X.copy() + X_missing[nan_mask] = np.nan + + imputer = IterativeImputer( + max_iter=100, 
tol=1e-2, sample_posterior=False, verbose=1, random_state=rng + ) + X_filled_100 = imputer.fit_transform(X_missing) + assert len(imputer.imputation_sequence_) == d * imputer.n_iter_ + + imputer = IterativeImputer( + max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng + ) + X_filled_early = imputer.fit_transform(X_missing) + assert_allclose(X_filled_100, X_filled_early, atol=1e-7) + + imputer = IterativeImputer( + max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng + ) + imputer.fit(X_missing) + assert imputer.n_iter_ == imputer.max_iter + + +def test_iterative_imputer_catch_warning(): + # check that we catch a RuntimeWarning due to a division by zero when a + # feature is constant in the dataset + X, y = load_diabetes(return_X_y=True) + n_samples, n_features = X.shape + + # simulate that a feature only contain one category during fit + X[:, 3] = 1 + + # add some missing values + rng = np.random.RandomState(0) + missing_rate = 0.15 + for feat in range(n_features): + sample_idx = rng.choice( + np.arange(n_samples), size=int(n_samples * missing_rate), replace=False + ) + X[sample_idx, feat] = np.nan + + imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + X_fill = imputer.fit_transform(X, y) + assert not np.any(np.isnan(X_fill)) + + +@pytest.mark.parametrize( + "min_value, max_value, correct_output", + [ + (0, 100, np.array([[0] * 3, [100] * 3])), + (None, None, np.array([[-np.inf] * 3, [np.inf] * 3])), + (-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])), + ([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])), + ( + [-5, -np.inf, 10], + [100, 200, np.inf], + np.array([[-5, -np.inf, 10], [100, 200, np.inf]]), + ), + ], + ids=["scalars", "None-default", "inf", "lists", "lists-with-inf"], +) +def test_iterative_imputer_min_max_array_like(min_value, max_value, correct_output): + # check that passing scalar or array-like + # for min_value and max_value in IterativeImputer works + X = np.random.RandomState(0).randn(10, 3) + imputer = IterativeImputer(min_value=min_value, max_value=max_value) + imputer.fit(X) + + assert isinstance(imputer._min_value, np.ndarray) and isinstance( + imputer._max_value, np.ndarray + ) + assert (imputer._min_value.shape[0] == X.shape[1]) and ( + imputer._max_value.shape[0] == X.shape[1] + ) + + assert_allclose(correct_output[0, :], imputer._min_value) + assert_allclose(correct_output[1, :], imputer._max_value) + + +@pytest.mark.parametrize( + "min_value, max_value, err_msg", + [ + (100, 0, "min_value >= max_value."), + (np.inf, -np.inf, "min_value >= max_value."), + ([-5, 5], [100, 200, 0], "_value' should be of shape"), + ], +) +def test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg): + # check that invalid combinations of min_value and max_value (min >= max, + # or mismatched array-like shapes) raise an informative error + X = np.random.random((10, 3)) + imputer = IterativeImputer(min_value=min_value, max_value=max_value) + with pytest.raises(ValueError, match=err_msg): + imputer.fit(X) + + +@pytest.mark.parametrize( + "min_max_1, min_max_2", + [([None, None], [-np.inf, np.inf]), ([-10, 10], [[-10] * 4, [10] * 4])], + ids=["None-vs-inf", "Scalar-vs-vector"], +) +def test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2): + # Test that None/inf and scalar/vector give the same imputation + X_train = np.array( + [ + [np.nan, 2, 2, 1], + [10, np.nan, np.nan, 7], + [3, 1, np.nan, 1], +
[np.nan, 4, 2, np.nan], + ] + ) + X_test = np.array( + [[np.nan, 2, np.nan, 5], [2, 4, np.nan, np.nan], [np.nan, 1, 10, 1]] + ) + imputer1 = IterativeImputer( + min_value=min_max_1[0], max_value=min_max_1[1], random_state=0 + ) + imputer2 = IterativeImputer( + min_value=min_max_2[0], max_value=min_max_2[1], random_state=0 + ) + X_test_imputed1 = imputer1.fit(X_train).transform(X_test) + X_test_imputed2 = imputer2.fit(X_train).transform(X_test) + assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0]) + + +@pytest.mark.parametrize("skip_complete", [True, False]) +def test_iterative_imputer_skip_non_missing(skip_complete): + # check the imputing strategy when missing data are present in the + # testing set only. + # taken from: https://github.com/scikit-learn/scikit-learn/issues/14383 + rng = np.random.RandomState(0) + X_train = np.array([[5, 2, 2, 1], [10, 1, 2, 7], [3, 1, 1, 1], [8, 4, 2, 2]]) + X_test = np.array([[np.nan, 2, 4, 5], [np.nan, 4, 1, 2], [np.nan, 1, 10, 1]]) + imputer = IterativeImputer( + initial_strategy="mean", skip_complete=skip_complete, random_state=rng + ) + X_test_est = imputer.fit(X_train).transform(X_test) + if skip_complete: + # impute with the initial strategy: 'mean' + assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0])) + else: + assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4) + + +@pytest.mark.parametrize("rs_imputer", [None, 1, np.random.RandomState(seed=1)]) +@pytest.mark.parametrize("rs_estimator", [None, 1, np.random.RandomState(seed=1)]) +def test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator): + class ZeroEstimator: + def __init__(self, random_state): + self.random_state = random_state + + def fit(self, *args, **kwargs): + return self + + def predict(self, X): + return np.zeros(X.shape[0]) + + estimator = ZeroEstimator(random_state=rs_estimator) + imputer = IterativeImputer(random_state=rs_imputer, estimator=estimator) + X_train = np.zeros((10, 3)) + imputer.fit(X_train) + assert estimator.random_state == rs_estimator + + +@pytest.mark.parametrize( + "X_fit, X_trans, params, msg_err", + [ + ( + np.array([[-1, 1], [1, 2]]), + np.array([[-1, 1], [1, -1]]), + {"features": "missing-only", "sparse": "auto"}, + "have missing values in transform but have no missing values in fit", + ), + ( + np.array([["a", "b"], ["c", "a"]], dtype=str), + np.array([["a", "b"], ["c", "a"]], dtype=str), + {}, + "MissingIndicator does not support data with dtype", + ), + ], +) +def test_missing_indicator_error(X_fit, X_trans, params, msg_err): + indicator = MissingIndicator(missing_values=-1) + indicator.set_params(**params) + with pytest.raises(ValueError, match=msg_err): + indicator.fit(X_fit).transform(X_trans) + + +def _generate_missing_indicator_cases(): + missing_values_dtypes = [(0, np.int32), (np.nan, np.float64), (-1, np.int32)] + arr_types = ( + [np.array] + + CSC_CONTAINERS + + CSR_CONTAINERS + + COO_CONTAINERS + + LIL_CONTAINERS + + BSR_CONTAINERS + ) + return [ + (arr_type, missing_values, dtype) + for arr_type, (missing_values, dtype) in product( + arr_types, missing_values_dtypes + ) + if not (missing_values == 0 and arr_type is not np.array) + ] + + +@pytest.mark.parametrize( + "arr_type, missing_values, dtype", _generate_missing_indicator_cases() +) +@pytest.mark.parametrize( + "param_features, n_features, features_indices", + [("missing-only", 3, np.array([0, 1, 2])), ("all", 3, np.array([0, 1, 2]))], +) +def test_missing_indicator_new( + missing_values, arr_type, dtype, param_features, n_features, features_indices +): + X_fit =
np.array([[missing_values, missing_values, 1], [4, 2, missing_values]]) + X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) + X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]]) + X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]]) + + # convert the input to the right array format and right dtype + X_fit = arr_type(X_fit).astype(dtype) + X_trans = arr_type(X_trans).astype(dtype) + X_fit_expected = X_fit_expected.astype(dtype) + X_trans_expected = X_trans_expected.astype(dtype) + + indicator = MissingIndicator( + missing_values=missing_values, features=param_features, sparse=False + ) + X_fit_mask = indicator.fit_transform(X_fit) + X_trans_mask = indicator.transform(X_trans) + + assert X_fit_mask.shape[1] == n_features + assert X_trans_mask.shape[1] == n_features + + assert_array_equal(indicator.features_, features_indices) + assert_allclose(X_fit_mask, X_fit_expected[:, features_indices]) + assert_allclose(X_trans_mask, X_trans_expected[:, features_indices]) + + assert X_fit_mask.dtype == bool + assert X_trans_mask.dtype == bool + assert isinstance(X_fit_mask, np.ndarray) + assert isinstance(X_trans_mask, np.ndarray) + + indicator.set_params(sparse=True) + X_fit_mask_sparse = indicator.fit_transform(X_fit) + X_trans_mask_sparse = indicator.transform(X_trans) + + assert X_fit_mask_sparse.dtype == bool + assert X_trans_mask_sparse.dtype == bool + assert X_fit_mask_sparse.format == "csc" + assert X_trans_mask_sparse.format == "csc" + assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask) + assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask) + + +@pytest.mark.parametrize( + "arr_type", + CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS, +) +def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type): + # test for sparse input and missing_value == 0 + + missing_values = 0 + X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) + X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) + + # convert the input to the right array format + X_fit_sparse = arr_type(X_fit) + X_trans_sparse = arr_type(X_trans) + + indicator = MissingIndicator(missing_values=missing_values) + + with pytest.raises(ValueError, match="Sparse input with missing_values=0"): + indicator.fit_transform(X_fit_sparse) + + indicator.fit_transform(X_fit) + with pytest.raises(ValueError, match="Sparse input with missing_values=0"): + indicator.transform(X_trans_sparse) + + +@pytest.mark.parametrize("param_sparse", [True, False, "auto"]) +@pytest.mark.parametrize( + "arr_type, missing_values", + [(np.array, 0)] + + list( + product( + CSC_CONTAINERS + + CSR_CONTAINERS + + COO_CONTAINERS + + LIL_CONTAINERS + + BSR_CONTAINERS, + [np.nan], + ) + ), +) +def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse): + # check the format of the output with different sparse parameter + X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) + X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) + X_fit = arr_type(X_fit).astype(np.float64) + X_trans = arr_type(X_trans).astype(np.float64) + + indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse) + X_fit_mask = indicator.fit_transform(X_fit) + X_trans_mask = indicator.transform(X_trans) + + if param_sparse is True: + assert X_fit_mask.format == "csc" + assert X_trans_mask.format == "csc" + elif param_sparse == "auto" and missing_values == 0: + assert isinstance(X_fit_mask, np.ndarray) + assert 
isinstance(X_trans_mask, np.ndarray) + elif param_sparse is False: + assert isinstance(X_fit_mask, np.ndarray) + assert isinstance(X_trans_mask, np.ndarray) + else: + if sparse.issparse(X_fit): + assert X_fit_mask.format == "csc" + assert X_trans_mask.format == "csc" + else: + assert isinstance(X_fit_mask, np.ndarray) + assert isinstance(X_trans_mask, np.ndarray) + + +def test_missing_indicator_string(): + X = np.array([["a", "b", "c"], ["b", "c", "a"]], dtype=object) + indicator = MissingIndicator(missing_values="a", features="all") + X_trans = indicator.fit_transform(X) + assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]])) + + +@pytest.mark.parametrize( + "X, missing_values, X_trans_exp", + [ + ( + np.array([["a", "b"], ["b", "a"]], dtype=object), + "a", + np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object), + ), + ( + np.array([[np.nan, 1.0], [1.0, np.nan]]), + np.nan, + np.array([[1.0, 1.0, True, False], [1.0, 1.0, False, True]]), + ), + ( + np.array([[np.nan, "b"], ["b", np.nan]], dtype=object), + np.nan, + np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object), + ), + ( + np.array([[None, "b"], ["b", None]], dtype=object), + None, + np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object), + ), + ], +) +def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp): + trans = make_union( + SimpleImputer(missing_values=missing_values, strategy="most_frequent"), + MissingIndicator(missing_values=missing_values), + ) + X_trans = trans.fit_transform(X) + assert_array_equal(X_trans, X_trans_exp) + + +@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) +@pytest.mark.parametrize( + "imputer_missing_values, missing_value, err_msg", + [ + ("NaN", np.nan, "Input X contains NaN"), + ("-1", -1, "types are expected to be both numerical."), + ], +) +def test_inconsistent_dtype_X_missing_values( + imputer_constructor, imputer_missing_values, missing_value, err_msg +): + # regression test for issue #11390. Comparison between incoherent dtype + # for X and missing_values was not raising a proper error. + rng = np.random.RandomState(42) + X = rng.randn(10, 10) + X[0, 0] = missing_value + + imputer = imputer_constructor(missing_values=imputer_missing_values) + + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(X) + + +def test_missing_indicator_no_missing(): + # check that all features are dropped if there are no missing values when + # features='missing-only' (#13491) + X = np.array([[1, 1], [1, 1]]) + + mi = MissingIndicator(features="missing-only", missing_values=-1) + Xt = mi.fit_transform(X) + + assert Xt.shape[1] == 0 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_missing_indicator_sparse_no_explicit_zeros(csr_container): + # Check that non missing values don't become explicit zeros in the mask + # generated by missing indicator when X is sparse. 
(#13491) + X = csr_container([[0, 1, 2], [1, 2, 0], [2, 0, 1]]) + + mi = MissingIndicator(features="all", missing_values=1) + Xt = mi.fit_transform(X) + + assert Xt.getnnz() == Xt.sum() + + +@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) +def test_imputer_without_indicator(imputer_constructor): + X = np.array([[1, 1], [1, 1]]) + imputer = imputer_constructor() + imputer.fit(X) + + assert imputer.indicator_ is None + + +@pytest.mark.parametrize( + "arr_type", + CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS, +) +def test_simple_imputation_add_indicator_sparse_matrix(arr_type): + X_sparse = arr_type([[np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9]]) + X_true = np.array( + [ + [3.0, 1.0, 5.0, 1.0, 0.0, 0.0], + [2.0, 2.0, 1.0, 0.0, 1.0, 0.0], + [6.0, 3.0, 5.0, 0.0, 0.0, 1.0], + [1.0, 2.0, 9.0, 0.0, 0.0, 0.0], + ] + ) + + imputer = SimpleImputer(missing_values=np.nan, add_indicator=True) + X_trans = imputer.fit_transform(X_sparse) + + assert sparse.issparse(X_trans) + assert X_trans.shape == X_true.shape + assert_allclose(X_trans.toarray(), X_true) + + +@pytest.mark.parametrize( + "strategy, expected", [("most_frequent", "b"), ("constant", "missing_value")] +) +def test_simple_imputation_string_list(strategy, expected): + X = [["a", "b"], ["c", np.nan]] + + X_true = np.array([["a", "b"], ["c", expected]], dtype=object) + + imputer = SimpleImputer(strategy=strategy) + X_trans = imputer.fit_transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize( + "order, idx_order", + [("ascending", [3, 4, 2, 0, 1]), ("descending", [1, 0, 2, 4, 3])], +) +def test_imputation_order(order, idx_order): + # regression test for #15393 + rng = np.random.RandomState(42) + X = rng.rand(100, 5) + X[:50, 1] = np.nan + X[:30, 0] = np.nan + X[:20, 2] = np.nan + X[:10, 4] = np.nan + + with pytest.warns(ConvergenceWarning): + trs = IterativeImputer(max_iter=1, imputation_order=order, random_state=0).fit( + X + ) + idx = [x.feat_idx for x in trs.imputation_sequence_] + assert idx == idx_order + + +@pytest.mark.parametrize("missing_value", [-1, np.nan]) +def test_simple_imputation_inverse_transform(missing_value): + # Test inverse_transform feature for np.nan + X_1 = np.array( + [ + [9, missing_value, 3, -1], + [4, -1, 5, 4], + [6, 7, missing_value, -1], + [8, 9, 0, missing_value], + ] + ) + + X_2 = np.array( + [ + [5, 4, 2, 1], + [2, 1, missing_value, 3], + [9, missing_value, 7, 1], + [6, 4, 2, missing_value], + ] + ) + + X_3 = np.array( + [ + [1, missing_value, 5, 9], + [missing_value, 4, missing_value, missing_value], + [2, missing_value, 7, missing_value], + [missing_value, 3, missing_value, 8], + ] + ) + + X_4 = np.array( + [ + [1, 1, 1, 3], + [missing_value, 2, missing_value, 1], + [2, 3, 3, 4], + [missing_value, 4, missing_value, 2], + ] + ) + + imputer = SimpleImputer( + missing_values=missing_value, strategy="mean", add_indicator=True + ) + + X_1_trans = imputer.fit_transform(X_1) + X_1_inv_trans = imputer.inverse_transform(X_1_trans) + + X_2_trans = imputer.transform(X_2) # test on new data + X_2_inv_trans = imputer.inverse_transform(X_2_trans) + + assert_array_equal(X_1_inv_trans, X_1) + assert_array_equal(X_2_inv_trans, X_2) + + for X in [X_3, X_4]: + X_trans = imputer.fit_transform(X) + X_inv_trans = imputer.inverse_transform(X_trans) + assert_array_equal(X_inv_trans, X) + + +@pytest.mark.parametrize("missing_value", [-1, np.nan]) +def test_simple_imputation_inverse_transform_exceptions(missing_value): + X_1 
= np.array( + [ + [9, missing_value, 3, -1], + [4, -1, 5, 4], + [6, 7, missing_value, -1], + [8, 9, 0, missing_value], + ] + ) + + imputer = SimpleImputer(missing_values=missing_value, strategy="mean") + X_1_trans = imputer.fit_transform(X_1) + with pytest.raises( + ValueError, match=f"Got 'add_indicator={imputer.add_indicator}'" + ): + imputer.inverse_transform(X_1_trans) + + +@pytest.mark.parametrize( + "expected,array,dtype,extra_value,n_repeat", + [ + # array of object dtype + ("extra_value", ["a", "b", "c"], object, "extra_value", 2), + ( + "most_frequent_value", + ["most_frequent_value", "most_frequent_value", "value"], + object, + "extra_value", + 1, + ), + ("a", ["min_value", "min_valuevalue"], object, "a", 2), + ("min_value", ["min_value", "min_value", "value"], object, "z", 2), + # array of numeric dtype + (10, [1, 2, 3], int, 10, 2), + (1, [1, 1, 2], int, 10, 1), + (10, [20, 20, 1], int, 10, 2), + (1, [1, 1, 20], int, 10, 2), + ], +) +def test_most_frequent(expected, array, dtype, extra_value, n_repeat): + assert expected == _most_frequent( + np.array(array, dtype=dtype), extra_value, n_repeat + ) + + +@pytest.mark.parametrize( + "initial_strategy", ["mean", "median", "most_frequent", "constant"] +) +def test_iterative_imputer_keep_empty_features(initial_strategy): + """Check the behaviour of the iterative imputer with different initial strategy + and keeping empty features (i.e. features containing only missing values). + """ + X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]]) + + imputer = IterativeImputer( + initial_strategy=initial_strategy, keep_empty_features=True + ) + X_imputed = imputer.fit_transform(X) + assert_allclose(X_imputed[:, 1], 0) + X_imputed = imputer.transform(X) + assert_allclose(X_imputed[:, 1], 0) + + +def test_iterative_imputer_constant_fill_value(): + """Check that we propagate properly the parameter `fill_value`.""" + X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]]) + + fill_value = 100 + imputer = IterativeImputer( + missing_values=-1, + initial_strategy="constant", + fill_value=fill_value, + max_iter=0, + ) + imputer.fit_transform(X) + assert_array_equal(imputer.initial_imputer_.statistics_, fill_value) + + +@pytest.mark.parametrize("keep_empty_features", [True, False]) +def test_knn_imputer_keep_empty_features(keep_empty_features): + """Check the behaviour of `keep_empty_features` for `KNNImputer`.""" + X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]]) + + imputer = KNNImputer(keep_empty_features=keep_empty_features) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + if keep_empty_features: + assert X_imputed.shape == X.shape + assert_array_equal(X_imputed[:, 1], 0) + else: + assert X_imputed.shape == (X.shape[0], X.shape[1] - 1) + + +def test_simple_impute_pd_na(): + pd = pytest.importorskip("pandas") + + # Impute pandas array of string types. + df = pd.DataFrame({"feature": pd.Series(["abc", None, "de"], dtype="string")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value="na") + _assert_array_equal_and_same_dtype( + imputer.fit_transform(df), np.array([["abc"], ["na"], ["de"]], dtype=object) + ) + + # Impute pandas array of string types without any missing values. 
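+    # (Nothing is missing in this frame, so the constant strategy passes the
+    # values through unchanged while the output keeps object dtype.)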
+ df = pd.DataFrame({"feature": pd.Series(["abc", "de", "fgh"], dtype="string")}) + imputer = SimpleImputer(fill_value="ok", strategy="constant") + _assert_array_equal_and_same_dtype( + imputer.fit_transform(df), np.array([["abc"], ["de"], ["fgh"]], dtype=object) + ) + + # Impute pandas array of integer types. + df = pd.DataFrame({"feature": pd.Series([1, None, 3], dtype="Int64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-1) + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64") + ) + + # Use `np.nan` also works. + imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=-1) + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64") + ) + + # Impute pandas array of integer types with 'median' strategy. + df = pd.DataFrame({"feature": pd.Series([1, None, 2, 3], dtype="Int64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="median") + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype="float64") + ) + + # Impute pandas array of integer types with 'mean' strategy. + df = pd.DataFrame({"feature": pd.Series([1, None, 2], dtype="Int64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="mean") + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype="float64") + ) + + # Impute pandas array of float types. + df = pd.DataFrame({"feature": pd.Series([1.0, None, 3.0], dtype="float64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-2.0) + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype="float64") + ) + + # Impute pandas array of float types with 'median' strategy. + df = pd.DataFrame({"feature": pd.Series([1.0, None, 2.0, 3.0], dtype="float64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="median") + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), + np.array([[1.0], [2.0], [2.0], [3.0]], dtype="float64"), + ) + + +def test_missing_indicator_feature_names_out(): + """Check that missing indicator return the feature names with a prefix.""" + pd = pytest.importorskip("pandas") + + missing_values = np.nan + X = pd.DataFrame( + [ + [missing_values, missing_values, 1, missing_values], + [4, missing_values, 2, 10], + ], + columns=["a", "b", "c", "d"], + ) + + indicator = MissingIndicator(missing_values=missing_values).fit(X) + feature_names = indicator.get_feature_names_out() + expected_names = ["missingindicator_a", "missingindicator_b", "missingindicator_d"] + assert_array_equal(expected_names, feature_names) + + +def test_imputer_lists_fit_transform(): + """Check transform uses object dtype when fitted on an object dtype. + + Non-regression test for #19572. 
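+ + Fitting on a list of Python strings yields an object-dtype ndarray, so a + transform on all-missing input must stay object dtype instead of falling + back to float.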
+ """ + + X = [["a", "b"], ["c", "b"], ["a", "a"]] + imp_frequent = SimpleImputer(strategy="most_frequent").fit(X) + X_trans = imp_frequent.transform([[np.nan, np.nan]]) + assert X_trans.dtype == object + assert_array_equal(X_trans, [["a", "b"]]) + + +@pytest.mark.parametrize("dtype_test", [np.float32, np.float64]) +def test_imputer_transform_preserves_numeric_dtype(dtype_test): + """Check transform preserves numeric dtype independent of fit dtype.""" + X = np.asarray( + [[1.2, 3.4, np.nan], [np.nan, 1.2, 1.3], [4.2, 2, 1]], dtype=np.float64 + ) + imp = SimpleImputer().fit(X) + + X_test = np.asarray([[np.nan, np.nan, np.nan]], dtype=dtype_test) + X_trans = imp.transform(X_test) + assert X_trans.dtype == dtype_test + + +@pytest.mark.parametrize("array_type", ["array", "sparse"]) +@pytest.mark.parametrize("keep_empty_features", [True, False]) +def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features): + """Check the behaviour of `keep_empty_features` with `strategy='constant'. + For backward compatibility, a column full of missing values will always be + fill and never dropped. + """ + X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]]) + X = _convert_container(X, array_type) + fill_value = 10 + imputer = SimpleImputer( + strategy="constant", + fill_value=fill_value, + keep_empty_features=keep_empty_features, + ) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + assert X_imputed.shape == X.shape + constant_feature = ( + X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0] + ) + assert_array_equal(constant_feature, fill_value) + + +@pytest.mark.parametrize("array_type", ["array", "sparse"]) +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +@pytest.mark.parametrize("keep_empty_features", [True, False]) +def test_simple_imputer_keep_empty_features(strategy, array_type, keep_empty_features): + """Check the behaviour of `keep_empty_features` with all strategies but + 'constant'. + """ + X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]]) + X = _convert_container(X, array_type) + imputer = SimpleImputer(strategy=strategy, keep_empty_features=keep_empty_features) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + if keep_empty_features: + assert X_imputed.shape == X.shape + constant_feature = ( + X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0] + ) + assert_array_equal(constant_feature, 0) + else: + assert X_imputed.shape == (X.shape[0], X.shape[1] - 1) + + +def test_simple_imputer_constant_fill_value_casting(): + """Check that we raise a proper error message when we cannot cast the fill value + to the input data type. Otherwise, check that the casting is done properly. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28309 + """ + # cannot cast fill_value at fit + fill_value = 1.5 + X_int64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.int64) + imputer = SimpleImputer( + strategy="constant", fill_value=fill_value, missing_values=2 + ) + err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast" + with pytest.raises(ValueError, match=re.escape(err_msg)): + imputer.fit(X_int64) + + # cannot cast fill_value at transform + X_float64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float64) + imputer.fit(X_float64) + err_msg = ( + f"The dtype of the filling value (i.e. 
{imputer.statistics_.dtype!r}) " + "cannot be cast" + ) + with pytest.raises(ValueError, match=re.escape(err_msg)): + imputer.transform(X_int64) + + # check that no error is raised when having the same kind of dtype + fill_value_list = [np.float64(1.5), 1.5, 1] + X_float32 = X_float64.astype(np.float32) + + for fill_value in fill_value_list: + imputer = SimpleImputer( + strategy="constant", fill_value=fill_value, missing_values=2 + ) + X_trans = imputer.fit_transform(X_float32) + assert X_trans.dtype == X_float32.dtype diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py new file mode 100644 index 0000000000000000000000000000000000000000..141c2ea90dbd9b3b4db6277e0c59adcf106931c0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py @@ -0,0 +1,547 @@ +import numpy as np +import pytest + +from sklearn import config_context +from sklearn.impute import KNNImputer +from sklearn.metrics.pairwise import nan_euclidean_distances, pairwise_distances +from sklearn.neighbors import KNeighborsRegressor +from sklearn.utils._testing import assert_allclose + + +@pytest.mark.parametrize("weights", ["uniform", "distance"]) +@pytest.mark.parametrize("n_neighbors", range(1, 6)) +def test_knn_imputer_shape(weights, n_neighbors): + # Verify the shapes of the imputed matrix for different weights and + # number of neighbors. + n_rows = 10 + n_cols = 2 + X = np.random.rand(n_rows, n_cols) + X[0, 0] = np.nan + + imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights) + X_imputed = imputer.fit_transform(X) + assert X_imputed.shape == (n_rows, n_cols) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_default_with_invalid_input(na): + # Test imputation with default values and invalid input + + # Test with inf present + X = np.array( + [ + [np.inf, 1, 1, 2, na], + [2, 1, 2, 2, 3], + [3, 2, 3, 3, 8], + [na, 6, 0, 5, 13], + [na, 7, 0, 7, 8], + [6, 6, 2, 5, 7], + ] + ) + with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"): + KNNImputer(missing_values=na).fit(X) + + # Test with inf present in matrix passed in transform() + X = np.array( + [ + [np.inf, 1, 1, 2, na], + [2, 1, 2, 2, 3], + [3, 2, 3, 3, 8], + [na, 6, 0, 5, 13], + [na, 7, 0, 7, 8], + [6, 6, 2, 5, 7], + ] + ) + + X_fit = np.array( + [ + [0, 1, 1, 2, na], + [2, 1, 2, 2, 3], + [3, 2, 3, 3, 8], + [na, 6, 0, 5, 13], + [na, 7, 0, 7, 8], + [6, 6, 2, 5, 7], + ] + ) + imputer = KNNImputer(missing_values=na).fit(X_fit) + with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"): + imputer.transform(X) + + # Test with missing_values=0 when NaN present + imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform") + X = np.array( + [ + [np.nan, 0, 0, 0, 5], + [np.nan, 1, 0, np.nan, 3], + [np.nan, 2, 0, 0, 0], + [np.nan, 6, 0, 5, 13], + ] + ) + msg = "Input X contains NaN" + with pytest.raises(ValueError, match=msg): + imputer.fit(X) + + X = np.array( + [ + [0, 0], + [np.nan, 2], + ] + ) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_removes_all_na_features(na): + X = np.array( + [ + [1, 1, na, 1, 1, 1.0], + [2, 3, na, 2, 2, 2], + [3, 4, na, 3, 3, na], + [6, 4, na, na, 6, 6], + ] + ) + knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X) + + X_transform = knn.transform(X) + assert not np.isnan(X_transform).any() + assert X_transform.shape == (4, 5) + + X_test = np.arange(0, 12).reshape(2, 6) + X_transform = 
knn.transform(X_test) + assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_zero_nan_imputes_the_same(na): + # Test with an imputable matrix and compare with different missing_values + X_zero = np.array( + [ + [1, 0, 1, 1, 1.0], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 0], + [6, 6, 0, 6, 6], + ] + ) + + X_nan = np.array( + [ + [1, na, 1, 1, 1.0], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, na], + [6, 6, na, 6, 6], + ] + ) + + X_imputed = np.array( + [ + [1, 2.5, 1, 1, 1.0], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 1.5], + [6, 6, 2.5, 6, 6], + ] + ) + + imputer_zero = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform") + + imputer_nan = KNNImputer(missing_values=na, n_neighbors=2, weights="uniform") + + assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed) + assert_allclose( + imputer_zero.fit_transform(X_zero), imputer_nan.fit_transform(X_nan) + ) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_verify(na): + # Test with an imputable matrix + X = np.array( + [ + [1, 0, 0, 1], + [2, 1, 2, na], + [3, 2, 3, na], + [na, 4, 5, 5], + [6, na, 6, 7], + [8, 8, 8, 8], + [16, 15, 18, 19], + ] + ) + + X_imputed = np.array( + [ + [1, 0, 0, 1], + [2, 1, 2, 8], + [3, 2, 3, 8], + [4, 4, 5, 5], + [6, 3, 6, 7], + [8, 8, 8, 8], + [16, 15, 18, 19], + ] + ) + + imputer = KNNImputer(missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + # Test when there is not enough neighbors + X = np.array( + [ + [1, 0, 0, na], + [2, 1, 2, na], + [3, 2, 3, na], + [4, 4, 5, na], + [6, 7, 6, na], + [8, 8, 8, na], + [20, 20, 20, 20], + [22, 22, 22, 22], + ] + ) + + # Not enough neighbors, use column mean from training + X_impute_value = (20 + 22) / 2 + X_imputed = np.array( + [ + [1, 0, 0, X_impute_value], + [2, 1, 2, X_impute_value], + [3, 2, 3, X_impute_value], + [4, 4, 5, X_impute_value], + [6, 7, 6, X_impute_value], + [8, 8, 8, X_impute_value], + [20, 20, 20, 20], + [22, 22, 22, 22], + ] + ) + + imputer = KNNImputer(missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + # Test when data in fit() and transform() are different + X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 16]]) + + X1 = np.array([[1, 0], [3, 2], [4, na]]) + + X_2_1 = (0 + 3 + 6 + 7 + 8) / 5 + X1_imputed = np.array([[1, 0], [3, 2], [4, X_2_1]]) + + imputer = KNNImputer(missing_values=na) + assert_allclose(imputer.fit(X).transform(X1), X1_imputed) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_one_n_neighbors(na): + X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]]) + + X_imputed = np.array([[0, 0], [4, 2], [4, 3], [5, 3], [7, 7], [7, 8], [14, 13]]) + + imputer = KNNImputer(n_neighbors=1, missing_values=na) + + assert_allclose(imputer.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_all_samples_are_neighbors(na): + X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]]) + + X_imputed = np.array([[0, 0], [6, 2], [4, 3], [5, 5.5], [7, 7], [6, 8], [14, 13]]) + + n_neighbors = X.shape[0] - 1 + imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na) + + assert_allclose(imputer.fit_transform(X), X_imputed) + + n_neighbors = X.shape[0] + imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na) + assert_allclose(imputer_plus1.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_weight_uniform(na): + X = np.array([[0, 
0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]) + + # Test with "uniform" weight (or unweighted) + X_imputed_uniform = np.array( + [[0, 0], [5, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] + ) + + imputer = KNNImputer(weights="uniform", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_uniform) + + # Test with "callable" weight + def no_weight(dist): + return None + + imputer = KNNImputer(weights=no_weight, missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_uniform) + + # Test with "callable" uniform weight + def uniform_weight(dist): + return np.ones_like(dist) + + imputer = KNNImputer(weights=uniform_weight, missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_uniform) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_weight_distance(na): + X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]) + + # Test with "distance" weight + nn = KNeighborsRegressor(metric="euclidean", weights="distance") + X_rows_idx = [0, 2, 3, 4, 5, 6] + nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0]) + knn_imputed_value = nn.predict(X[1:2, 1:])[0] + + # Manual calculation + X_neighbors_idx = [0, 2, 3, 4, 5] + dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na) + weights = 1 / dist[:, X_neighbors_idx].ravel() + manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights) + + X_imputed_distance1 = np.array( + [[0, 0], [manual_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] + ) + + # NearestNeighbor calculation + X_imputed_distance2 = np.array( + [[0, 0], [knn_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] + ) + + imputer = KNNImputer(weights="distance", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_distance1) + assert_allclose(imputer.fit_transform(X), X_imputed_distance2) + + # Test with weights = "distance" and n_neighbors=2 + X = np.array( + [ + [na, 0, 0], + [2, 1, 2], + [3, 2, 3], + [4, 5, 5], + ] + ) + + # neighbors are rows 1, 2, the nan_euclidean_distances are: + dist_0_1 = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2)) + dist_0_2 = np.sqrt((3 / 2) * ((2 - 0) ** 2 + (3 - 0) ** 2)) + imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2]) + + X_imputed = np.array( + [ + [imputed_value, 0, 0], + [2, 1, 2], + [3, 2, 3], + [4, 5, 5], + ] + ) + + imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + # Test with varying missingness patterns + X = np.array( + [ + [1, 0, 0, 1], + [0, na, 1, na], + [1, 1, 1, na], + [0, 1, 0, 0], + [0, 0, 0, 0], + [1, 0, 1, 1], + [10, 10, 10, 10], + ] + ) + + # Get weights of donor neighbors + dist = nan_euclidean_distances(X, missing_values=na) + r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]] + r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]] + r1c1_nbor_wt = 1 / r1c1_nbor_dists + r1c3_nbor_wt = 1 / r1c3_nbor_dists + + r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]] + r2c3_nbor_wt = 1 / r2c3_nbor_dists + + # Collect donor values + col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy() + col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy() + + # Final imputed values + r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt) + r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt) + r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt) + + X_imputed = np.array( + [ + [1, 0, 0, 1], + [0, r1c1_imp, 1, r1c3_imp], + [1, 1, 1, r2c3_imp], + [0, 1, 0, 0], + 
[0, 0, 0, 0], + [1, 0, 1, 1], + [10, 10, 10, 10], + ] + ) + + imputer = KNNImputer(weights="distance", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + X = np.array( + [ + [0, 0, 0, na], + [1, 1, 1, na], + [2, 2, na, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [na, 7, 7, 7], + ] + ) + + dist = pairwise_distances( + X, metric="nan_euclidean", squared=False, missing_values=na + ) + + # Calculate weights + r0c3_w = 1.0 / dist[0, 2:-1] + r1c3_w = 1.0 / dist[1, 2:-1] + r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)] + r7c0_w = 1.0 / dist[7, 2:7] + + # Calculate weighted averages + r0c3 = np.average(X[2:-1, -1], weights=r0c3_w) + r1c3 = np.average(X[2:-1, -1], weights=r1c3_w) + r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w) + r7c0 = np.average(X[2:7, 0], weights=r7c0_w) + + X_imputed = np.array( + [ + [0, 0, 0, r0c3], + [1, 1, 1, r1c3], + [2, 2, r2c2, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [r7c0, 7, 7, 7], + ] + ) + + imputer_comp_wt = KNNImputer(missing_values=na, weights="distance") + assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed) + + +def test_knn_imputer_callable_metric(): + # Define callable metric that returns the l1 norm: + def custom_callable(x, y, missing_values=np.nan, squared=False): + x = np.ma.array(x, mask=np.isnan(x)) + y = np.ma.array(y, mask=np.isnan(y)) + dist = np.nansum(np.abs(x - y)) + return dist + + X = np.array([[4, 3, 3, np.nan], [6, 9, 6, 9], [4, 8, 6, 9], [np.nan, 9, 11, 10.0]]) + + X_0_3 = (9 + 9) / 2 + X_3_0 = (6 + 4) / 2 + X_imputed = np.array( + [[4, 3, 3, X_0_3], [6, 9, 6, 9], [4, 8, 6, 9], [X_3_0, 9, 11, 10.0]] + ) + + imputer = KNNImputer(n_neighbors=2, metric=custom_callable) + assert_allclose(imputer.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("working_memory", [None, 0]) +@pytest.mark.parametrize("na", [-1, np.nan]) +# Note that we use working_memory=0 to ensure that chunking is tested, even +# for a small dataset. However, it should raise a UserWarning that we ignore. 
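+# A minimal sketch of the pattern exercised below (illustrative only): +# +# with config_context(working_memory=0): +# KNNImputer(missing_values=np.nan).fit_transform(X) +# +# working_memory=0 forces the smallest pairwise-distance chunk size even on a +# tiny dataset, which emits the "adhere to working_memory" UserWarning that +# the filter below silences.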
+@pytest.mark.filterwarnings("ignore:adhere to working_memory") +def test_knn_imputer_with_simple_example(na, working_memory): + X = np.array( + [ + [0, na, 0, na], + [1, 1, 1, na], + [2, 2, na, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [na, 7, 7, 7], + ] + ) + + r0c1 = np.mean(X[1:6, 1]) + r0c3 = np.mean(X[2:-1, -1]) + r1c3 = np.mean(X[2:-1, -1]) + r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2]) + r7c0 = np.mean(X[2:-1, 0]) + + X_imputed = np.array( + [ + [0, r0c1, 0, r0c3], + [1, 1, 1, r1c3], + [2, 2, r2c2, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [r7c0, 7, 7, 7], + ] + ) + + with config_context(working_memory=working_memory): + imputer_comp = KNNImputer(missing_values=na) + assert_allclose(imputer_comp.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("na", [-1, np.nan]) +@pytest.mark.parametrize("weights", ["uniform", "distance"]) +def test_knn_imputer_not_enough_valid_distances(na, weights): + # Samples with needed feature has nan distance + X1 = np.array([[na, 11], [na, 1], [3, na]]) + X1_imputed = np.array([[3, 11], [3, 1], [3, 6]]) + + knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights) + assert_allclose(knn.fit_transform(X1), X1_imputed) + + X2 = np.array([[4, na]]) + X2_imputed = np.array([[4, 6]]) + assert_allclose(knn.transform(X2), X2_imputed) + + +@pytest.mark.parametrize("na", [-1, np.nan]) +def test_knn_imputer_drops_all_nan_features(na): + X1 = np.array([[na, 1], [na, 2]]) + knn = KNNImputer(missing_values=na, n_neighbors=1) + X1_expected = np.array([[1], [2]]) + assert_allclose(knn.fit_transform(X1), X1_expected) + + X2 = np.array([[1, 2], [3, na]]) + X2_expected = np.array([[2], [1.5]]) + assert_allclose(knn.transform(X2), X2_expected) + + +@pytest.mark.parametrize("working_memory", [None, 0]) +@pytest.mark.parametrize("na", [-1, np.nan]) +def test_knn_imputer_distance_weighted_not_enough_neighbors(na, working_memory): + X = np.array([[3, na], [2, na], [na, 4], [5, 6], [6, 8], [na, 5]]) + + dist = pairwise_distances( + X, metric="nan_euclidean", squared=False, missing_values=na + ) + + X_01 = np.average(X[3:5, 1], weights=1 / dist[0, 3:5]) + X_11 = np.average(X[3:5, 1], weights=1 / dist[1, 3:5]) + X_20 = np.average(X[3:5, 0], weights=1 / dist[2, 3:5]) + X_50 = np.average(X[3:5, 0], weights=1 / dist[5, 3:5]) + + X_expected = np.array([[3, X_01], [2, X_11], [X_20, 4], [5, 6], [6, 8], [X_50, 5]]) + + with config_context(working_memory=working_memory): + knn_3 = KNNImputer(missing_values=na, n_neighbors=3, weights="distance") + assert_allclose(knn_3.fit_transform(X), X_expected) + + knn_4 = KNNImputer(missing_values=na, n_neighbors=4, weights="distance") + assert_allclose(knn_4.fit_transform(X), X_expected) + + +@pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)]) +def test_knn_tags(na, allow_nan): + knn = KNNImputer(missing_values=na) + assert knn._get_tags()["allow_nan"] == allow_nan diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8c7899cad4d07213fbb427f0e3ab473bf6e964f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2b71ba4371f08c91ad12d28568bc09321bc7c52 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e14b7b82f0e50b7c7d069196d8f26b8bd6d86538 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..063ffe6376aa9ceab7a4d8b3a769b2d9eb6f670c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abd25e68ace5a0ded68fac3f7269b9f559487a7a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc1dca92c2aa0e2b10d8796fc24eb6e21305abd0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e038fb02cb1f0cf57732989838d998ddc4c32ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..713743810aff855f07a9eda553a619d0b3db662f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba398b05af9e4f284987c5f786ae5039c076a0d0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ad48d566f28b88cd8ee690073acdc99b744905 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36d58c4753a52be35f0c17aa0ff4be1316d04bfa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0d64ce24cdd634bc2afcd9357388c7404eb7edbe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/__init__.py @@ -0,0 +1,25 @@ +""" +The :mod:`sklearn.svm` module includes Support Vector Machine algorithms. +""" + +# See http://scikit-learn.sourceforge.net/modules/svm.html for complete +# documentation. + +# Author: Fabian Pedregosa with help from +# the scikit-learn community. LibSVM and LibLinear are copyright +# of their respective owners. +# License: BSD 3 clause (C) INRIA 2010 + +from ._bounds import l1_min_c +from ._classes import SVC, SVR, LinearSVC, LinearSVR, NuSVC, NuSVR, OneClassSVM + +__all__ = [ + "LinearSVC", + "LinearSVR", + "NuSVC", + "NuSVR", + "OneClassSVM", + "SVC", + "SVR", + "l1_min_c", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6d154c99dc6694eb38721cd411a7f68c3a074f66 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_base.py @@ -0,0 +1,1249 @@ +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, ClassifierMixin, _fit_context +from ..exceptions import ConvergenceWarning, NotFittedError +from ..preprocessing import LabelEncoder +from ..utils import check_array, check_random_state, column_or_1d, compute_class_weight +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.metaestimators import available_if +from ..utils.multiclass import _ovr_decision_function, check_classification_targets +from ..utils.validation import ( + _check_large_sparse, + _check_sample_weight, + _num_samples, + check_consistent_length, + check_is_fitted, +) +from . import _liblinear as liblinear # type: ignore + +# mypy error: error: Module 'sklearn.svm' has no attribute '_libsvm' +# (and same for other imports) +from . import _libsvm as libsvm # type: ignore +from . 
import _libsvm_sparse as libsvm_sparse # type: ignore + +LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"] + + +def _one_vs_one_coef(dual_coef, n_support, support_vectors): + """Generate primal coefficients from dual coefficients + for the one-vs-one multi class LibSVM in the case + of a linear kernel.""" + + # get 1vs1 weights for all n*(n-1) classifiers. + # this is somewhat messy. + # shape of dual_coef_ is nSV * (n_classes -1) + # see docs for details + n_class = dual_coef.shape[0] + 1 + + # XXX we could do preallocation of coef but + # would have to take care in the sparse case + coef = [] + sv_locs = np.cumsum(np.hstack([[0], n_support])) + for class1 in range(n_class): + # SVs for class1: + sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :] + for class2 in range(class1 + 1, n_class): + # SVs for class1: + sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :] + + # dual coef for class1 SVs: + alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]] + # dual coef for class2 SVs: + alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]] + # build weight for class1 vs class2 + + coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2)) + return coef + + +class BaseLibSVM(BaseEstimator, metaclass=ABCMeta): + """Base class for estimators that use libsvm as backing library. + + This implements support vector machine classification and regression. + + Parameter documentation is in the derived `SVC` class. + """ + + _parameter_constraints: dict = { + "kernel": [ + StrOptions({"linear", "poly", "rbf", "sigmoid", "precomputed"}), + callable, + ], + "degree": [Interval(Integral, 0, None, closed="left")], + "gamma": [ + StrOptions({"scale", "auto"}), + Interval(Real, 0.0, None, closed="left"), + ], + "coef0": [Interval(Real, None, None, closed="neither")], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "C": [Interval(Real, 0.0, None, closed="neither")], + "nu": [Interval(Real, 0.0, 1.0, closed="right")], + "epsilon": [Interval(Real, 0.0, None, closed="left")], + "shrinking": ["boolean"], + "probability": ["boolean"], + "cache_size": [Interval(Real, 0, None, closed="neither")], + "class_weight": [StrOptions({"balanced"}), dict, None], + "verbose": ["verbose"], + "max_iter": [Interval(Integral, -1, None, closed="left")], + "random_state": ["random_state"], + } + + # The order of these must match the integer values in LibSVM. + # XXX These are actually the same in the dense case. Need to factor + # this out. + _sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"] + + @abstractmethod + def __init__( + self, + kernel, + degree, + gamma, + coef0, + tol, + C, + nu, + epsilon, + shrinking, + probability, + cache_size, + class_weight, + verbose, + max_iter, + random_state, + ): + if self._impl not in LIBSVM_IMPL: + raise ValueError( + "impl should be one of %s, %s was given" % (LIBSVM_IMPL, self._impl) + ) + + self.kernel = kernel + self.degree = degree + self.gamma = gamma + self.coef0 = coef0 + self.tol = tol + self.C = C + self.nu = nu + self.epsilon = epsilon + self.shrinking = shrinking + self.probability = probability + self.cache_size = cache_size + self.class_weight = class_weight + self.verbose = verbose + self.max_iter = max_iter + self.random_state = random_state + + def _more_tags(self): + # Used by cross_val_score. 
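+        # When this tag is True, X passed to fit/predict is an + # (n_samples, n_samples) kernel matrix, so cross-validation splitters + # must slice it along both axes, e.g. (a sketch): + # K_train = K[np.ix_(train_idx, train_idx)] + # K_test = K[np.ix_(test_idx, train_idx)]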
+ return {"pairwise": self.kernel == "precomputed"} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the SVM model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) \ + or (n_samples, n_samples) + Training vectors, where `n_samples` is the number of samples + and `n_features` is the number of features. + For kernel="precomputed", the expected shape of X is + (n_samples, n_samples). + + y : array-like of shape (n_samples,) + Target values (class labels in classification, real numbers in + regression). + + sample_weight : array-like of shape (n_samples,), default=None + Per-sample weights. Rescale C per sample. Higher weights + force the classifier to put more emphasis on these points. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + If X and y are not C-ordered and contiguous arrays of np.float64 and + X is not a scipy.sparse.csr_matrix, X and/or y may be copied. + + If X is a dense array, then the other methods will not support sparse + matrices as input. + """ + rnd = check_random_state(self.random_state) + + sparse = sp.issparse(X) + if sparse and self.kernel == "precomputed": + raise TypeError("Sparse precomputed kernels are not supported.") + self._sparse = sparse and not callable(self.kernel) + + if callable(self.kernel): + check_consistent_length(X, y) + else: + X, y = self._validate_data( + X, + y, + dtype=np.float64, + order="C", + accept_sparse="csr", + accept_large_sparse=False, + ) + + y = self._validate_targets(y) + + sample_weight = np.asarray( + [] if sample_weight is None else sample_weight, dtype=np.float64 + ) + solver_type = LIBSVM_IMPL.index(self._impl) + + # input validation + n_samples = _num_samples(X) + if solver_type != 2 and n_samples != y.shape[0]: + raise ValueError( + "X and y have incompatible shapes.\n" + + "X has %s samples, but y has %s." % (n_samples, y.shape[0]) + ) + + if self.kernel == "precomputed" and n_samples != X.shape[1]: + raise ValueError( + "Precomputed matrix must be a square matrix." + " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1]) + ) + + if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples: + raise ValueError( + "sample_weight and X have incompatible shapes: " + "%r vs %r\n" + "Note: Sparse matrices cannot be indexed w/" + "boolean masks (use `indices=True` in CV)." + % (sample_weight.shape, X.shape) + ) + + kernel = "precomputed" if callable(self.kernel) else self.kernel + + if kernel == "precomputed": + # unused but needs to be a float for cython code that ignores + # it anyway + self._gamma = 0.0 + elif isinstance(self.gamma, str): + if self.gamma == "scale": + # var = E[X^2] - E[X]^2 if sparse + X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var() + self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0 + elif self.gamma == "auto": + self._gamma = 1.0 / X.shape[1] + elif isinstance(self.gamma, Real): + self._gamma = self.gamma + + fit = self._sparse_fit if self._sparse else self._dense_fit + if self.verbose: + print("[LibSVM]", end="") + + seed = rnd.randint(np.iinfo("i").max) + fit(X, y, sample_weight, solver_type, kernel, random_seed=seed) + # see comment on the other call to np.iinfo in this file + + self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,) + + # In binary case, we need to flip the sign of coef, intercept and + # decision function. 
Use self._intercept_ and self._dual_coef_ + # internally. + self._intercept_ = self.intercept_.copy() + self._dual_coef_ = self.dual_coef_ + if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2: + self.intercept_ *= -1 + self.dual_coef_ = -self.dual_coef_ + + dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_ + intercept_finiteness = np.isfinite(self._intercept_).all() + dual_coef_finiteness = np.isfinite(dual_coef).all() + if not (intercept_finiteness and dual_coef_finiteness): + raise ValueError( + "The dual coefficients or intercepts are not finite." + " The input data may contain large values and need to be" + " preprocessed." + ) + + # Since, in the case of SVC and NuSVC, the number of models optimized by + # libSVM could be greater than one (depending on the input), `n_iter_` + # stores an ndarray. + # For the other sub-classes (SVR, NuSVR, and OneClassSVM), the number of + # models optimized by libSVM is always one, so `n_iter_` stores an + # integer. + if self._impl in ["c_svc", "nu_svc"]: + self.n_iter_ = self._num_iter + else: + self.n_iter_ = self._num_iter.item() + + return self + + def _validate_targets(self, y): + """Validation of y and class_weight. + + Default implementation for SVR and one-class; overridden in BaseSVC. + """ + return column_or_1d(y, warn=True).astype(np.float64, copy=False) + + def _warn_from_fit_status(self): + assert self.fit_status_ in (0, 1) + if self.fit_status_ == 1: + warnings.warn( + "Solver terminated early (max_iter=%i)." + " Consider pre-processing your data with" + " StandardScaler or MinMaxScaler." + % self.max_iter, + ConvergenceWarning, + ) + + def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed): + if callable(self.kernel): + # you must store a reference to X to compute the kernel in predict + # TODO: add keyword copy to copy on demand + self.__Xfit = X + X = self._compute_kernel(X) + + if X.shape[0] != X.shape[1]: + raise ValueError("X.shape[0] should be equal to X.shape[1]") + + libsvm.set_verbosity_wrap(self.verbose) + + # we don't pass **self.get_params() to allow subclasses to + # add other parameters to __init__ + ( + self.support_, + self.support_vectors_, + self._n_support, + self.dual_coef_, + self.intercept_, + self._probA, + self._probB, + self.fit_status_, + self._num_iter, + ) = libsvm.fit( + X, + y, + svm_type=solver_type, + sample_weight=sample_weight, + class_weight=getattr(self, "class_weight_", np.empty(0)), + kernel=kernel, + C=self.C, + nu=self.nu, + probability=self.probability, + degree=self.degree, + shrinking=self.shrinking, + tol=self.tol, + cache_size=self.cache_size, + coef0=self.coef0, + gamma=self._gamma, + epsilon=self.epsilon, + max_iter=self.max_iter, + random_seed=random_seed, + ) + + self._warn_from_fit_status() + + def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed): + X.data = np.asarray(X.data, dtype=np.float64, order="C") + X.sort_indices() + + kernel_type = self._sparse_kernels.index(kernel) + + libsvm_sparse.set_verbosity_wrap(self.verbose) + + ( + self.support_, + self.support_vectors_, + dual_coef_data, + self.intercept_, + self._n_support, + self._probA, + self._probB, + self.fit_status_, + self._num_iter, + ) = libsvm_sparse.libsvm_sparse_train( + X.shape[1], + X.data, + X.indices, + X.indptr, + y, + solver_type, + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + self.C, + getattr(self, "class_weight_", np.empty(0)), + sample_weight, + self.nu, + self.cache_size, + self.epsilon, + 
int(self.shrinking), + int(self.probability), + self.max_iter, + random_seed, + ) + + self._warn_from_fit_status() + + if hasattr(self, "classes_"): + n_class = len(self.classes_) - 1 + else: # regression + n_class = 1 + n_SV = self.support_vectors_.shape[0] + + dual_coef_indices = np.tile(np.arange(n_SV), n_class) + if not n_SV: + self.dual_coef_ = sp.csr_matrix([]) + else: + dual_coef_indptr = np.arange( + 0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class + ) + self.dual_coef_ = sp.csr_matrix( + (dual_coef_data, dual_coef_indices, dual_coef_indptr), (n_class, n_SV) + ) + + def predict(self, X): + """Perform regression on samples in X. + + For an one-class model, +1 (inlier) or -1 (outlier) is returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The predicted values. + """ + X = self._validate_for_predict(X) + predict = self._sparse_predict if self._sparse else self._dense_predict + return predict(X) + + def _dense_predict(self, X): + X = self._compute_kernel(X) + if X.ndim == 1: + X = check_array(X, order="C", accept_large_sparse=False) + + kernel = self.kernel + if callable(self.kernel): + kernel = "precomputed" + if X.shape[1] != self.shape_fit_[0]: + raise ValueError( + "X.shape[1] = %d should be equal to %d, " + "the number of samples at training time" + % (X.shape[1], self.shape_fit_[0]) + ) + + svm_type = LIBSVM_IMPL.index(self._impl) + + return libsvm.predict( + X, + self.support_, + self.support_vectors_, + self._n_support, + self._dual_coef_, + self._intercept_, + self._probA, + self._probB, + svm_type=svm_type, + kernel=kernel, + degree=self.degree, + coef0=self.coef0, + gamma=self._gamma, + cache_size=self.cache_size, + ) + + def _sparse_predict(self, X): + # Precondition: X is a csr_matrix of dtype np.float64. + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + kernel_type = self._sparse_kernels.index(kernel) + + C = 0.0 # C is not useful here + + return libsvm_sparse.libsvm_sparse_predict( + X.data, + X.indices, + X.indptr, + self.support_vectors_.data, + self.support_vectors_.indices, + self.support_vectors_.indptr, + self._dual_coef_.data, + self._intercept_, + LIBSVM_IMPL.index(self._impl), + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + C, + getattr(self, "class_weight_", np.empty(0)), + self.nu, + self.epsilon, + self.shrinking, + self.probability, + self._n_support, + self._probA, + self._probB, + ) + + def _compute_kernel(self, X): + """Return the data transformed by a callable kernel""" + if callable(self.kernel): + # in the case of precomputed kernel given as a function, we + # have to compute explicitly the kernel matrix + kernel = self.kernel(X, self.__Xfit) + if sp.issparse(kernel): + kernel = kernel.toarray() + X = np.asarray(kernel, dtype=np.float64, order="C") + return X + + def _decision_function(self, X): + """Evaluates the decision function for the samples in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + X : array-like of shape (n_samples, n_class * (n_class-1) / 2) + Returns the decision function of the sample for each class + in the model. + """ + # NOTE: _validate_for_predict contains check for is_fitted + # hence must be placed before any other attributes are used. 
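+        # For reference, libsvm returns one one-vs-one value per class pair, + # i.e. n_class * (n_class - 1) / 2 columns; with 3 classes that is + # 3 columns, ordered (0 vs 1), (0 vs 2), (1 vs 2).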
+ X = self._validate_for_predict(X) + X = self._compute_kernel(X) + + if self._sparse: + dec_func = self._sparse_decision_function(X) + else: + dec_func = self._dense_decision_function(X) + + # In binary case, we need to flip the sign of coef, intercept and + # decision function. + if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2: + return -dec_func.ravel() + + return dec_func + + def _dense_decision_function(self, X): + X = check_array(X, dtype=np.float64, order="C", accept_large_sparse=False) + + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + return libsvm.decision_function( + X, + self.support_, + self.support_vectors_, + self._n_support, + self._dual_coef_, + self._intercept_, + self._probA, + self._probB, + svm_type=LIBSVM_IMPL.index(self._impl), + kernel=kernel, + degree=self.degree, + cache_size=self.cache_size, + coef0=self.coef0, + gamma=self._gamma, + ) + + def _sparse_decision_function(self, X): + X.data = np.asarray(X.data, dtype=np.float64, order="C") + + kernel = self.kernel + if hasattr(kernel, "__call__"): + kernel = "precomputed" + + kernel_type = self._sparse_kernels.index(kernel) + + return libsvm_sparse.libsvm_sparse_decision_function( + X.data, + X.indices, + X.indptr, + self.support_vectors_.data, + self.support_vectors_.indices, + self.support_vectors_.indptr, + self._dual_coef_.data, + self._intercept_, + LIBSVM_IMPL.index(self._impl), + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + self.C, + getattr(self, "class_weight_", np.empty(0)), + self.nu, + self.epsilon, + self.shrinking, + self.probability, + self._n_support, + self._probA, + self._probB, + ) + + def _validate_for_predict(self, X): + check_is_fitted(self) + + if not callable(self.kernel): + X = self._validate_data( + X, + accept_sparse="csr", + dtype=np.float64, + order="C", + accept_large_sparse=False, + reset=False, + ) + + if self._sparse and not sp.issparse(X): + X = sp.csr_matrix(X) + if self._sparse: + X.sort_indices() + + if sp.issparse(X) and not self._sparse and not callable(self.kernel): + raise ValueError( + "cannot use sparse input in %r trained on dense data" + % type(self).__name__ + ) + + if self.kernel == "precomputed": + if X.shape[1] != self.shape_fit_[0]: + raise ValueError( + "X.shape[1] = %d should be equal to %d, " + "the number of samples at training time" + % (X.shape[1], self.shape_fit_[0]) + ) + # Fixes https://nvd.nist.gov/vuln/detail/CVE-2020-28975 + # Check that _n_support is consistent with support_vectors + sv = self.support_vectors_ + if not self._sparse and sv.size > 0 and self.n_support_.sum() != sv.shape[0]: + raise ValueError( + f"The internal representation of {self.__class__.__name__} was altered" + ) + return X + + @property + def coef_(self): + """Weights assigned to the features when `kernel="linear"`. + + Returns + ------- + ndarray of shape (n_features, n_classes) + """ + if self.kernel != "linear": + raise AttributeError("coef_ is only available when using a linear kernel") + + coef = self._get_coef() + + # coef_ being a read-only property, it's better to mark the value as + # immutable to avoid hiding potential bugs for the unsuspecting user. 
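+        # For example, a later `clf.coef_[0, 0] = 0.0` now raises + # "ValueError: assignment destination is read-only" instead of silently + # mutating an array that the estimator never reads back.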
+ if sp.issparse(coef): + # sparse matrix do not have global flags + coef.data.flags.writeable = False + else: + # regular dense array + coef.flags.writeable = False + return coef + + def _get_coef(self): + return safe_sparse_dot(self._dual_coef_, self.support_vectors_) + + @property + def n_support_(self): + """Number of support vectors for each class.""" + try: + check_is_fitted(self) + except NotFittedError: + raise AttributeError + + svm_type = LIBSVM_IMPL.index(self._impl) + if svm_type in (0, 1): + return self._n_support + else: + # SVR and OneClass + # _n_support has size 2, we make it size 1 + return np.array([self._n_support[0]]) + + +class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta): + """ABC for LibSVM-based classifiers.""" + + _parameter_constraints: dict = { + **BaseLibSVM._parameter_constraints, + "decision_function_shape": [StrOptions({"ovr", "ovo"})], + "break_ties": ["boolean"], + } + for unused_param in ["epsilon", "nu"]: + _parameter_constraints.pop(unused_param) + + @abstractmethod + def __init__( + self, + kernel, + degree, + gamma, + coef0, + tol, + C, + nu, + shrinking, + probability, + cache_size, + class_weight, + verbose, + max_iter, + decision_function_shape, + random_state, + break_ties, + ): + self.decision_function_shape = decision_function_shape + self.break_ties = break_ties + super().__init__( + kernel=kernel, + degree=degree, + gamma=gamma, + coef0=coef0, + tol=tol, + C=C, + nu=nu, + epsilon=0.0, + shrinking=shrinking, + probability=probability, + cache_size=cache_size, + class_weight=class_weight, + verbose=verbose, + max_iter=max_iter, + random_state=random_state, + ) + + def _validate_targets(self, y): + y_ = column_or_1d(y, warn=True) + check_classification_targets(y) + cls, y = np.unique(y_, return_inverse=True) + self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_) + if len(cls) < 2: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % len(cls) + ) + + self.classes_ = cls + + return np.asarray(y, dtype=np.float64, order="C") + + def decision_function(self, X): + """Evaluate the decision function for the samples in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2) + Returns the decision function of the sample for each class + in the model. + If decision_function_shape='ovr', the shape is (n_samples, + n_classes). + + Notes + ----- + If decision_function_shape='ovo', the function values are proportional + to the distance of the samples X to the separating hyperplane. If the + exact distances are required, divide the function values by the norm of + the weight vector (``coef_``). See also `this question + `_ for further details. + If decision_function_shape='ovr', the decision function is a monotonic + transformation of ovo decision function. + """ + dec = self._decision_function(X) + if self.decision_function_shape == "ovr" and len(self.classes_) > 2: + return _ovr_decision_function(dec < 0, -dec, len(self.classes_)) + return dec + + def predict(self, X): + """Perform classification on samples in X. + + For an one-class model, +1 or -1 is returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples_test, n_samples_train) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). 
+ + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Class labels for samples in X. + """ + check_is_fitted(self) + if self.break_ties and self.decision_function_shape == "ovo": + raise ValueError( + "break_ties must be False when decision_function_shape is 'ovo'" + ) + + if ( + self.break_ties + and self.decision_function_shape == "ovr" + and len(self.classes_) > 2 + ): + y = np.argmax(self.decision_function(X), axis=1) + else: + y = super().predict(X) + return self.classes_.take(np.asarray(y, dtype=np.intp)) + + # Hacky way of getting predict_proba to raise an AttributeError when + # probability=False using properties. Do not use this in new code; when + # probabilities are not available depending on a setting, introduce two + # estimators. + def _check_proba(self): + if not self.probability: + raise AttributeError( + "predict_proba is not available when probability=False" + ) + if self._impl not in ("c_svc", "nu_svc"): + raise AttributeError("predict_proba only implemented for SVC and NuSVC") + return True + + @available_if(_check_proba) + def predict_proba(self, X): + """Compute probabilities of possible outcomes for samples in X. + + The model needs to have probability information computed at training + time: fit with attribute `probability` set to True. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). + + Returns + ------- + T : ndarray of shape (n_samples, n_classes) + Returns the probability of the sample for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + + Notes + ----- + The probability model is created using cross validation, so + the results can be slightly different from those obtained by + predict. Also, it will produce meaningless results on very small + datasets. + """ + X = self._validate_for_predict(X) + if self.probA_.size == 0 or self.probB_.size == 0: + raise NotFittedError( + "predict_proba is not available when fitted with probability=False" + ) + pred_proba = ( + self._sparse_predict_proba if self._sparse else self._dense_predict_proba + ) + return pred_proba(X) + + @available_if(_check_proba) + def predict_log_proba(self, X): + """Compute log probabilities of possible outcomes for samples in X. + + The model needs to have probability information computed at training + time: fit with attribute `probability` set to True. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples_test, n_samples_train) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). + + Returns + ------- + T : ndarray of shape (n_samples, n_classes) + Returns the log-probabilities of the sample for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + + Notes + ----- + The probability model is created using cross validation, so + the results can be slightly different from those obtained by + predict. Also, it will produce meaningless results on very small + datasets.
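+ + Since this method simply takes ``np.log`` of :meth:`predict_proba`, a + class probability of exactly zero maps to ``-inf``.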
+ """ + return np.log(self.predict_proba(X)) + + def _dense_predict_proba(self, X): + X = self._compute_kernel(X) + + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + svm_type = LIBSVM_IMPL.index(self._impl) + pprob = libsvm.predict_proba( + X, + self.support_, + self.support_vectors_, + self._n_support, + self._dual_coef_, + self._intercept_, + self._probA, + self._probB, + svm_type=svm_type, + kernel=kernel, + degree=self.degree, + cache_size=self.cache_size, + coef0=self.coef0, + gamma=self._gamma, + ) + + return pprob + + def _sparse_predict_proba(self, X): + X.data = np.asarray(X.data, dtype=np.float64, order="C") + + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + kernel_type = self._sparse_kernels.index(kernel) + + return libsvm_sparse.libsvm_sparse_predict_proba( + X.data, + X.indices, + X.indptr, + self.support_vectors_.data, + self.support_vectors_.indices, + self.support_vectors_.indptr, + self._dual_coef_.data, + self._intercept_, + LIBSVM_IMPL.index(self._impl), + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + self.C, + getattr(self, "class_weight_", np.empty(0)), + self.nu, + self.epsilon, + self.shrinking, + self.probability, + self._n_support, + self._probA, + self._probB, + ) + + def _get_coef(self): + if self.dual_coef_.shape[0] == 1: + # binary classifier + coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_) + else: + # 1vs1 classifier + coef = _one_vs_one_coef( + self.dual_coef_, self._n_support, self.support_vectors_ + ) + if sp.issparse(coef[0]): + coef = sp.vstack(coef).tocsr() + else: + coef = np.vstack(coef) + + return coef + + @property + def probA_(self): + """Parameter learned in Platt scaling when `probability=True`. + + Returns + ------- + ndarray of shape (n_classes * (n_classes - 1) / 2) + """ + return self._probA + + @property + def probB_(self): + """Parameter learned in Platt scaling when `probability=True`. + + Returns + ------- + ndarray of shape (n_classes * (n_classes - 1) / 2) + """ + return self._probB + + +def _get_liblinear_solver_type(multi_class, penalty, loss, dual): + """Find the liblinear magic number for the solver. + + This number depends on the values of the following attributes: + - multi_class + - penalty + - loss + - dual + + The same number is also internally used by LibLinear to determine + which solver to use. 
+ """ + # nested dicts containing level 1: available loss functions, + # level2: available penalties for the given loss function, + # level3: whether the dual solver is available for the specified + # combination of loss function and penalty + _solver_type_dict = { + "logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}}, + "hinge": {"l2": {True: 3}}, + "squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}}, + "epsilon_insensitive": {"l2": {True: 13}}, + "squared_epsilon_insensitive": {"l2": {False: 11, True: 12}}, + "crammer_singer": 4, + } + + if multi_class == "crammer_singer": + return _solver_type_dict[multi_class] + elif multi_class != "ovr": + raise ValueError( + "`multi_class` must be one of `ovr`, `crammer_singer`, got %r" % multi_class + ) + + _solver_pen = _solver_type_dict.get(loss, None) + if _solver_pen is None: + error_string = "loss='%s' is not supported" % loss + else: + _solver_dual = _solver_pen.get(penalty, None) + if _solver_dual is None: + error_string = ( + "The combination of penalty='%s' and loss='%s' is not supported" + % (penalty, loss) + ) + else: + solver_num = _solver_dual.get(dual, None) + if solver_num is None: + error_string = ( + "The combination of penalty='%s' and " + "loss='%s' are not supported when dual=%s" % (penalty, loss, dual) + ) + else: + return solver_num + raise ValueError( + "Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r" + % (error_string, penalty, loss, dual) + ) + + +def _fit_liblinear( + X, + y, + C, + fit_intercept, + intercept_scaling, + class_weight, + penalty, + dual, + verbose, + max_iter, + tol, + random_state=None, + multi_class="ovr", + loss="logistic_regression", + epsilon=0.1, + sample_weight=None, +): + """Used by Logistic Regression (and CV) and LinearSVC/LinearSVR. + + Preprocessing is done in this function before supplying it to liblinear. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X + + C : float + Inverse of cross-validation parameter. The lower the C, the higher + the penalization. + + fit_intercept : bool + Whether or not to fit an intercept. If set to True, the feature vector + is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where + 1 corresponds to the intercept. If set to False, no intercept will be + used in calculations (i.e. data is expected to be already centered). + + intercept_scaling : float + Liblinear internally penalizes the intercept, treating it like any + other term in the feature vector. To reduce the impact of the + regularization on the intercept, the `intercept_scaling` parameter can + be set to a value greater than 1; the higher the value of + `intercept_scaling`, the lower the impact of regularization on it. + Then, the weights become `[w_x_1, ..., w_x_n, + w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent + the feature weights and the intercept weight is scaled by + `intercept_scaling`. This scaling allows the intercept term to have a + different regularization behavior compared to the other features. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. 
+
+        The "balanced" mode uses the values of y to automatically adjust
+        weights inversely proportional to class frequencies in the input data
+        as ``n_samples / (n_classes * np.bincount(y))``.
+
+    penalty : {'l1', 'l2'}
+        The norm of the penalty used in regularization.
+
+    dual : bool
+        Dual or primal formulation.
+
+    verbose : int
+        Set verbose to any positive number for verbosity.
+
+    max_iter : int
+        Maximum number of iterations.
+
+    tol : float
+        Stopping condition.
+
+    random_state : int, RandomState instance or None, default=None
+        Controls the pseudo random number generation for shuffling the data.
+        Pass an int for reproducible output across multiple function calls.
+        See :term:`Glossary `.
+
+    multi_class : {'ovr', 'crammer_singer'}, default='ovr'
+        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
+        optimizes a joint objective over all classes.
+        While `crammer_singer` is interesting from a theoretical perspective
+        as it is consistent, it is seldom used in practice: it rarely leads to
+        better accuracy and is more expensive to compute.
+        If `crammer_singer` is chosen, the options loss, penalty and dual will
+        be ignored.
+
+    loss : {'logistic_regression', 'hinge', 'squared_hinge', \
+            'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
+            default='logistic_regression'
+        The loss function used to fit the model.
+
+    epsilon : float, default=0.1
+        Epsilon parameter in the epsilon-insensitive loss function. Note
+        that the value of this parameter depends on the scale of the target
+        variable y. If unsure, set epsilon=0.
+
+    sample_weight : array-like of shape (n_samples,), default=None
+        Weights assigned to each sample.
+
+    Returns
+    -------
+    coef_ : ndarray of shape (n_classes, n_features)
+        The coefficient vector obtained by minimizing the objective function
+        (a single row in the binary case).
+
+    intercept_ : float
+        The intercept term added to the vector.
+
+    n_iter_ : array of int
+        Number of iterations run for each class.
+    """
+    if loss not in ["epsilon_insensitive", "squared_epsilon_insensitive"]:
+        enc = LabelEncoder()
+        y_ind = enc.fit_transform(y)
+        classes_ = enc.classes_
+        if len(classes_) < 2:
+            raise ValueError(
+                "This solver needs samples of at least 2 classes"
+                " in the data, but the data contains only one"
+                " class: %r"
+                % classes_[0]
+            )
+
+        class_weight_ = compute_class_weight(class_weight, classes=classes_, y=y)
+    else:
+        class_weight_ = np.empty(0, dtype=np.float64)
+        y_ind = y
+    liblinear.set_verbosity_wrap(verbose)
+    rnd = check_random_state(random_state)
+    if verbose:
+        print("[LibLinear]", end="")
+
+    # LinearSVC breaks when intercept_scaling is <= 0
+    bias = -1.0
+    if fit_intercept:
+        if intercept_scaling <= 0:
+            raise ValueError(
+                "Intercept scaling is %r but needs to be greater "
+                "than 0. To disable fitting an intercept,"
+                " set fit_intercept=False."
% intercept_scaling + ) + else: + bias = intercept_scaling + + libsvm.set_verbosity_wrap(verbose) + libsvm_sparse.set_verbosity_wrap(verbose) + liblinear.set_verbosity_wrap(verbose) + + # Liblinear doesn't support 64bit sparse matrix indices yet + if sp.issparse(X): + _check_large_sparse(X) + + # LibLinear wants targets as doubles, even for classification + y_ind = np.asarray(y_ind, dtype=np.float64).ravel() + y_ind = np.require(y_ind, requirements="W") + + sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64) + + solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual) + raw_coef_, n_iter_ = liblinear.train_wrap( + X, + y_ind, + sp.issparse(X), + solver_type, + tol, + bias, + C, + class_weight_, + max_iter, + rnd.randint(np.iinfo("i").max), + epsilon, + sample_weight, + ) + # Regarding rnd.randint(..) in the above signature: + # seed for srand in range [0..INT_MAX); due to limitations in Numpy + # on 32-bit platforms, we can't get to the UINT_MAX limit that + # srand supports + n_iter_max = max(n_iter_) + if n_iter_max >= max_iter: + warnings.warn( + "Liblinear failed to converge, increase the number of iterations.", + ConvergenceWarning, + ) + + if fit_intercept: + coef_ = raw_coef_[:, :-1] + intercept_ = intercept_scaling * raw_coef_[:, -1] + else: + coef_ = raw_coef_ + intercept_ = 0.0 + + return coef_, intercept_, n_iter_ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_bounds.py b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_bounds.py new file mode 100644 index 0000000000000000000000000000000000000000..d14297230af4cd8a73428825c7dc8f93ccc71d72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_bounds.py @@ -0,0 +1,94 @@ +"""Determination of parameter bounds""" +# Author: Paolo Losi +# License: BSD 3 clause + +from numbers import Real + +import numpy as np + +from ..preprocessing import LabelBinarizer +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import safe_sparse_dot +from ..utils.validation import check_array, check_consistent_length + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "loss": [StrOptions({"squared_hinge", "log"})], + "fit_intercept": ["boolean"], + "intercept_scaling": [Interval(Real, 0, None, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0): + """Return the lowest bound for C. + + The lower bound for C is computed such that for C in (l1_min_C, infinity) + the model is guaranteed not to be empty. This applies to l1 penalized + classifiers, such as LinearSVC with penalty='l1' and + linear_model.LogisticRegression with penalty='l1'. + + This value is valid if class_weight parameter in fit() is not set. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + loss : {'squared_hinge', 'log'}, default='squared_hinge' + Specifies the loss function. + With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss). + With 'log' it is the loss of logistic regression models. + + fit_intercept : bool, default=True + Specifies if the intercept should be fitted by the model. + It must match the fit() method parameter. 
+
+    intercept_scaling : float, default=1.0
+        When fit_intercept is True, instance vector x becomes
+        [x, intercept_scaling],
+        i.e. a "synthetic" feature with constant value equal to
+        intercept_scaling is appended to the instance vector.
+        It must match the fit() method parameter.
+
+    Returns
+    -------
+    l1_min_c : float
+        Minimum value for C.
+
+    Examples
+    --------
+    >>> from sklearn.svm import l1_min_c
+    >>> from sklearn.datasets import make_classification
+    >>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
+    >>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
+    0.0044
+    """
+
+    X = check_array(X, accept_sparse="csc")
+    check_consistent_length(X, y)
+
+    Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
+    # maximum absolute value over classes and features
+    den = np.max(np.abs(safe_sparse_dot(Y, X)))
+    if fit_intercept:
+        bias = np.full(
+            (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
+        )
+        den = max(den, abs(np.dot(Y, bias)).max())
+
+    if den == 0.0:
+        raise ValueError(
+            "Ill-posed l1_min_c calculation: l1 will always "
+            "select zero coefficients for this data"
+        )
+    if loss == "squared_hinge":
+        return 0.5 / den
+    else:  # loss == 'log':
+        return 2.0 / den
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..9ce1513a4dce3cec377fdd6816edb7e8dc9b889d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..9746860ccffde396da07680ab20d6cd78768bf26
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ac62d63665d36a72cf79dac40c43b42a29ae7d8e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..efef1d1e500d6fc4de114fdbfd3ddd1c61a403e3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f119491712d1531ae67b5ddaad14a0fe173664d9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8da1c0419b88597977408947fc265bf305fc11f0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57b2aa769fd1e4ac93b52c4684660cf0c4976e29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf88dde42aa0352e066692d9f7e6480f5e7c4ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest +from scipy import stats + +from sklearn.linear_model import LogisticRegression +from sklearn.svm import LinearSVC +from sklearn.svm._bounds import l1_min_c +from sklearn.svm._newrand import bounded_rand_int_wrap, set_seed_wrap +from sklearn.utils.fixes import CSR_CONTAINERS + +dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]] + +Y1 = [0, 1, 1, 1] +Y2 = [2, 1, 0, 0] + + +@pytest.mark.parametrize("X_container", CSR_CONTAINERS + [np.array]) +@pytest.mark.parametrize("loss", ["squared_hinge", "log"]) +@pytest.mark.parametrize("Y_label", ["two-classes", "multi-class"]) +@pytest.mark.parametrize("intercept_label", ["no-intercept", "fit-intercept"]) +def test_l1_min_c(X_container, loss, Y_label, intercept_label): + Ys = {"two-classes": Y1, "multi-class": Y2} + intercepts = { + "no-intercept": {"fit_intercept": False}, + "fit-intercept": {"fit_intercept": True, "intercept_scaling": 10}, + } + + X = X_container(dense_X) + Y = Ys[Y_label] + intercept_params = intercepts[intercept_label] + check_l1_min_c(X, Y, loss, **intercept_params) + + +def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=1.0): + min_c = l1_min_c( + X, + y, + loss=loss, + fit_intercept=fit_intercept, + intercept_scaling=intercept_scaling, + ) + + clf = { + "log": LogisticRegression(penalty="l1", solver="liblinear"), + "squared_hinge": LinearSVC(loss="squared_hinge", penalty="l1", dual=False), + }[loss] + + clf.fit_intercept = fit_intercept + clf.intercept_scaling = intercept_scaling + + clf.C = min_c + clf.fit(X, y) + assert (np.asarray(clf.coef_) == 0).all() + assert (np.asarray(clf.intercept_) == 0).all() + + clf.C = min_c * 1.01 + clf.fit(X, y) + assert (np.asarray(clf.coef_) != 0).any() or (np.asarray(clf.intercept_) != 0).any() + + +def test_ill_posed_min_c(): + X = [[0, 0], [0, 0]] + y = [0, 1] + with pytest.raises(ValueError): + l1_min_c(X, y) + + +_MAX_UNSIGNED_INT = 4294967295 + + +def test_newrand_default(): + """Test that bounded_rand_int_wrap without seeding respects the range + + 
Note this test should pass both when executed alone and in conjunction
+    with other tests that call set_seed explicitly, in any order: it checks
+    invariants on the RNG rather than specific values.
+    """
+    generated = [bounded_rand_int_wrap(100) for _ in range(10)]
+    assert all(0 <= x < 100 for x in generated)
+    assert not all(x == generated[0] for x in generated)
+
+
+@pytest.mark.parametrize("seed, expected", [(0, 54), (_MAX_UNSIGNED_INT, 9)])
+def test_newrand_set_seed(seed, expected):
+    """Test that `set_seed` produces deterministic results"""
+    set_seed_wrap(seed)
+    generated = bounded_rand_int_wrap(100)
+    assert generated == expected
+
+
+@pytest.mark.parametrize("seed", [-1, _MAX_UNSIGNED_INT + 1])
+def test_newrand_set_seed_overflow(seed):
+    """Test that `set_seed_wrap` is only defined for unsigned 32-bit ints"""
+    with pytest.raises(OverflowError):
+        set_seed_wrap(seed)
+
+
+@pytest.mark.parametrize("range_, n_pts", [(_MAX_UNSIGNED_INT, 10000), (100, 25)])
+def test_newrand_bounded_rand_int(range_, n_pts):
+    """Test that `bounded_rand_int` follows a uniform distribution"""
+    # XXX: this test is very seed-sensitive: either it is wrong (too strict?)
+    # or the wrapped RNG is not uniform enough, at least on some platforms.
+    set_seed_wrap(42)
+    n_iter = 100
+    ks_pvals = []
+    uniform_dist = stats.uniform(loc=0, scale=range_)
+    # perform multiple samplings to make the chance of an outlier sample
+    # negligible
+    for _ in range(n_iter):
+        # Deterministic random sampling
+        sample = [bounded_rand_int_wrap(range_) for _ in range(n_pts)]
+        res = stats.kstest(sample, uniform_dist.cdf)
+        ks_pvals.append(res.pvalue)
+    # Null hypothesis = samples come from a uniform distribution.
+    # Under the null hypothesis, p-values should be uniformly distributed
+    # and not concentrated on low values
+    # (this may seem counter-intuitive but is backed by multiple refs)
+    # So we can do two checks:
+
+    # (1) check uniformity of p-values
+    uniform_p_vals_dist = stats.uniform(loc=0, scale=1)
+    res_pvals = stats.kstest(ks_pvals, uniform_p_vals_dist.cdf)
+    assert res_pvals.pvalue > 0.05, (
+        "Null hypothesis rejected: generated random numbers are not uniform."
+        " Details: the (meta) p-value of the test of uniform distribution"
+        f" of p-values is {res_pvals.pvalue} which is not > 0.05"
+    )
+
+    # (2) (safety belt) check that 90% of p-values are above 0.05
+    min_10pct_pval = np.percentile(ks_pvals, q=10)
+    # lower 10th quantile pvalue <= 0.05 means that the test rejects the
+    # null hypothesis that the sample came from the uniform distribution
+    assert min_10pct_pval > 0.05, (
+        "Null hypothesis rejected: generated random numbers are not uniform. "
+        f"Details: lower 10th quantile p-value of {min_10pct_pval} not > 0.05."
+    )
+
+
+@pytest.mark.parametrize("range_", [-1, _MAX_UNSIGNED_INT + 1])
+def test_newrand_bounded_rand_int_limits(range_):
+    """Test that `bounded_rand_int_wrap` is only defined for unsigned 32-bit ints"""
+    with pytest.raises(OverflowError):
+        bounded_rand_int_wrap(range_)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7e517fdce8932c8161832eede7dd1aaef2a2108
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py
@@ -0,0 +1,493 @@
+import numpy as np
+import pytest
+from scipy import sparse
+
+from sklearn import base, datasets, linear_model, svm
+from sklearn.datasets import load_digits, make_blobs, make_classification
+from sklearn.exceptions import ConvergenceWarning
+from sklearn.svm.tests import test_svm
+from sklearn.utils._testing import (
+    assert_allclose,
+    assert_array_almost_equal,
+    assert_array_equal,
+    ignore_warnings,
+    skip_if_32bit,
+)
+from sklearn.utils.extmath import safe_sparse_dot
+from sklearn.utils.fixes import (
+    CSR_CONTAINERS,
+    DOK_CONTAINERS,
+    LIL_CONTAINERS,
+)
+
+# test sample 1
+X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
+Y = [1, 1, 1, 2, 2, 2]
+T = np.array([[-1, -1], [2, 2], [3, 2]])
+true_result = [1, 2, 2]
+
+# test sample 2
+X2 = np.array(
+    [
+        [0, 0, 0],
+        [1, 1, 1],
+        [2, 0, 0],
+        [0, 0, 2],
+        [3, 3, 3],
+    ]
+)
+Y2 = [1, 2, 2, 2, 3]
+T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
+true_result2 = [1, 2, 3]
+
+iris = datasets.load_iris()
+rng = np.random.RandomState(0)
+perm = rng.permutation(iris.target.size)
+iris.data = iris.data[perm]
+iris.target = iris.target[perm]
+
+X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
+
+
+def check_svm_model_equal(dense_svm, X_train, y_train, X_test):
+    # Fit the original svm model on the dense data and fit an identical
+    # clone of it on the sparse data
+    sparse_svm = base.clone(dense_svm)
+
+    dense_svm.fit(X_train.toarray(), y_train)
+    if sparse.issparse(X_test):
+        X_test_dense = X_test.toarray()
+    else:
+        X_test_dense = X_test
+    sparse_svm.fit(X_train, y_train)
+    assert sparse.issparse(sparse_svm.support_vectors_)
+    assert sparse.issparse(sparse_svm.dual_coef_)
+    assert_allclose(dense_svm.support_vectors_, sparse_svm.support_vectors_.toarray())
+    assert_allclose(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
+    if dense_svm.kernel == "linear":
+        assert sparse.issparse(sparse_svm.coef_)
+        assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
+    assert_allclose(dense_svm.support_, sparse_svm.support_)
+    assert_allclose(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
+
+    assert_array_almost_equal(
+        dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test)
+    )
+    assert_array_almost_equal(
+        dense_svm.decision_function(X_test_dense),
+        sparse_svm.decision_function(X_test_dense),
+    )
+    if isinstance(dense_svm, svm.OneClassSVM):
+        msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
+    else:
+        assert_array_almost_equal(
+            dense_svm.predict_proba(X_test_dense),
+            sparse_svm.predict_proba(X_test),
+            decimal=4,
+        )
+        msg = "cannot use sparse input in 'SVC' trained on dense data"
+    if sparse.issparse(X_test):
+        with pytest.raises(ValueError, match=msg):
+            dense_svm.predict(X_test)
+
+
+@skip_if_32bit
+@pytest.mark.parametrize(
+    "X_train, y_train, X_test",
+    [
+        [X, Y, T],
[X2, Y2, T2], + [X_blobs[:80], y_blobs[:80], X_blobs[80:]], + [iris.data, iris.target, iris.data], + ], +) +@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS) +def test_svc(X_train, y_train, X_test, kernel, sparse_container): + """Check that sparse SVC gives the same result as SVC.""" + X_train = sparse_container(X_train) + + clf = svm.SVC( + gamma=1, + kernel=kernel, + probability=True, + random_state=0, + decision_function_shape="ovo", + ) + check_svm_model_equal(clf, X_train, y_train, X_test) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_unsorted_indices(csr_container): + # test that the result with sorted and unsorted indices in csr is the same + # we use a subset of digits as iris, blobs or make_classification didn't + # show the problem + X, y = load_digits(return_X_y=True) + X_test = csr_container(X[50:100]) + X, y = X[:50], y[:50] + + X_sparse = csr_container(X) + coef_dense = ( + svm.SVC(kernel="linear", probability=True, random_state=0).fit(X, y).coef_ + ) + sparse_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit( + X_sparse, y + ) + coef_sorted = sparse_svc.coef_ + # make sure dense and sparse SVM give the same result + assert_allclose(coef_dense, coef_sorted.toarray()) + + # reverse each row's indices + def scramble_indices(X): + new_data = [] + new_indices = [] + for i in range(1, len(X.indptr)): + row_slice = slice(*X.indptr[i - 1 : i + 1]) + new_data.extend(X.data[row_slice][::-1]) + new_indices.extend(X.indices[row_slice][::-1]) + return csr_container((new_data, new_indices, X.indptr), shape=X.shape) + + X_sparse_unsorted = scramble_indices(X_sparse) + X_test_unsorted = scramble_indices(X_test) + + assert not X_sparse_unsorted.has_sorted_indices + assert not X_test_unsorted.has_sorted_indices + + unsorted_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit( + X_sparse_unsorted, y + ) + coef_unsorted = unsorted_svc.coef_ + # make sure unsorted indices give same result + assert_allclose(coef_unsorted.toarray(), coef_sorted.toarray()) + assert_allclose( + sparse_svc.predict_proba(X_test_unsorted), sparse_svc.predict_proba(X_test) + ) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_svc_with_custom_kernel(lil_container): + def kfunc(x, y): + return safe_sparse_dot(x, y.T) + + X_sp = lil_container(X) + clf_lin = svm.SVC(kernel="linear").fit(X_sp, Y) + clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y) + assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp)) + + +@skip_if_32bit +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf"]) +def test_svc_iris(csr_container, kernel): + # Test the sparse SVC with the iris dataset + iris_data_sp = csr_container(iris.data) + + sp_clf = svm.SVC(kernel=kernel).fit(iris_data_sp, iris.target) + clf = svm.SVC(kernel=kernel).fit(iris.data, iris.target) + + assert_allclose(clf.support_vectors_, sp_clf.support_vectors_.toarray()) + assert_allclose(clf.dual_coef_, sp_clf.dual_coef_.toarray()) + assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp)) + if kernel == "linear": + assert_allclose(clf.coef_, sp_clf.coef_.toarray()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_decision_function(csr_container): + # Test decision_function + + # Sanity check, test that decision_function implemented in python + # returns the same as the one in libsvm + + 
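+    # For a linear kernel the ovo decision values can be recomputed by hand
+    # as safe_sparse_dot(X, coef_.T) + intercept_, which is what the checks
+    # below compare against libsvm's own decision_function.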
# multi class: + iris_data_sp = csr_container(iris.data) + svc = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo") + clf = svc.fit(iris_data_sp, iris.target) + + dec = safe_sparse_dot(iris_data_sp, clf.coef_.T) + clf.intercept_ + + assert_allclose(dec, clf.decision_function(iris_data_sp)) + + # binary: + clf.fit(X, Y) + dec = np.dot(X, clf.coef_.T) + clf.intercept_ + prediction = clf.predict(X) + assert_allclose(dec.ravel(), clf.decision_function(X)) + assert_allclose( + prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()] + ) + expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0]) + assert_array_almost_equal(clf.decision_function(X), expected, decimal=2) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_error(lil_container): + # Test that it gives proper exception on deficient input + clf = svm.SVC() + X_sp = lil_container(X) + + Y2 = Y[:-1] # wrong dimensions for labels + with pytest.raises(ValueError): + clf.fit(X_sp, Y2) + + clf.fit(X_sp, Y) + assert_array_equal(clf.predict(T), true_result) + + +@pytest.mark.parametrize( + "lil_container, dok_container", zip(LIL_CONTAINERS, DOK_CONTAINERS) +) +def test_linearsvc(lil_container, dok_container): + # Similar to test_SVC + X_sp = lil_container(X) + X2_sp = dok_container(X2) + + clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y) + sp_clf = svm.LinearSVC(dual="auto", random_state=0).fit(X_sp, Y) + + assert sp_clf.fit_intercept + + assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) + assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) + + assert_allclose(clf.predict(X), sp_clf.predict(X_sp)) + + clf.fit(X2, Y2) + sp_clf.fit(X2_sp, Y2) + + assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) + assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_linearsvc_iris(csr_container): + # Test the sparse LinearSVC with the iris dataset + iris_data_sp = csr_container(iris.data) + + sp_clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris_data_sp, iris.target) + clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, iris.target) + + assert clf.fit_intercept == sp_clf.fit_intercept + + assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1) + assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1) + assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp)) + + # check decision_function + pred = np.argmax(sp_clf.decision_function(iris_data_sp), axis=1) + assert_allclose(pred, clf.predict(iris.data)) + + # sparsify the coefficients on both models and check that they still + # produce the same results + clf.sparsify() + assert_array_equal(pred, clf.predict(iris_data_sp)) + sp_clf.sparsify() + assert_array_equal(pred, sp_clf.predict(iris_data_sp)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_weight(csr_container): + # Test class weights + X_, y_ = make_classification( + n_samples=200, n_features=100, weights=[0.833, 0.167], random_state=0 + ) + + X_ = csr_container(X_) + for clf in ( + linear_model.LogisticRegression(), + svm.LinearSVC(dual="auto", random_state=0), + svm.SVC(), + ): + clf.set_params(class_weight={0: 5}) + clf.fit(X_[:180], y_[:180]) + y_pred = clf.predict(X_[180:]) + assert np.sum(y_pred == y_[180:]) >= 11 + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_sample_weights(lil_container): + # Test weights on individual samples + X_sp = 
lil_container(X) + + clf = svm.SVC() + clf.fit(X_sp, Y) + assert_array_equal(clf.predict([X[2]]), [1.0]) + + sample_weight = [0.1] * 3 + [10] * 3 + clf.fit(X_sp, Y, sample_weight=sample_weight) + assert_array_equal(clf.predict([X[2]]), [2.0]) + + +def test_sparse_liblinear_intercept_handling(): + # Test that sparse liblinear honours intercept_scaling param + test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC) + + +@pytest.mark.parametrize( + "X_train, y_train, X_test", + [ + [X, None, T], + [X2, None, T2], + [X_blobs[:80], None, X_blobs[80:]], + [iris.data, None, iris.data], + ], +) +@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS) +@skip_if_32bit +def test_sparse_oneclasssvm(X_train, y_train, X_test, kernel, sparse_container): + # Check that sparse OneClassSVM gives the same result as dense OneClassSVM + X_train = sparse_container(X_train) + + clf = svm.OneClassSVM(gamma=1, kernel=kernel) + check_svm_model_equal(clf, X_train, y_train, X_test) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_realdata(csr_container): + # Test on a subset from the 20newsgroups dataset. + # This catches some bugs if input is not correctly converted into + # sparse format or weights are not correctly initialized. + data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069]) + + # SVC does not support large sparse, so we specify int32 indices + # In this case, `csr_matrix` automatically uses int32 regardless of the dtypes of + # `indices` and `indptr` but `csr_array` may or may not use the same dtype as + # `indices` and `indptr`, which would be int64 if not specified + indices = np.array([6, 5, 35, 31], dtype=np.int32) + indptr = np.array([0] * 8 + [1] * 32 + [2] * 38 + [4] * 3, dtype=np.int32) + + X = csr_container((data, indices, indptr)) + y = np.array( + [ + 1.0, + 0.0, + 2.0, + 2.0, + 1.0, + 1.0, + 1.0, + 2.0, + 2.0, + 0.0, + 1.0, + 2.0, + 2.0, + 0.0, + 2.0, + 0.0, + 3.0, + 0.0, + 3.0, + 0.0, + 1.0, + 1.0, + 3.0, + 2.0, + 3.0, + 2.0, + 0.0, + 3.0, + 1.0, + 0.0, + 2.0, + 1.0, + 2.0, + 0.0, + 1.0, + 0.0, + 2.0, + 3.0, + 1.0, + 3.0, + 0.0, + 1.0, + 0.0, + 0.0, + 2.0, + 0.0, + 1.0, + 2.0, + 2.0, + 2.0, + 3.0, + 2.0, + 0.0, + 3.0, + 2.0, + 1.0, + 2.0, + 3.0, + 2.0, + 2.0, + 0.0, + 1.0, + 0.0, + 1.0, + 2.0, + 3.0, + 0.0, + 0.0, + 2.0, + 2.0, + 1.0, + 3.0, + 1.0, + 1.0, + 0.0, + 1.0, + 2.0, + 1.0, + 1.0, + 3.0, + ] + ) + + clf = svm.SVC(kernel="linear").fit(X.toarray(), y) + sp_clf = svm.SVC(kernel="linear").fit(X.tocoo(), y) + + assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) + assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_sparse_svc_clone_with_callable_kernel(lil_container): + # Test that the "dense_fit" is called even though we use sparse input + # meaning that everything works fine. 
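+    # With a callable kernel the sparse input is handled by the dense code
+    # path (there is no sparse implementation for callable kernels), so the
+    # sparse-fitted clone should behave exactly like the dense model.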
+ a = svm.SVC(C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0) + b = base.clone(a) + + X_sp = lil_container(X) + b.fit(X_sp, Y) + pred = b.predict(X_sp) + b.predict_proba(X_sp) + + dense_svm = svm.SVC( + C=1, kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0 + ) + pred_dense = dense_svm.fit(X, Y).predict(X) + assert_array_equal(pred_dense, pred) + # b.decision_function(X_sp) # XXX : should be supported + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_timeout(lil_container): + sp = svm.SVC( + C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0, max_iter=1 + ) + warning_msg = ( + r"Solver terminated early \(max_iter=1\). Consider pre-processing " + r"your data with StandardScaler or MinMaxScaler." + ) + with pytest.warns(ConvergenceWarning, match=warning_msg): + sp.fit(lil_container(X), Y) + + +def test_consistent_proba(): + a = svm.SVC(probability=True, max_iter=1, random_state=0) + with ignore_warnings(category=ConvergenceWarning): + proba_1 = a.fit(X, Y).predict_proba(X) + a = svm.SVC(probability=True, max_iter=1, random_state=0) + with ignore_warnings(category=ConvergenceWarning): + proba_2 = a.fit(X, Y).predict_proba(X) + assert_allclose(proba_1, proba_2) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py new file mode 100644 index 0000000000000000000000000000000000000000..e1c6e36af28fb5da770fe6aa77191ec76a66b99b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py @@ -0,0 +1,1434 @@ +""" +Testing for Support Vector Machine module (sklearn.svm) + +TODO: remove hard coded numerical results when possible +""" +import re + +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +from sklearn import base, datasets, linear_model, metrics, svm +from sklearn.datasets import make_blobs, make_classification +from sklearn.exceptions import ( + ConvergenceWarning, + NotFittedError, + UndefinedMetricWarning, +) +from sklearn.metrics import f1_score +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import train_test_split +from sklearn.multiclass import OneVsRestClassifier + +# mypy error: Module 'sklearn.svm' has no attribute '_libsvm' +from sklearn.svm import ( # type: ignore + SVR, + LinearSVC, + LinearSVR, + NuSVR, + OneClassSVM, + _libsvm, +) +from sklearn.svm._classes import _validate_dual_parameter +from sklearn.utils import check_random_state, shuffle +from sklearn.utils._testing import ignore_warnings +from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS +from sklearn.utils.validation import _num_samples + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +Y = [1, 1, 1, 2, 2, 2] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [1, 2, 2] + +# also load the iris dataset +iris = datasets.load_iris() +rng = check_random_state(42) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + + +def test_libsvm_parameters(): + # Test parameters on classes that make use of libsvm. 
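+    # For this linearly separable toy sample only the two points closest to
+    # the margin, X[1] and X[3], end up as support vectors, with dual
+    # coefficients +/-0.25 and a zero intercept; the hard-coded assertions
+    # below encode that geometry.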
+ clf = svm.SVC(kernel="linear").fit(X, Y) + assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]]) + assert_array_equal(clf.support_, [1, 3]) + assert_array_equal(clf.support_vectors_, (X[1], X[3])) + assert_array_equal(clf.intercept_, [0.0]) + assert_array_equal(clf.predict(X), Y) + + +def test_libsvm_iris(): + # Check consistency on dataset iris. + + # shuffle the dataset so that labels are not ordered + for k in ("linear", "rbf"): + clf = svm.SVC(kernel=k).fit(iris.data, iris.target) + assert np.mean(clf.predict(iris.data) == iris.target) > 0.9 + assert hasattr(clf, "coef_") == (k == "linear") + + assert_array_equal(clf.classes_, np.sort(clf.classes_)) + + # check also the low-level API + # We unpack the values to create a dictionary with some of the return values + # from Libsvm's fit. + ( + libsvm_support, + libsvm_support_vectors, + libsvm_n_class_SV, + libsvm_sv_coef, + libsvm_intercept, + libsvm_probA, + libsvm_probB, + # libsvm_fit_status and libsvm_n_iter won't be used below. + libsvm_fit_status, + libsvm_n_iter, + ) = _libsvm.fit(iris.data, iris.target.astype(np.float64)) + + model_params = { + "support": libsvm_support, + "SV": libsvm_support_vectors, + "nSV": libsvm_n_class_SV, + "sv_coef": libsvm_sv_coef, + "intercept": libsvm_intercept, + "probA": libsvm_probA, + "probB": libsvm_probB, + } + pred = _libsvm.predict(iris.data, **model_params) + assert np.mean(pred == iris.target) > 0.95 + + # We unpack the values to create a dictionary with some of the return values + # from Libsvm's fit. + ( + libsvm_support, + libsvm_support_vectors, + libsvm_n_class_SV, + libsvm_sv_coef, + libsvm_intercept, + libsvm_probA, + libsvm_probB, + # libsvm_fit_status and libsvm_n_iter won't be used below. + libsvm_fit_status, + libsvm_n_iter, + ) = _libsvm.fit(iris.data, iris.target.astype(np.float64), kernel="linear") + + model_params = { + "support": libsvm_support, + "SV": libsvm_support_vectors, + "nSV": libsvm_n_class_SV, + "sv_coef": libsvm_sv_coef, + "intercept": libsvm_intercept, + "probA": libsvm_probA, + "probB": libsvm_probB, + } + pred = _libsvm.predict(iris.data, **model_params, kernel="linear") + assert np.mean(pred == iris.target) > 0.95 + + pred = _libsvm.cross_validation( + iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0 + ) + assert np.mean(pred == iris.target) > 0.95 + + # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence + # we should get deterministic results (assuming that there is no other + # thread calling this wrapper calling `srand` concurrently). + pred2 = _libsvm.cross_validation( + iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0 + ) + assert_array_equal(pred, pred2) + + +def test_precomputed(): + # SVC with a precomputed kernel. + # We test it with a toy dataset and with iris. + clf = svm.SVC(kernel="precomputed") + # Gram matrix for train data (square matrix) + # (we use just a linear kernel) + K = np.dot(X, np.array(X).T) + clf.fit(K, Y) + # Gram matrix for test data (rectangular matrix) + KT = np.dot(T, np.array(X).T) + pred = clf.predict(KT) + with pytest.raises(ValueError): + clf.predict(KT.T) + + assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]]) + assert_array_equal(clf.support_, [1, 3]) + assert_array_equal(clf.intercept_, [0]) + assert_array_almost_equal(clf.support_, [1, 3]) + assert_array_equal(pred, true_result) + + # Gram matrix for test data but compute KT[i,j] + # for support vectors j only. 
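+    # Entries in columns of non-support vectors may remain zero: prediction
+    # only consumes K(test_i, SV_j) for support indices j, weighted by the
+    # corresponding non-zero dual coefficients.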
+    KT = np.zeros_like(KT)
+    for i in range(len(T)):
+        for j in clf.support_:
+            KT[i, j] = np.dot(T[i], X[j])
+
+    pred = clf.predict(KT)
+    assert_array_equal(pred, true_result)
+
+    # same as before, but using a callable function instead of the kernel
+    # matrix. kernel is just a linear kernel
+
+    def kfunc(x, y):
+        return np.dot(x, y.T)
+
+    clf = svm.SVC(kernel=kfunc)
+    clf.fit(np.array(X), Y)
+    pred = clf.predict(T)
+
+    assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
+    assert_array_equal(clf.intercept_, [0])
+    assert_array_almost_equal(clf.support_, [1, 3])
+    assert_array_equal(pred, true_result)
+
+    # test a precomputed kernel with the iris dataset
+    # and check parameters against a linear SVC
+    clf = svm.SVC(kernel="precomputed")
+    clf2 = svm.SVC(kernel="linear")
+    K = np.dot(iris.data, iris.data.T)
+    clf.fit(K, iris.target)
+    clf2.fit(iris.data, iris.target)
+    pred = clf.predict(K)
+    assert_array_almost_equal(clf.support_, clf2.support_)
+    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
+    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
+    assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
+
+    # Gram matrix for test data but compute KT[i,j]
+    # for support vectors j only.
+    K = np.zeros_like(K)
+    for i in range(len(iris.data)):
+        for j in clf.support_:
+            K[i, j] = np.dot(iris.data[i], iris.data[j])
+
+    pred = clf.predict(K)
+    assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
+
+    clf = svm.SVC(kernel=kfunc)
+    clf.fit(iris.data, iris.target)
+    assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
+
+
+def test_svr():
+    # Test Support Vector Regression
+
+    diabetes = datasets.load_diabetes()
+    for clf in (
+        svm.NuSVR(kernel="linear", nu=0.4, C=1.0),
+        svm.NuSVR(kernel="linear", nu=0.4, C=10.0),
+        svm.SVR(kernel="linear", C=10.0),
+        svm.LinearSVR(dual="auto", C=10.0),
+        svm.LinearSVR(dual="auto", C=10.0),
+    ):
+        clf.fit(diabetes.data, diabetes.target)
+        assert clf.score(diabetes.data, diabetes.target) > 0.02
+
+    # non-regression test; previously, BaseLibSVM would check that
+    # len(np.unique(y)) < 2, which must only be done for SVC
+    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
+    svm.LinearSVR(dual="auto").fit(diabetes.data, np.ones(len(diabetes.data)))
+
+
+def test_linearsvr():
+    # check that SVR(kernel='linear') and LinearSVR() give
+    # comparable results
+    diabetes = datasets.load_diabetes()
+    lsvr = svm.LinearSVR(C=1e3, dual="auto").fit(diabetes.data, diabetes.target)
+    score1 = lsvr.score(diabetes.data, diabetes.target)
+
+    svr = svm.SVR(kernel="linear", C=1e3).fit(diabetes.data, diabetes.target)
+    score2 = svr.score(diabetes.data, diabetes.target)
+
+    assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001)
+    assert_almost_equal(score1, score2, 2)
+
+
+def test_linearsvr_fit_sampleweight():
+    # check correct result when sample_weight is 1
+    # check that SVR(kernel='linear') and LinearSVR() give
+    # comparable results
+    diabetes = datasets.load_diabetes()
+    n_samples = len(diabetes.target)
+    unit_weight = np.ones(n_samples)
+    lsvr = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit(
+        diabetes.data, diabetes.target, sample_weight=unit_weight
+    )
+    score1 = lsvr.score(diabetes.data, diabetes.target)
+
+    lsvr_no_weight = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit(
+        diabetes.data, diabetes.target
+    )
+    score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
+
+    assert_allclose(
+        np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001 + ) + assert_almost_equal(score1, score2, 2) + + # check that fit(X) = fit([X1, X2, X3], sample_weight = [n1, n2, n3]) where + # X = X1 repeated n1 times, X2 repeated n2 times and so forth + random_state = check_random_state(0) + random_weight = random_state.randint(0, 10, n_samples) + lsvr_unflat = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit( + diabetes.data, diabetes.target, sample_weight=random_weight + ) + score3 = lsvr_unflat.score( + diabetes.data, diabetes.target, sample_weight=random_weight + ) + + X_flat = np.repeat(diabetes.data, random_weight, axis=0) + y_flat = np.repeat(diabetes.target, random_weight, axis=0) + lsvr_flat = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit( + X_flat, y_flat + ) + score4 = lsvr_flat.score(X_flat, y_flat) + + assert_almost_equal(score3, score4, 2) + + +def test_svr_errors(): + X = [[0.0], [1.0]] + y = [0.0, 0.5] + + # Bad kernel + clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]])) + clf.fit(X, y) + with pytest.raises(ValueError): + clf.predict(X) + + +def test_oneclass(): + # Test OneClassSVM + clf = svm.OneClassSVM() + clf.fit(X) + pred = clf.predict(T) + + assert_array_equal(pred, [1, -1, -1]) + assert pred.dtype == np.dtype("intp") + assert_array_almost_equal(clf.intercept_, [-1.218], decimal=3) + assert_array_almost_equal(clf.dual_coef_, [[0.750, 0.750, 0.750, 0.750]], decimal=3) + with pytest.raises(AttributeError): + (lambda: clf.coef_)() + + +def test_oneclass_decision_function(): + # Test OneClassSVM decision function + clf = svm.OneClassSVM() + rnd = check_random_state(2) + + # Generate train data + X = 0.3 * rnd.randn(100, 2) + X_train = np.r_[X + 2, X - 2] + + # Generate some regular novel observations + X = 0.3 * rnd.randn(20, 2) + X_test = np.r_[X + 2, X - 2] + # Generate some abnormal novel observations + X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) + + # fit the model + clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) + clf.fit(X_train) + + # predict things + y_pred_test = clf.predict(X_test) + assert np.mean(y_pred_test == 1) > 0.9 + y_pred_outliers = clf.predict(X_outliers) + assert np.mean(y_pred_outliers == -1) > 0.9 + dec_func_test = clf.decision_function(X_test) + assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) + dec_func_outliers = clf.decision_function(X_outliers) + assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) + + +def test_oneclass_score_samples(): + X_train = [[1, 1], [1, 2], [2, 1]] + clf = svm.OneClassSVM(gamma=1).fit(X_train) + assert_array_equal( + clf.score_samples([[2.0, 2.0]]), + clf.decision_function([[2.0, 2.0]]) + clf.offset_, + ) + + +def test_tweak_params(): + # Make sure some tweaking of parameters works. + # We change clf.dual_coef_ at run time and expect .predict() to change + # accordingly. Notice that this is not trivial since it involves a lot + # of C/Python copying in the libsvm bindings. + # The success of this test ensures that the mapping between libsvm and + # the python classifier is complete. + clf = svm.SVC(kernel="linear", C=1.0) + clf.fit(X, Y) + assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]]) + assert_array_equal(clf.predict([[-0.1, -0.1]]), [1]) + clf._dual_coef_ = np.array([[0.0, 1.0]]) + assert_array_equal(clf.predict([[-0.1, -0.1]]), [2]) + + +def test_probability(): + # Predict probabilities using SVC + # This uses cross validation, so we use a slightly bigger testing set. 
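+    # probability=True enables Platt scaling: a sigmoid (parameters
+    # probA_/probB_) is fitted to the decision values via an internal cross
+    # validation, hence the need for a dataset larger than the toy sample.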
+
+    for clf in (
+        svm.SVC(probability=True, random_state=0, C=1.0),
+        svm.NuSVC(probability=True, random_state=0),
+    ):
+        clf.fit(iris.data, iris.target)
+
+        prob_predict = clf.predict_proba(iris.data)
+        assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
+        assert np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9
+
+        assert_almost_equal(
+            clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8
+        )
+
+
+def test_decision_function():
+    # Test decision_function
+    # Sanity check, test that decision_function implemented in python
+    # returns the same as the one in libsvm
+    # multi class:
+    clf = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo").fit(
+        iris.data, iris.target
+    )
+
+    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
+
+    assert_array_almost_equal(dec, clf.decision_function(iris.data))
+
+    # binary:
+    clf.fit(X, Y)
+    dec = np.dot(X, clf.coef_.T) + clf.intercept_
+    prediction = clf.predict(X)
+    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
+    assert_array_almost_equal(
+        prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int)]
+    )
+    expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0])
+    assert_array_almost_equal(clf.decision_function(X), expected, 2)
+
+    # kernel binary:
+    clf = svm.SVC(kernel="rbf", gamma=1, decision_function_shape="ovo")
+    clf.fit(X, Y)
+
+    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
+    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
+    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
+
+
+@pytest.mark.parametrize("SVM", (svm.SVC, svm.NuSVC))
+def test_decision_function_shape(SVM):
+    # check that decision_function_shape='ovr' or 'ovo' gives
+    # correct shape and is consistent with predict
+
+    clf = SVM(kernel="linear", decision_function_shape="ovr").fit(
+        iris.data, iris.target
+    )
+    dec = clf.decision_function(iris.data)
+    assert dec.shape == (len(iris.data), 3)
+    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
+
+    # with five classes:
+    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
+    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+
+    clf = SVM(kernel="linear", decision_function_shape="ovr").fit(X_train, y_train)
+    dec = clf.decision_function(X_test)
+    assert dec.shape == (len(X_test), 5)
+    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
+
+    # check the shape with decision_function_shape='ovo'
+    clf = SVM(kernel="linear", decision_function_shape="ovo").fit(X_train, y_train)
+    dec = clf.decision_function(X_train)
+    assert dec.shape == (len(X_train), 10)
+
+
+def test_svr_predict():
+    # Test SVR's decision_function
+    # Sanity check, test that predict implemented in python
+    # returns the same as the one in libsvm
+
+    X = iris.data
+    y = iris.target
+
+    # linear kernel
+    reg = svm.SVR(kernel="linear", C=0.1).fit(X, y)
+
+    dec = np.dot(X, reg.coef_.T) + reg.intercept_
+    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
+
+    # rbf kernel
+    reg = svm.SVR(kernel="rbf", gamma=1).fit(X, y)
+
+    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
+    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
+    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
+
+
+def test_weight():
+    # Test class weights
+    clf = svm.SVC(class_weight={1: 0.1})
+    # we give a small weight to class 1
+    clf.fit(X, Y)
+    # so all predicted values belong to class 2
+    assert_array_almost_equal(clf.predict(X), [2] * 6)
+
+    X_, y_ = make_classification(
n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2 + ) + + for clf in ( + linear_model.LogisticRegression(), + svm.LinearSVC(dual="auto", random_state=0), + svm.SVC(), + ): + clf.set_params(class_weight={0: 0.1, 1: 10}) + clf.fit(X_[:100], y_[:100]) + y_pred = clf.predict(X_[100:]) + assert f1_score(y_[100:], y_pred) > 0.3 + + +@pytest.mark.parametrize("estimator", [svm.SVC(C=1e-2), svm.NuSVC()]) +def test_svm_classifier_sided_sample_weight(estimator): + # fit a linear SVM and check that giving more weight to opposed samples + # in the space will flip the decision toward these samples. + X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]] + estimator.set_params(kernel="linear") + + # check that with unit weights, a sample is supposed to be predicted on + # the boundary + sample_weight = [1] * 6 + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.decision_function([[-1.0, 1.0]]) + assert y_pred == pytest.approx(0) + + # give more weights to opposed samples + sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.decision_function([[-1.0, 1.0]]) + assert y_pred < 0 + + sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.decision_function([[-1.0, 1.0]]) + assert y_pred > 0 + + +@pytest.mark.parametrize("estimator", [svm.SVR(C=1e-2), svm.NuSVR(C=1e-2)]) +def test_svm_regressor_sided_sample_weight(estimator): + # similar test to test_svm_classifier_sided_sample_weight but for + # SVM regressors + X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]] + estimator.set_params(kernel="linear") + + # check that with unit weights, a sample is supposed to be predicted on + # the boundary + sample_weight = [1] * 6 + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.predict([[-1.0, 1.0]]) + assert y_pred == pytest.approx(1.5) + + # give more weights to opposed samples + sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.predict([[-1.0, 1.0]]) + assert y_pred < 1.5 + + sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.predict([[-1.0, 1.0]]) + assert y_pred > 1.5 + + +def test_svm_equivalence_sample_weight_C(): + # test that rescaling all samples is the same as changing C + clf = svm.SVC() + clf.fit(X, Y) + dual_coef_no_weight = clf.dual_coef_ + clf.set_params(C=100) + clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) + assert_allclose(dual_coef_no_weight, clf.dual_coef_) + + +@pytest.mark.parametrize( + "Estimator, err_msg", + [ + (svm.SVC, "Invalid input - all samples have zero or negative weights."), + (svm.NuSVC, "(negative dimensions are not allowed|nu is infeasible)"), + (svm.SVR, "Invalid input - all samples have zero or negative weights."), + (svm.NuSVR, "Invalid input - all samples have zero or negative weights."), + (svm.OneClassSVM, "Invalid input - all samples have zero or negative weights."), + ], + ids=["SVC", "NuSVC", "SVR", "NuSVR", "OneClassSVM"], +) +@pytest.mark.parametrize( + "sample_weight", + [[0] * len(Y), [-0.3] * len(Y)], + ids=["weights-are-zero", "weights-are-negative"], +) +def test_negative_sample_weights_mask_all_samples(Estimator, err_msg, sample_weight): + est = Estimator(kernel="linear") + with pytest.raises(ValueError, match=err_msg): + est.fit(X, Y, sample_weight=sample_weight) + + +@pytest.mark.parametrize( + "Classifier, err_msg", + [ 
+        (
+            svm.SVC,
+            (
+                "Invalid input - all samples with positive weights belong to the same"
+                " class"
+            ),
+        ),
+        (svm.NuSVC, "specified nu is infeasible"),
+    ],
+    ids=["SVC", "NuSVC"],
+)
+@pytest.mark.parametrize(
+    "sample_weight",
+    [[0, -0.5, 0, 1, 1, 1], [1, 1, 1, 0, -0.1, -0.3]],
+    ids=["mask-label-1", "mask-label-2"],
+)
+def test_negative_weights_svc_leave_just_one_label(Classifier, err_msg, sample_weight):
+    clf = Classifier(kernel="linear")
+    with pytest.raises(ValueError, match=err_msg):
+        clf.fit(X, Y, sample_weight=sample_weight)
+
+
+@pytest.mark.parametrize(
+    "Classifier, model",
+    [
+        (svm.SVC, {"when-left": [0.3998, 0.4], "when-right": [0.4, 0.3999]}),
+        (svm.NuSVC, {"when-left": [0.3333, 0.3333], "when-right": [0.3333, 0.3333]}),
+    ],
+    ids=["SVC", "NuSVC"],
+)
+@pytest.mark.parametrize(
+    "sample_weight, mask_side",
+    [([1, -0.5, 1, 1, 1, 1], "when-left"), ([1, 1, 1, 0, 1, 1], "when-right")],
+    ids=["partial-mask-label-1", "partial-mask-label-2"],
+)
+def test_negative_weights_svc_leave_two_labels(
+    Classifier, model, sample_weight, mask_side
+):
+    clf = Classifier(kernel="linear")
+    clf.fit(X, Y, sample_weight=sample_weight)
+    assert_allclose(clf.coef_, [model[mask_side]], rtol=1e-3)
+
+
+@pytest.mark.parametrize(
+    "Estimator", [svm.SVC, svm.NuSVC, svm.NuSVR], ids=["SVC", "NuSVC", "NuSVR"]
+)
+@pytest.mark.parametrize(
+    "sample_weight",
+    [[1, -0.5, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1]],
+    ids=["partial-mask-label-1", "partial-mask-label-2"],
+)
+def test_negative_weight_equal_coeffs(Estimator, sample_weight):
+    # model generates equal coefficients
+    est = Estimator(kernel="linear")
+    est.fit(X, Y, sample_weight=sample_weight)
+    coef = np.abs(est.coef_).ravel()
+    assert coef[0] == pytest.approx(coef[1], rel=1e-3)
+
+
+@ignore_warnings(category=UndefinedMetricWarning)
+def test_auto_weight():
+    # Test class weights for imbalanced data
+    from sklearn.linear_model import LogisticRegression
+
+    # We take as dataset the two-dimensional projection of iris so
+    # that it is not separable and remove half of the samples from
+    # one class.
+    # We add one to the targets as a non-regression test:
+    # class_weight="balanced"
+    # used to work only when the labels were a range [0..K).
+    from sklearn.utils import compute_class_weight
+
+    X, y = iris.data[:, :2], iris.target + 1
+    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
+
+    classes = np.unique(y[unbalanced])
+    class_weights = compute_class_weight("balanced", classes=classes, y=y[unbalanced])
+    assert np.argmax(class_weights) == 2
+
+    for clf in (
+        svm.SVC(kernel="linear"),
+        svm.LinearSVC(dual="auto", random_state=0),
+        LogisticRegression(),
+    ):
+        # check that score is better when class_weight='balanced' is set.
+        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
+        clf.set_params(class_weight="balanced")
+        y_pred_balanced = clf.fit(
+            X[unbalanced],
+            y[unbalanced],
+        ).predict(X)
+        assert metrics.f1_score(y, y_pred, average="macro") <= metrics.f1_score(
+            y, y_pred_balanced, average="macro"
+        )
+
+
+@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
+def test_bad_input(lil_container):
+    # Test dimensions for labels
+    Y2 = Y[:-1]  # wrong dimensions for labels
+    with pytest.raises(ValueError):
+        svm.SVC().fit(X, Y2)
+
+    # Test with arrays that are non-contiguous.
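+    # Fortran-ordered X and a strided, non-contiguous y should be accepted
+    # and copied as needed rather than rejected; the fits below must still
+    # reproduce true_result.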
+    for clf in (svm.SVC(), svm.LinearSVC(dual="auto", random_state=0)):
+        Xf = np.asfortranarray(X)
+        assert not Xf.flags["C_CONTIGUOUS"]
+        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
+        yf = yf[:, -1]
+        assert not yf.flags["F_CONTIGUOUS"]
+        assert not yf.flags["C_CONTIGUOUS"]
+        clf.fit(Xf, yf)
+        assert_array_equal(clf.predict(T), true_result)
+
+    # error for precomputed kernels
+    clf = svm.SVC(kernel="precomputed")
+    with pytest.raises(ValueError):
+        clf.fit(X, Y)
+
+    # predict with sparse input when trained with dense
+    clf = svm.SVC().fit(X, Y)
+    with pytest.raises(ValueError):
+        clf.predict(lil_container(X))
+
+    Xt = np.array(X).T
+    clf.fit(np.dot(X, Xt), Y)
+    with pytest.raises(ValueError):
+        clf.predict(X)
+
+    clf = svm.SVC()
+    clf.fit(X, Y)
+    with pytest.raises(ValueError):
+        clf.predict(Xt)
+
+
+def test_svc_nonfinite_params():
+    # Check SVC throws ValueError when dealing with non-finite parameter values
+    rng = np.random.RandomState(0)
+    n_samples = 10
+    fmax = np.finfo(np.float64).max
+    X = fmax * rng.uniform(size=(n_samples, 2))
+    y = rng.randint(0, 2, size=n_samples)
+
+    clf = svm.SVC()
+    msg = "The dual coefficients or intercepts are not finite"
+    with pytest.raises(ValueError, match=msg):
+        clf.fit(X, y)
+
+
+def test_unicode_kernel():
+    # Test that a unicode kernel name does not cause a TypeError
+    clf = svm.SVC(kernel="linear", probability=True)
+    clf.fit(X, Y)
+    clf.predict_proba(T)
+    _libsvm.cross_validation(
+        iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
+    )
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sparse_precomputed(csr_container):
+    clf = svm.SVC(kernel="precomputed")
+    sparse_gram = csr_container([[1, 0], [0, 1]])
+    with pytest.raises(TypeError, match="Sparse precomputed"):
+        clf.fit(sparse_gram, [0, 1])
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sparse_fit_support_vectors_empty(csr_container):
+    # Regression test for #14893
+    X_train = csr_container([[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]])
+    y_train = np.array([0.04, 0.04, 0.10, 0.16])
+    model = svm.SVR(kernel="linear")
+    model.fit(X_train, y_train)
+    assert not model.support_vectors_.data.size
+    assert not model.dual_coef_.data.size
+
+
+@pytest.mark.parametrize("loss", ["hinge", "squared_hinge"])
+@pytest.mark.parametrize("penalty", ["l1", "l2"])
+@pytest.mark.parametrize("dual", [True, False])
+def test_linearsvc_parameters(loss, penalty, dual):
+    # Test possible parameter combinations in LinearSVC
+    # Generate list of possible parameter combinations
+    X, y = make_classification(n_samples=5, n_features=5, random_state=0)
+
+    clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual, random_state=0)
+    if (
+        (loss, penalty) == ("hinge", "l1")
+        or (loss, penalty, dual) == ("hinge", "l2", False)
+        or (penalty, dual) == ("l1", True)
+    ):
+        with pytest.raises(
+            ValueError,
+            match="Unsupported set of arguments.*penalty='%s.*loss='%s.*dual=%s"
+            % (penalty, loss, dual),
+        ):
+            clf.fit(X, y)
+    else:
+        clf.fit(X, y)
+
+
+def test_linearsvc():
+    # Test basic routines using LinearSVC
+    clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y)
+
+    # by default should have intercept
+    assert clf.fit_intercept
+
+    assert_array_equal(clf.predict(T), true_result)
+    assert_array_almost_equal(clf.intercept_, [0], decimal=3)
+
+    # the same with l1 penalty
+    clf = svm.LinearSVC(
+        penalty="l1", loss="squared_hinge", dual=False, random_state=0
+    ).fit(X, Y)
+    assert_array_equal(clf.predict(T), true_result)
+
+    # l2
+    clf = svm.LinearSVC(penalty="l2", dual=True, random_state=0).fit(X, Y)
+    assert_array_equal(clf.predict(T), true_result)
+
+    # l2 penalty, l1 loss
+    clf = svm.LinearSVC(penalty="l2", loss="hinge", dual=True, random_state=0)
+    clf.fit(X, Y)
+    assert_array_equal(clf.predict(T), true_result)
+
+    # also test the decision function
+    dec = clf.decision_function(T)
+    res = (dec > 0).astype(int) + 1
+    assert_array_equal(res, true_result)
+
+
+def test_linearsvc_crammer_singer():
+    # Test LinearSVC with crammer_singer multi-class svm
+    ovr_clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, iris.target)
+    cs_clf = svm.LinearSVC(dual="auto", multi_class="crammer_singer", random_state=0)
+    cs_clf.fit(iris.data, iris.target)
+
+    # similar prediction for ovr and crammer-singer:
+    assert (ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > 0.9
+
+    # classifiers shouldn't be the same
+    assert (ovr_clf.coef_ != cs_clf.coef_).all()
+
+    # test decision function
+    assert_array_equal(
+        cs_clf.predict(iris.data),
+        np.argmax(cs_clf.decision_function(iris.data), axis=1),
+    )
+    dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
+    assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
+
+
+def test_linearsvc_fit_sampleweight():
+    # check correct result when sample_weight is 1
+    n_samples = len(X)
+    unit_weight = np.ones(n_samples)
+    clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y)
+    clf_unitweight = svm.LinearSVC(
+        dual="auto", random_state=0, tol=1e-12, max_iter=1000
+    ).fit(X, Y, sample_weight=unit_weight)
+
+    # check if same as sample_weight=None
+    assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
+    assert_allclose(clf.coef_, clf_unitweight.coef_, rtol=1, atol=0.0001)
+
+    # check that fit(X) = fit([X1, X2, X3], sample_weight=[n1, n2, n3]) where
+    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
+
+    random_state = check_random_state(0)
+    random_weight = random_state.randint(0, 10, n_samples)
+    lsvc_unflat = svm.LinearSVC(
+        dual="auto", random_state=0, tol=1e-12, max_iter=1000
+    ).fit(X, Y, sample_weight=random_weight)
+
+    pred1 = lsvc_unflat.predict(T)
+
+    X_flat = np.repeat(X, random_weight, axis=0)
+    y_flat = np.repeat(Y, random_weight, axis=0)
+    lsvc_flat = svm.LinearSVC(
+        dual="auto", random_state=0, tol=1e-12, max_iter=1000
+    ).fit(X_flat, y_flat)
+    pred2 = lsvc_flat.predict(T)
+
+    assert_array_equal(pred1, pred2)
+    assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, rtol=1, atol=0.0001)
+
+
+def test_crammer_singer_binary():
+    # Test Crammer-Singer formulation in the binary case
+    X, y = make_classification(n_classes=2, random_state=0)
+
+    for fit_intercept in (True, False):
+        acc = (
+            svm.LinearSVC(
+                dual="auto",
+                fit_intercept=fit_intercept,
+                multi_class="crammer_singer",
+                random_state=0,
+            )
+            .fit(X, y)
+            .score(X, y)
+        )
+        assert acc > 0.9
+
+
+def test_linearsvc_iris():
+    # Test that LinearSVC gives plausible predictions on the iris dataset
+    # Also, test symbolic class names (classes_).
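+    # Training on string labels checks that classes_ keeps the original
+    # label type rather than encoded integers.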
+    target = iris.target_names[iris.target]
+    clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, target)
+    assert set(clf.classes_) == set(iris.target_names)
+    assert np.mean(clf.predict(iris.data) == target) > 0.8
+
+    dec = clf.decision_function(iris.data)
+    pred = iris.target_names[np.argmax(dec, 1)]
+    assert_array_equal(pred, clf.predict(iris.data))
+
+
+def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
+    # Test that dense liblinear honours intercept_scaling param
+    X = [[2, 1], [3, 1], [1, 3], [2, 3]]
+    y = [0, 0, 1, 1]
+    clf = classifier(
+        fit_intercept=True,
+        penalty="l1",
+        loss="squared_hinge",
+        dual=False,
+        C=4,
+        tol=1e-7,
+        random_state=0,
+    )
+    assert clf.intercept_scaling == 1, clf.intercept_scaling
+    assert clf.fit_intercept
+
+    # when intercept_scaling is low the intercept value is highly "penalized"
+    # by regularization
+    clf.intercept_scaling = 1
+    clf.fit(X, y)
+    assert_almost_equal(clf.intercept_, 0, decimal=5)
+
+    # when intercept_scaling is sufficiently high, the intercept value
+    # is not affected by regularization
+    clf.intercept_scaling = 100
+    clf.fit(X, y)
+    intercept1 = clf.intercept_
+    assert intercept1 < -1
+
+    # when intercept_scaling is sufficiently high, the intercept value
+    # doesn't depend on intercept_scaling value
+    clf.intercept_scaling = 1000
+    clf.fit(X, y)
+    intercept2 = clf.intercept_
+    assert_array_almost_equal(intercept1, intercept2, decimal=2)
+
+
+def test_liblinear_set_coef():
+    # multi-class case
+    clf = svm.LinearSVC(dual="auto").fit(iris.data, iris.target)
+    values = clf.decision_function(iris.data)
+    clf.coef_ = clf.coef_.copy()
+    clf.intercept_ = clf.intercept_.copy()
+    values2 = clf.decision_function(iris.data)
+    assert_array_almost_equal(values, values2)
+
+    # binary-class case
+    X = [[2, 1], [3, 1], [1, 3], [2, 3]]
+    y = [0, 0, 1, 1]
+
+    clf = svm.LinearSVC(dual="auto").fit(X, y)
+    values = clf.decision_function(X)
+    clf.coef_ = clf.coef_.copy()
+    clf.intercept_ = clf.intercept_.copy()
+    values2 = clf.decision_function(X)
+    assert_array_equal(values, values2)
+
+
+def test_immutable_coef_property():
+    # Check that primal coef modifications are not silently ignored
+    svms = [
+        svm.SVC(kernel="linear").fit(iris.data, iris.target),
+        svm.NuSVC(kernel="linear").fit(iris.data, iris.target),
+        svm.SVR(kernel="linear").fit(iris.data, iris.target),
+        svm.NuSVR(kernel="linear").fit(iris.data, iris.target),
+        svm.OneClassSVM(kernel="linear").fit(iris.data),
+    ]
+    for clf in svms:
+        with pytest.raises(AttributeError):
+            clf.__setattr__("coef_", np.arange(3))
+        with pytest.raises((RuntimeError, ValueError)):
+            clf.coef_.__setitem__((0, 0), 0)
+
+
+def test_linearsvc_verbose():
+    # stdout: redirect
+    import os
+
+    stdout = os.dup(1)  # save original stdout
+    os.dup2(os.pipe()[1], 1)  # replace it
+
+    # actual call
+    clf = svm.LinearSVC(dual="auto", verbose=1)
+    clf.fit(X, Y)
+
+    # stdout: restore
+    os.dup2(stdout, 1)  # restore original stdout
+
+
+def test_svc_clone_with_callable_kernel():
+    # create SVM with callable linear kernel, check that results are the same
+    # as with built-in linear kernel
+    svm_callable = svm.SVC(
+        kernel=lambda x, y: np.dot(x, y.T),
+        probability=True,
+        random_state=0,
+        decision_function_shape="ovr",
+    )
+    # clone for checking clonability with lambda functions.
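+    # (clone must not choke on the lambda kernel; the clone is then fitted
+    # and compared against the built-in linear kernel below)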
+ svm_cloned = base.clone(svm_callable) + svm_cloned.fit(iris.data, iris.target) + + svm_builtin = svm.SVC( + kernel="linear", probability=True, random_state=0, decision_function_shape="ovr" + ) + svm_builtin.fit(iris.data, iris.target) + + assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) + assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) + assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) + + assert_array_almost_equal( + svm_cloned.predict_proba(iris.data), + svm_builtin.predict_proba(iris.data), + decimal=4, + ) + assert_array_almost_equal( + svm_cloned.decision_function(iris.data), + svm_builtin.decision_function(iris.data), + ) + + +def test_svc_bad_kernel(): + svc = svm.SVC(kernel=lambda x, y: x) + with pytest.raises(ValueError): + svc.fit(X, Y) + + +def test_libsvm_convergence_warnings(): + a = svm.SVC( + kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=2 + ) + warning_msg = ( + r"Solver terminated early \(max_iter=2\). Consider pre-processing " + r"your data with StandardScaler or MinMaxScaler." + ) + with pytest.warns(ConvergenceWarning, match=warning_msg): + a.fit(np.array(X), Y) + assert np.all(a.n_iter_ == 2) + + +def test_unfitted(): + X = "foo!" # input validation not required when SVM not fitted + + clf = svm.SVC() + with pytest.raises(Exception, match=r".*\bSVC\b.*\bnot\b.*\bfitted\b"): + clf.predict(X) + + clf = svm.NuSVR() + with pytest.raises(Exception, match=r".*\bNuSVR\b.*\bnot\b.*\bfitted\b"): + clf.predict(X) + + +# ignore convergence warnings from max_iter=1 +@ignore_warnings +def test_consistent_proba(): + a = svm.SVC(probability=True, max_iter=1, random_state=0) + proba_1 = a.fit(X, Y).predict_proba(X) + a = svm.SVC(probability=True, max_iter=1, random_state=0) + proba_2 = a.fit(X, Y).predict_proba(X) + assert_array_almost_equal(proba_1, proba_2) + + +def test_linear_svm_convergence_warnings(): + # Test that warnings are raised if model does not converge + + lsvc = svm.LinearSVC(dual="auto", random_state=0, max_iter=2) + warning_msg = "Liblinear failed to converge, increase the number of iterations." + with pytest.warns(ConvergenceWarning, match=warning_msg): + lsvc.fit(X, Y) + # Check that we have an n_iter_ attribute with int type as opposed to a + # numpy array or an np.int32 so as to match the docstring. + assert isinstance(lsvc.n_iter_, int) + assert lsvc.n_iter_ == 2 + + lsvr = svm.LinearSVR(dual="auto", random_state=0, max_iter=2) + with pytest.warns(ConvergenceWarning, match=warning_msg): + lsvr.fit(iris.data, iris.target) + assert isinstance(lsvr.n_iter_, int) + assert lsvr.n_iter_ == 2 + + +def test_svr_coef_sign(): + # Test that SVR(kernel="linear") has coef_ with the right sign. + # Non-regression test for #2933. 
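+    # With a linear kernel, predictions must equal X @ coef_ + intercept_;
+    # a sign flip in coef_ would break this identity.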
+    X = np.random.RandomState(21).randn(10, 3)
+    y = np.random.RandomState(12).randn(10)
+
+    for svr in [
+        svm.SVR(kernel="linear"),
+        svm.NuSVR(kernel="linear"),
+        svm.LinearSVR(dual="auto"),
+    ]:
+        svr.fit(X, y)
+        assert_array_almost_equal(
+            svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_
+        )
+
+
+def test_lsvc_intercept_scaling_zero():
+    # Test that intercept_scaling is ignored when fit_intercept is False
+
+    lsvc = svm.LinearSVC(dual="auto", fit_intercept=False)
+    lsvc.fit(X, Y)
+    assert lsvc.intercept_ == 0.0
+
+
+def test_hasattr_predict_proba():
+    # Method must be (un)available before or after fit, switched by
+    # `probability` param
+
+    G = svm.SVC(probability=True)
+    assert hasattr(G, "predict_proba")
+    G.fit(iris.data, iris.target)
+    assert hasattr(G, "predict_proba")
+
+    G = svm.SVC(probability=False)
+    assert not hasattr(G, "predict_proba")
+    G.fit(iris.data, iris.target)
+    assert not hasattr(G, "predict_proba")
+
+    # Switching to `probability=True` after fitting should make
+    # predict_proba available, but calling it must not work:
+    G.probability = True
+    assert hasattr(G, "predict_proba")
+    msg = "predict_proba is not available when fitted with probability=False"
+
+    with pytest.raises(NotFittedError, match=msg):
+        G.predict_proba(iris.data)
+
+
+def test_decision_function_shape_two_class():
+    for n_classes in [2, 3]:
+        X, y = make_blobs(centers=n_classes, random_state=0)
+        for estimator in [svm.SVC, svm.NuSVC]:
+            clf = OneVsRestClassifier(estimator(decision_function_shape="ovr")).fit(
+                X, y
+            )
+            assert len(clf.predict(X)) == len(y)
+
+
+def test_ovr_decision_function():
+    # One point from each quadrant represents one class
+    X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
+    y_train = [0, 1, 2, 3]
+
+    # First point is closer to the decision boundaries than the second point
+    base_points = np.array([[5, 5], [10, 10]])
+
+    # For all the quadrants (classes)
+    X_test = np.vstack(
+        (
+            base_points * [1, 1],  # Q1
+            base_points * [-1, 1],  # Q2
+            base_points * [-1, -1],  # Q3
+            base_points * [1, -1],  # Q4
+        )
+    )
+
+    y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
+
+    clf = svm.SVC(kernel="linear", decision_function_shape="ovr")
+    clf.fit(X_train, y_train)
+
+    y_pred = clf.predict(X_test)
+
+    # Test if the prediction is the same as y
+    assert_array_equal(y_pred, y_test)
+
+    deci_val = clf.decision_function(X_test)
+
+    # Assert that the predicted class has the maximum value
+    assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
+
+    # Get decision value at test points for the predicted class
+    pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
+
+    # The decision value for the predicted class should be positive
+    assert np.min(pred_class_deci_val) > 0.0
+
+    # Test if the first point has lower decision value on every quadrant
+    # compared to the second point
+    assert np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])
+
+
+@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
+def test_svc_invalid_break_ties_param(SVCClass):
+    X, y = make_blobs(random_state=42)
+
+    clf = SVCClass(
+        kernel="linear", decision_function_shape="ovo", break_ties=True, random_state=42
+    ).fit(X, y)
+
+    # the break_ties/ovo incompatibility is checked before the input is
+    # validated, so passing y here still triggers the expected error
+    with pytest.raises(ValueError, match="break_ties must be False"):
+        clf.predict(y)
+
+
+@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
+def test_svc_ovr_tie_breaking(SVCClass):
+    """Test if predict breaks ties in OVR mode.
+    Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277
+    """
+    X, y = make_blobs(random_state=0, n_samples=20, n_features=2)
+
+    xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
+    ys = np.linspace(X[:, 1].min(), X[:, 1].max(), 100)
+    xx, yy = np.meshgrid(xs, ys)
+
+    common_params = dict(
+        kernel="rbf", gamma=1e6, random_state=42, decision_function_shape="ovr"
+    )
+    clf = SVCClass(
+        break_ties=False,
+        **common_params,
+    ).fit(X, y)
+    pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
+    dv = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
+    assert not np.all(pred == np.argmax(dv, axis=1))
+
+    clf = SVCClass(
+        break_ties=True,
+        **common_params,
+    ).fit(X, y)
+    pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
+    dv = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
+    assert np.all(pred == np.argmax(dv, axis=1))
+
+
+def test_gamma_scale():
+    X, y = [[0.0], [1.0]], [0, 1]
+
+    clf = svm.SVC()
+    clf.fit(X, y)
+    # gamma="scale" (the default) is 1 / (n_features * X.var()):
+    # here 1 / (1 * 0.25) = 4
+    assert_almost_equal(clf._gamma, 4)
+
+
+@pytest.mark.parametrize(
+    "SVM, params",
+    [
+        (LinearSVC, {"penalty": "l1", "loss": "squared_hinge", "dual": False}),
+        (LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": True}),
+        (LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": False}),
+        (LinearSVC, {"penalty": "l2", "loss": "hinge", "dual": True}),
+        (LinearSVR, {"loss": "epsilon_insensitive", "dual": True}),
+        (LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": True}),
+        (LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": False}),
+    ],
+)
+def test_linearsvm_liblinear_sample_weight(SVM, params):
+    X = np.array(
+        [
+            [1, 3],
+            [1, 3],
+            [1, 3],
+            [1, 3],
+            [2, 1],
+            [2, 1],
+            [2, 1],
+            [2, 1],
+            [3, 3],
+            [3, 3],
+            [3, 3],
+            [3, 3],
+            [4, 1],
+            [4, 1],
+            [4, 1],
+            [4, 1],
+        ],
+        dtype=np.dtype("float"),
+    )
+    y = np.array(
+        [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int")
+    )
+
+    X2 = np.vstack([X, X])
+    y2 = np.hstack([y, 3 - y])
+    sample_weight = np.ones(shape=len(y) * 2)
+    sample_weight[len(y) :] = 0
+    X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0)
+
+    base_estimator = SVM(random_state=42)
+    base_estimator.set_params(**params)
+    base_estimator.set_params(tol=1e-12, max_iter=1000)
+    est_no_weight = base.clone(base_estimator).fit(X, y)
+    est_with_weight = base.clone(base_estimator).fit(
+        X2, y2, sample_weight=sample_weight
+    )
+
+    for method in ("predict", "decision_function"):
+        if hasattr(base_estimator, method):
+            X_est_no_weight = getattr(est_no_weight, method)(X)
+            X_est_with_weight = getattr(est_with_weight, method)(X)
+            assert_allclose(X_est_no_weight, X_est_with_weight)
+
+
+@pytest.mark.parametrize("Klass", (OneClassSVM, SVR, NuSVR))
+def test_n_support(Klass):
+    # Make sure n_support_ is correct for one-class and regression SVMs
+    # (it used to be left uninitialized).
+    # This is a non-regression test for issue #14774.
+    X = np.array([[0], [0.44], [0.45], [0.46], [1]])
+    y = np.arange(X.shape[0])
+    est = Klass()
+    assert not hasattr(est, "n_support_")
+    est.fit(X, y)
+    assert est.n_support_[0] == est.support_vectors_.shape[0]
+    assert est.n_support_.size == 1
+
+
+@pytest.mark.parametrize("Estimator", [svm.SVC, svm.SVR])
+def test_custom_kernel_not_array_input(Estimator):
+    """Test using a custom kernel that is not fed with array-like for floats"""
+    data = ["A A", "A", "B", "B B", "A B"]
+    X = np.array([[2, 0], [1, 0], [0, 1], [0, 2], [1, 1]])  # count encoding
+    y = np.array([1, 1, 2, 2, 1])
+
+    def string_kernel(X1, X2):
+        assert isinstance(X1[0], str)
+        n_samples1 = _num_samples(X1)
+        n_samples2 = _num_samples(X2)
+        K = np.zeros((n_samples1, n_samples2))
+        # fill both triangles; this assumes X1 and X2 contain the same
+        # samples, which holds everywhere this kernel is used below
+        for ii in range(n_samples1):
+            for jj in range(ii, n_samples2):
+                K[ii, jj] = X1[ii].count("A") * X2[jj].count("A")
+                K[ii, jj] += X1[ii].count("B") * X2[jj].count("B")
+                K[jj, ii] = K[ii, jj]
+        return K
+
+    K = string_kernel(data, data)
+    assert_array_equal(np.dot(X, X.T), K)
+
+    svc1 = Estimator(kernel=string_kernel).fit(data, y)
+    svc2 = Estimator(kernel="linear").fit(X, y)
+    svc3 = Estimator(kernel="precomputed").fit(K, y)
+
+    assert svc1.score(data, y) == svc3.score(K, y)
+    assert svc1.score(data, y) == svc2.score(X, y)
+    if hasattr(svc1, "decision_function"):  # classifier
+        assert_allclose(svc1.decision_function(data), svc2.decision_function(X))
+        assert_allclose(svc1.decision_function(data), svc3.decision_function(K))
+        assert_array_equal(svc1.predict(data), svc2.predict(X))
+        assert_array_equal(svc1.predict(data), svc3.predict(K))
+    else:  # regressor
+        assert_allclose(svc1.predict(data), svc2.predict(X))
+        assert_allclose(svc1.predict(data), svc3.predict(K))
+
+
+def test_svc_raises_error_internal_representation():
+    """Check that SVC raises error when internal representation is altered.
+
+    Non-regression test for #18891 and https://nvd.nist.gov/vuln/detail/CVE-2020-28975
+    """
+    clf = svm.SVC(kernel="linear").fit(X, Y)
+    clf._n_support[0] = 1000000
+
+    msg = "The internal representation of SVC was altered"
+    with pytest.raises(ValueError, match=msg):
+        clf.predict(X)
+
+
+@pytest.mark.parametrize(
+    "estimator, expected_n_iter_type",
+    [
+        (svm.SVC, np.ndarray),
+        (svm.NuSVC, np.ndarray),
+        (svm.SVR, int),
+        (svm.NuSVR, int),
+        (svm.OneClassSVM, int),
+    ],
+)
+@pytest.mark.parametrize(
+    "dataset",
+    [
+        make_classification(n_classes=2, n_informative=2, random_state=0),
+        make_classification(n_classes=3, n_informative=3, random_state=0),
+        make_classification(n_classes=4, n_informative=4, random_state=0),
+    ],
+)
+def test_n_iter_libsvm(estimator, expected_n_iter_type, dataset):
+    # Check that the type of n_iter_ is correct for the libsvm-based
+    # estimators. For SVC and NuSVC it is an ndarray (one entry per pairwise
+    # sub-problem); for SVR, NuSVR, and OneClassSVM it is an int.
+    # For SVC and NuSVC also check the shape of n_iter_.
+    X, y = dataset
+    n_iter = estimator(kernel="linear").fit(X, y).n_iter_
+    assert type(n_iter) == expected_n_iter_type
+    if estimator in [svm.SVC, svm.NuSVC]:
+        n_classes = len(np.unique(y))
+        assert n_iter.shape == (n_classes * (n_classes - 1) // 2,)
+
+
+# TODO(1.5): Remove
+@pytest.mark.parametrize("Estimator", [LinearSVR, LinearSVC])
+def test_dual_auto_deprecation_warning(Estimator):
+    clf = Estimator()
+    msg = (
+        "The default value of `dual` will change from `True` to `'auto'` in"
+        " 1.5. Set the value of `dual` explicitly to suppress the warning."
+    )
+    with pytest.warns(FutureWarning, match=re.escape(msg)):
+        clf.fit(X, Y)
+
+
+@pytest.mark.parametrize("loss", ["squared_hinge", "squared_epsilon_insensitive"])
+def test_dual_auto(loss):
+    # OvR, L2, N > M (6,2)
+    dual = _validate_dual_parameter("auto", loss, "l2", "ovr", np.asarray(X))
+    assert dual is False
+    # OvR, L2, N < M (2,6)
+    dual = _validate_dual_parameter("auto", loss, "l2", "ovr", np.asarray(X).T)
+    assert dual is True
+
+
+def test_dual_auto_edge_cases():
+    # Hinge, OvR, L2, N > M (6,2)
+    dual = _validate_dual_parameter("auto", "hinge", "l2", "ovr", np.asarray(X))
+    assert dual is True  # only supports True
+    # Epsilon-insensitive, OvR, L2, N > M (6,2)
+    dual = _validate_dual_parameter(
+        "auto", "epsilon_insensitive", "l2", "ovr", np.asarray(X)
+    )
+    assert dual is True  # only supports True
+    # SqHinge, OvR, L1, N < M (2,6)
+    dual = _validate_dual_parameter(
+        "auto", "squared_hinge", "l1", "ovr", np.asarray(X).T
+    )
+    assert dual is False  # only supports False
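+
+
+# The two tests above pin down the dual="auto" resolution rule. A minimal
+# sketch of that rule, for illustration only (not sklearn's actual
+# `_validate_dual_parameter` implementation):
+#
+#     def resolve_dual_auto(loss, penalty, n_samples, n_features):
+#         if loss in ("hinge", "epsilon_insensitive"):
+#             return True  # these losses only have a dual solver
+#         if penalty == "l1":
+#             return False  # l1 is only supported in the primal
+#         return n_samples <= n_features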