applied-ai-018 committed
Commit 7f44192 · verified · 1 Parent(s): e9bddc1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py +267 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py +0 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py +377 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py +52 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py +286 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py +171 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_base.py +266 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py +522 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py +514 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py +792 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py +300 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py +1161 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py +136 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py +0 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py +153 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py +93 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py +1017 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py +684 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py +254 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py +615 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py +323 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py +72 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/impute/__init__.py +24 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/impute/_base.py +1075 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/impute/_iterative.py +906 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/impute/_knn.py +401 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py +0 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py +107 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py +220 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py +1754 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py +547 -0
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc ADDED
Binary file (11.4 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py ADDED
@@ -0,0 +1,267 @@
+ # Author: Virgile Fritsch <[email protected]>
+ #
+ # License: BSD 3 clause
+
+ from numbers import Real
+
+ import numpy as np
+
+ from ..base import OutlierMixin, _fit_context
+ from ..metrics import accuracy_score
+ from ..utils._param_validation import Interval
+ from ..utils.validation import check_is_fitted
+ from ._robust_covariance import MinCovDet
+
+
+ class EllipticEnvelope(OutlierMixin, MinCovDet):
+     """An object for detecting outliers in a Gaussian distributed dataset.
+
+     Read more in the :ref:`User Guide <outlier_detection>`.
+
+     Parameters
+     ----------
+     store_precision : bool, default=True
+         Specify if the estimated precision is stored.
+
+     assume_centered : bool, default=False
+         If True, the support of robust location and covariance estimates
+         is computed, and a covariance estimate is recomputed from it,
+         without centering the data.
+         Useful to work with data whose mean is significantly equal to
+         zero but is not exactly zero.
+         If False, the robust location and covariance are directly computed
+         with the FastMCD algorithm without additional treatment.
+
+     support_fraction : float, default=None
+         The proportion of points to be included in the support of the raw
+         MCD estimate. If None, the minimum value of support_fraction will
+         be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`.
+         Range is (0, 1).
+
+     contamination : float, default=0.1
+         The amount of contamination of the data set, i.e. the proportion
+         of outliers in the data set. Range is (0, 0.5].
+
+     random_state : int, RandomState instance or None, default=None
+         Determines the pseudo random number generator for shuffling
+         the data. Pass an int for reproducible results across multiple function
+         calls. See :term:`Glossary <random_state>`.
+
+     Attributes
+     ----------
+     location_ : ndarray of shape (n_features,)
+         Estimated robust location.
+
+     covariance_ : ndarray of shape (n_features, n_features)
+         Estimated robust covariance matrix.
+
+     precision_ : ndarray of shape (n_features, n_features)
+         Estimated pseudo inverse matrix.
+         (stored only if store_precision is True)
+
+     support_ : ndarray of shape (n_samples,)
+         A mask of the observations that have been used to compute the
+         robust estimates of location and shape.
+
+     offset_ : float
+         Offset used to define the decision function from the raw scores.
+         We have the relation: ``decision_function = score_samples - offset_``.
+         The offset depends on the contamination parameter and is defined in
+         such a way we obtain the expected number of outliers (samples with
+         decision function < 0) in training.
+
+         .. versionadded:: 0.20
+
+     raw_location_ : ndarray of shape (n_features,)
+         The raw robust estimated location before correction and re-weighting.
+
+     raw_covariance_ : ndarray of shape (n_features, n_features)
+         The raw robust estimated covariance before correction and re-weighting.
+
+     raw_support_ : ndarray of shape (n_samples,)
+         A mask of the observations that have been used to compute
+         the raw robust estimates of location and shape, before correction
+         and re-weighting.
+
+     dist_ : ndarray of shape (n_samples,)
+         Mahalanobis distances of the training set (on which :meth:`fit` is
+         called) observations.
+
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+
+         .. versionadded:: 0.24
+
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+
+         .. versionadded:: 1.0
+
+     See Also
+     --------
+     EmpiricalCovariance : Maximum likelihood covariance estimator.
+     GraphicalLasso : Sparse inverse covariance estimation
+         with an l1-penalized estimator.
+     LedoitWolf : LedoitWolf Estimator.
+     MinCovDet : Minimum Covariance Determinant
+         (robust estimator of covariance).
+     OAS : Oracle Approximating Shrinkage Estimator.
+     ShrunkCovariance : Covariance estimator with shrinkage.
+
+     Notes
+     -----
+     Outlier detection from covariance estimation may break or not
+     perform well in high-dimensional settings. In particular, one will
+     always take care to work with ``n_samples > n_features ** 2``.
+
+     References
+     ----------
+     .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
+        minimum covariance determinant estimator" Technometrics 41(3), 212
+        (1999)
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> from sklearn.covariance import EllipticEnvelope
+     >>> true_cov = np.array([[.8, .3],
+     ...                      [.3, .4]])
+     >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
+     ...                                                  cov=true_cov,
+     ...                                                  size=500)
+     >>> cov = EllipticEnvelope(random_state=0).fit(X)
+     >>> # predict returns 1 for an inlier and -1 for an outlier
+     >>> cov.predict([[0, 0],
+     ...              [3, 3]])
+     array([ 1, -1])
+     >>> cov.covariance_
+     array([[0.7411..., 0.2535...],
+            [0.2535..., 0.3053...]])
+     >>> cov.location_
+     array([0.0813... , 0.0427...])
+     """
+
+     _parameter_constraints: dict = {
+         **MinCovDet._parameter_constraints,
+         "contamination": [Interval(Real, 0, 0.5, closed="right")],
+     }
+
+     def __init__(
+         self,
+         *,
+         store_precision=True,
+         assume_centered=False,
+         support_fraction=None,
+         contamination=0.1,
+         random_state=None,
+     ):
+         super().__init__(
+             store_precision=store_precision,
+             assume_centered=assume_centered,
+             support_fraction=support_fraction,
+             random_state=random_state,
+         )
+         self.contamination = contamination
+
+     @_fit_context(prefer_skip_nested_validation=True)
+     def fit(self, X, y=None):
+         """Fit the EllipticEnvelope model.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Training data.
+
+         y : Ignored
+             Not used, present for API consistency by convention.
+
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         super().fit(X)
+         self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)
+         return self
+
+     def decision_function(self, X):
+         """Compute the decision function of the given observations.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             The data matrix.
+
+         Returns
+         -------
+         decision : ndarray of shape (n_samples,)
+             Decision function of the samples.
+             It is equal to the shifted Mahalanobis distances.
+             The threshold for being an outlier is 0, which ensures a
+             compatibility with other outlier detection algorithms.
+         """
+         check_is_fitted(self)
+         negative_mahal_dist = self.score_samples(X)
+         return negative_mahal_dist - self.offset_
+
+     def score_samples(self, X):
+         """Compute the negative Mahalanobis distances.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             The data matrix.
+
+         Returns
+         -------
+         negative_mahal_distances : array-like of shape (n_samples,)
+             Opposite of the Mahalanobis distances.
+         """
+         check_is_fitted(self)
+         return -self.mahalanobis(X)
+
+     def predict(self, X):
+         """
+         Predict labels (1 inlier, -1 outlier) of X according to fitted model.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             The data matrix.
+
+         Returns
+         -------
+         is_inlier : ndarray of shape (n_samples,)
+             Returns -1 for anomalies/outliers and +1 for inliers.
+         """
+         values = self.decision_function(X)
+         is_inlier = np.full(values.shape[0], -1, dtype=int)
+         is_inlier[values >= 0] = 1
+
+         return is_inlier
+
+     def score(self, X, y, sample_weight=None):
+         """Return the mean accuracy on the given test data and labels.
+
+         In multi-label classification, this is the subset accuracy
+         which is a harsh metric since you require for each sample that
+         each label set be correctly predicted.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Test samples.
+
+         y : array-like of shape (n_samples,) or (n_samples, n_outputs)
+             True labels for X.
+
+         sample_weight : array-like of shape (n_samples,), default=None
+             Sample weights.
+
+         Returns
+         -------
+         score : float
+             Mean accuracy of self.predict(X) w.r.t. y.
+         """
+         return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
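
The added estimator documents the relations score_samples(X) = -mahalanobis(X), decision_function(X) = score_samples(X) - offset_, and predict thresholding at 0. A minimal usage sketch of that contract (not part of the committed file; it only assumes a scikit-learn installation that provides sklearn.covariance.EllipticEnvelope):

import numpy as np
from sklearn.covariance import EllipticEnvelope

# Mostly-Gaussian data plus two obvious outliers.
rng = np.random.RandomState(42)
X = np.vstack([rng.multivariate_normal([0, 0], [[0.8, 0.3], [0.3, 0.4]], size=500),
               [[6, 6], [-7, 5]]])

est = EllipticEnvelope(contamination=0.01, random_state=0).fit(X)

# predict: +1 for inliers, -1 for outliers (threshold at decision_function == 0).
labels = est.predict(X)

# The relations documented in the class hold numerically:
assert np.allclose(est.score_samples(X), -est.mahalanobis(X))
assert np.allclose(est.decision_function(X), est.score_samples(X) - est.offset_)
assert np.all((labels == 1) == (est.decision_function(X) >= 0))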
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc ADDED
Binary file (7.78 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc ADDED
Binary file (1.68 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc ADDED
Binary file (8.3 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc ADDED
Binary file (4.45 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py ADDED
@@ -0,0 +1,377 @@
1
+ # Author: Alexandre Gramfort <[email protected]>
2
+ # Gael Varoquaux <[email protected]>
3
+ # Virgile Fritsch <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from sklearn import datasets
11
+ from sklearn.covariance import (
12
+ OAS,
13
+ EmpiricalCovariance,
14
+ LedoitWolf,
15
+ ShrunkCovariance,
16
+ empirical_covariance,
17
+ ledoit_wolf,
18
+ ledoit_wolf_shrinkage,
19
+ oas,
20
+ shrunk_covariance,
21
+ )
22
+ from sklearn.covariance._shrunk_covariance import _ledoit_wolf
23
+ from sklearn.utils._testing import (
24
+ assert_allclose,
25
+ assert_almost_equal,
26
+ assert_array_almost_equal,
27
+ assert_array_equal,
28
+ )
29
+
30
+ from .._shrunk_covariance import _oas
31
+
32
+ X, _ = datasets.load_diabetes(return_X_y=True)
33
+ X_1d = X[:, 0]
34
+ n_samples, n_features = X.shape
35
+
36
+
37
+ def test_covariance():
38
+ # Tests Covariance module on a simple dataset.
39
+ # test covariance fit from data
40
+ cov = EmpiricalCovariance()
41
+ cov.fit(X)
42
+ emp_cov = empirical_covariance(X)
43
+ assert_array_almost_equal(emp_cov, cov.covariance_, 4)
44
+ assert_almost_equal(cov.error_norm(emp_cov), 0)
45
+ assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0)
46
+ assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0)
47
+ assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0)
48
+ assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0)
49
+ with pytest.raises(NotImplementedError):
50
+ cov.error_norm(emp_cov, norm="foo")
51
+ # Mahalanobis distances computation test
52
+ mahal_dist = cov.mahalanobis(X)
53
+ assert np.amin(mahal_dist) > 0
54
+
55
+ # test with n_features = 1
56
+ X_1d = X[:, 0].reshape((-1, 1))
57
+ cov = EmpiricalCovariance()
58
+ cov.fit(X_1d)
59
+ assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
60
+ assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
61
+ assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0)
62
+
63
+ # test with one sample
64
+ # Create X with 1 sample and 5 features
65
+ X_1sample = np.arange(5).reshape(1, 5)
66
+ cov = EmpiricalCovariance()
67
+ warn_msg = "Only one sample available. You may want to reshape your data array"
68
+ with pytest.warns(UserWarning, match=warn_msg):
69
+ cov.fit(X_1sample)
70
+
71
+ assert_array_almost_equal(cov.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
72
+
73
+ # test integer type
74
+ X_integer = np.asarray([[0, 1], [1, 0]])
75
+ result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
76
+ assert_array_almost_equal(empirical_covariance(X_integer), result)
77
+
78
+ # test centered case
79
+ cov = EmpiricalCovariance(assume_centered=True)
80
+ cov.fit(X)
81
+ assert_array_equal(cov.location_, np.zeros(X.shape[1]))
82
+
83
+
84
+ @pytest.mark.parametrize("n_matrices", [1, 3])
85
+ def test_shrunk_covariance_func(n_matrices):
86
+ """Check `shrunk_covariance` function."""
87
+
88
+ n_features = 2
89
+ cov = np.ones((n_features, n_features))
90
+ cov_target = np.array([[1, 0.5], [0.5, 1]])
91
+
92
+ if n_matrices > 1:
93
+ cov = np.repeat(cov[np.newaxis, ...], n_matrices, axis=0)
94
+ cov_target = np.repeat(cov_target[np.newaxis, ...], n_matrices, axis=0)
95
+
96
+ cov_shrunk = shrunk_covariance(cov, 0.5)
97
+ assert_allclose(cov_shrunk, cov_target)
98
+
99
+
100
+ def test_shrunk_covariance():
101
+ """Check consistency between `ShrunkCovariance` and `shrunk_covariance`."""
102
+
103
+ # Tests ShrunkCovariance module on a simple dataset.
104
+ # compare shrunk covariance obtained from data and from MLE estimate
105
+ cov = ShrunkCovariance(shrinkage=0.5)
106
+ cov.fit(X)
107
+ assert_array_almost_equal(
108
+ shrunk_covariance(empirical_covariance(X), shrinkage=0.5), cov.covariance_, 4
109
+ )
110
+
111
+ # same test with shrinkage not provided
112
+ cov = ShrunkCovariance()
113
+ cov.fit(X)
114
+ assert_array_almost_equal(
115
+ shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4
116
+ )
117
+
118
+ # same test with shrinkage = 0 (<==> empirical_covariance)
119
+ cov = ShrunkCovariance(shrinkage=0.0)
120
+ cov.fit(X)
121
+ assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
122
+
123
+ # test with n_features = 1
124
+ X_1d = X[:, 0].reshape((-1, 1))
125
+ cov = ShrunkCovariance(shrinkage=0.3)
126
+ cov.fit(X_1d)
127
+ assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
128
+
129
+ # test shrinkage coeff on a simple data set (without saving precision)
130
+ cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
131
+ cov.fit(X)
132
+ assert cov.precision_ is None
133
+
134
+
135
+ def test_ledoit_wolf():
136
+ # Tests LedoitWolf module on a simple dataset.
137
+ # test shrinkage coeff on a simple data set
138
+ X_centered = X - X.mean(axis=0)
139
+ lw = LedoitWolf(assume_centered=True)
140
+ lw.fit(X_centered)
141
+ shrinkage_ = lw.shrinkage_
142
+
143
+ score_ = lw.score(X_centered)
144
+ assert_almost_equal(
145
+ ledoit_wolf_shrinkage(X_centered, assume_centered=True), shrinkage_
146
+ )
147
+ assert_almost_equal(
148
+ ledoit_wolf_shrinkage(X_centered, assume_centered=True, block_size=6),
149
+ shrinkage_,
150
+ )
151
+ # compare shrunk covariance obtained from data and from MLE estimate
152
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(
153
+ X_centered, assume_centered=True
154
+ )
155
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
156
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
157
+ # compare estimates given by LW and ShrunkCovariance
158
+ scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
159
+ scov.fit(X_centered)
160
+ assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
161
+
162
+ # test with n_features = 1
163
+ X_1d = X[:, 0].reshape((-1, 1))
164
+ lw = LedoitWolf(assume_centered=True)
165
+ lw.fit(X_1d)
166
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d, assume_centered=True)
167
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
168
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
169
+ assert_array_almost_equal((X_1d**2).sum() / n_samples, lw.covariance_, 4)
170
+
171
+ # test shrinkage coeff on a simple data set (without saving precision)
172
+ lw = LedoitWolf(store_precision=False, assume_centered=True)
173
+ lw.fit(X_centered)
174
+ assert_almost_equal(lw.score(X_centered), score_, 4)
175
+ assert lw.precision_ is None
176
+
177
+ # Same tests without assuming centered data
178
+ # test shrinkage coeff on a simple data set
179
+ lw = LedoitWolf()
180
+ lw.fit(X)
181
+ assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
182
+ assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
183
+ assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
184
+ assert_almost_equal(
185
+ lw.shrinkage_, _ledoit_wolf(X=X, assume_centered=False, block_size=10000)[1]
186
+ )
187
+ assert_almost_equal(lw.score(X), score_, 4)
188
+ # compare shrunk covariance obtained from data and from MLE estimate
189
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
190
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
191
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
192
+ # compare estimates given by LW and ShrunkCovariance
193
+ scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
194
+ scov.fit(X)
195
+ assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
196
+
197
+ # test with n_features = 1
198
+ X_1d = X[:, 0].reshape((-1, 1))
199
+ lw = LedoitWolf()
200
+ lw.fit(X_1d)
201
+ assert_allclose(
202
+ X_1d.var(ddof=0),
203
+ _ledoit_wolf(X=X_1d, assume_centered=False, block_size=10000)[0],
204
+ )
205
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
206
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
207
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
208
+ assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
209
+
210
+ # test with one sample
211
+ # warning should be raised when using only 1 sample
212
+ X_1sample = np.arange(5).reshape(1, 5)
213
+ lw = LedoitWolf()
214
+
215
+ warn_msg = "Only one sample available. You may want to reshape your data array"
216
+ with pytest.warns(UserWarning, match=warn_msg):
217
+ lw.fit(X_1sample)
218
+
219
+ assert_array_almost_equal(lw.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
220
+
221
+ # test shrinkage coeff on a simple data set (without saving precision)
222
+ lw = LedoitWolf(store_precision=False)
223
+ lw.fit(X)
224
+ assert_almost_equal(lw.score(X), score_, 4)
225
+ assert lw.precision_ is None
226
+
227
+
228
+ def _naive_ledoit_wolf_shrinkage(X):
229
+ # A simple implementation of the formulas from Ledoit & Wolf
230
+
231
+ # The computation below achieves the following computations of the
232
+ # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
233
+ # Large-Dimensional Covariance Matrices"
234
+ # beta and delta are given in the beginning of section 3.2
235
+ n_samples, n_features = X.shape
236
+ emp_cov = empirical_covariance(X, assume_centered=False)
237
+ mu = np.trace(emp_cov) / n_features
238
+ delta_ = emp_cov.copy()
239
+ delta_.flat[:: n_features + 1] -= mu
240
+ delta = (delta_**2).sum() / n_features
241
+ X2 = X**2
242
+ beta_ = (
243
+ 1.0
244
+ / (n_features * n_samples)
245
+ * np.sum(np.dot(X2.T, X2) / n_samples - emp_cov**2)
246
+ )
247
+
248
+ beta = min(beta_, delta)
249
+ shrinkage = beta / delta
250
+ return shrinkage
251
+
252
+
253
+ def test_ledoit_wolf_small():
254
+ # Compare our blocked implementation to the naive implementation
255
+ X_small = X[:, :4]
256
+ lw = LedoitWolf()
257
+ lw.fit(X_small)
258
+ shrinkage_ = lw.shrinkage_
259
+
260
+ assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
261
+
262
+
263
+ def test_ledoit_wolf_large():
264
+ # test that ledoit_wolf doesn't error on data that is wider than block_size
265
+ rng = np.random.RandomState(0)
266
+ # use a number of features that is larger than the block-size
267
+ X = rng.normal(size=(10, 20))
268
+ lw = LedoitWolf(block_size=10).fit(X)
269
+ # check that covariance is about diagonal (random normal noise)
270
+ assert_almost_equal(lw.covariance_, np.eye(20), 0)
271
+ cov = lw.covariance_
272
+
273
+ # check that the result is consistent with not splitting data into blocks.
274
+ lw = LedoitWolf(block_size=25).fit(X)
275
+ assert_almost_equal(lw.covariance_, cov)
276
+
277
+
278
+ @pytest.mark.parametrize(
279
+ "ledoit_wolf_fitting_function", [LedoitWolf().fit, ledoit_wolf_shrinkage]
280
+ )
281
+ def test_ledoit_wolf_empty_array(ledoit_wolf_fitting_function):
282
+ """Check that we validate X and raise proper error with 0-sample array."""
283
+ X_empty = np.zeros((0, 2))
284
+ with pytest.raises(ValueError, match="Found array with 0 sample"):
285
+ ledoit_wolf_fitting_function(X_empty)
286
+
287
+
288
+ def test_oas():
289
+ # Tests OAS module on a simple dataset.
290
+ # test shrinkage coeff on a simple data set
291
+ X_centered = X - X.mean(axis=0)
292
+ oa = OAS(assume_centered=True)
293
+ oa.fit(X_centered)
294
+ shrinkage_ = oa.shrinkage_
295
+ score_ = oa.score(X_centered)
296
+ # compare shrunk covariance obtained from data and from MLE estimate
297
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered, assume_centered=True)
298
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
299
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
300
+ # compare estimates given by OAS and ShrunkCovariance
301
+ scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
302
+ scov.fit(X_centered)
303
+ assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
304
+
305
+ # test with n_features = 1
306
+ X_1d = X[:, 0:1]
307
+ oa = OAS(assume_centered=True)
308
+ oa.fit(X_1d)
309
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
310
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
311
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
312
+ assert_array_almost_equal((X_1d**2).sum() / n_samples, oa.covariance_, 4)
313
+
314
+ # test shrinkage coeff on a simple data set (without saving precision)
315
+ oa = OAS(store_precision=False, assume_centered=True)
316
+ oa.fit(X_centered)
317
+ assert_almost_equal(oa.score(X_centered), score_, 4)
318
+ assert oa.precision_ is None
319
+
320
+ # Same tests without assuming centered data--------------------------------
321
+ # test shrinkage coeff on a simple data set
322
+ oa = OAS()
323
+ oa.fit(X)
324
+ assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
325
+ assert_almost_equal(oa.score(X), score_, 4)
326
+ # compare shrunk covariance obtained from data and from MLE estimate
327
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
328
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
329
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
330
+ # compare estimates given by OAS and ShrunkCovariance
331
+ scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
332
+ scov.fit(X)
333
+ assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
334
+
335
+ # test with n_features = 1
336
+ X_1d = X[:, 0].reshape((-1, 1))
337
+ oa = OAS()
338
+ oa.fit(X_1d)
339
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
340
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
341
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
342
+ assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
343
+
344
+ # test with one sample
345
+ # warning should be raised when using only 1 sample
346
+ X_1sample = np.arange(5).reshape(1, 5)
347
+ oa = OAS()
348
+ warn_msg = "Only one sample available. You may want to reshape your data array"
349
+ with pytest.warns(UserWarning, match=warn_msg):
350
+ oa.fit(X_1sample)
351
+
352
+ assert_array_almost_equal(oa.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
353
+
354
+ # test shrinkage coeff on a simple data set (without saving precision)
355
+ oa = OAS(store_precision=False)
356
+ oa.fit(X)
357
+ assert_almost_equal(oa.score(X), score_, 4)
358
+ assert oa.precision_ is None
359
+
360
+ # test function _oas without assuming centered data
361
+ X_1f = X[:, 0:1]
362
+ oa = OAS()
363
+ oa.fit(X_1f)
364
+ # compare shrunk covariance obtained from data and from MLE estimate
365
+ _oa_cov_from_mle, _oa_shrinkage_from_mle = _oas(X_1f)
366
+ assert_array_almost_equal(_oa_cov_from_mle, oa.covariance_, 4)
367
+ assert_almost_equal(_oa_shrinkage_from_mle, oa.shrinkage_)
368
+ assert_array_almost_equal((X_1f**2).sum() / n_samples, oa.covariance_, 4)
369
+
370
+
371
+ def test_EmpiricalCovariance_validates_mahalanobis():
372
+ """Checks that EmpiricalCovariance validates data with mahalanobis."""
373
+ cov = EmpiricalCovariance().fit(X)
374
+
375
+ msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input"
376
+ with pytest.raises(ValueError, match=msg):
377
+ cov.mahalanobis(X[:, :2])
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py ADDED
@@ -0,0 +1,52 @@
+ """
+ Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope).
+ """
+
+ import numpy as np
+ import pytest
+
+ from sklearn.covariance import EllipticEnvelope
+ from sklearn.exceptions import NotFittedError
+ from sklearn.utils._testing import (
+     assert_almost_equal,
+     assert_array_almost_equal,
+     assert_array_equal,
+ )
+
+
+ def test_elliptic_envelope(global_random_seed):
+     rnd = np.random.RandomState(global_random_seed)
+     X = rnd.randn(100, 10)
+     clf = EllipticEnvelope(contamination=0.1)
+     with pytest.raises(NotFittedError):
+         clf.predict(X)
+     with pytest.raises(NotFittedError):
+         clf.decision_function(X)
+     clf.fit(X)
+     y_pred = clf.predict(X)
+     scores = clf.score_samples(X)
+     decisions = clf.decision_function(X)
+
+     assert_array_almost_equal(scores, -clf.mahalanobis(X))
+     assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
+     assert_almost_equal(
+         clf.score(X, np.ones(100)), (100 - y_pred[y_pred == -1].size) / 100.0
+     )
+     assert sum(y_pred == -1) == sum(decisions < 0)
+
+
+ def test_score_samples():
+     X_train = [[1, 1], [1, 2], [2, 1]]
+     clf1 = EllipticEnvelope(contamination=0.2).fit(X_train)
+     clf2 = EllipticEnvelope().fit(X_train)
+     assert_array_equal(
+         clf1.score_samples([[2.0, 2.0]]),
+         clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
+     )
+     assert_array_equal(
+         clf2.score_samples([[2.0, 2.0]]),
+         clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
+     )
+     assert_array_equal(
+         clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
+     )
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py ADDED
@@ -0,0 +1,286 @@
1
+ """ Test the graphical_lasso module.
2
+ """
3
+ import sys
4
+ from io import StringIO
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from numpy.testing import assert_allclose
9
+ from scipy import linalg
10
+
11
+ from sklearn import datasets
12
+ from sklearn.covariance import (
13
+ GraphicalLasso,
14
+ GraphicalLassoCV,
15
+ empirical_covariance,
16
+ graphical_lasso,
17
+ )
18
+ from sklearn.datasets import make_sparse_spd_matrix
19
+ from sklearn.utils import check_random_state
20
+ from sklearn.utils._testing import (
21
+ _convert_container,
22
+ assert_array_almost_equal,
23
+ assert_array_less,
24
+ )
25
+
26
+
27
+ def test_graphical_lassos(random_state=1):
28
+ """Test the graphical lasso solvers.
29
+
30
+ This checks is unstable for some random seeds where the covariance found with "cd"
31
+ and "lars" solvers are different (4 cases / 100 tries).
32
+ """
33
+ # Sample data from a sparse multivariate normal
34
+ dim = 20
35
+ n_samples = 100
36
+ random_state = check_random_state(random_state)
37
+ prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
38
+ cov = linalg.inv(prec)
39
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
40
+ emp_cov = empirical_covariance(X)
41
+
42
+ for alpha in (0.0, 0.1, 0.25):
43
+ covs = dict()
44
+ icovs = dict()
45
+ for method in ("cd", "lars"):
46
+ cov_, icov_, costs = graphical_lasso(
47
+ emp_cov, return_costs=True, alpha=alpha, mode=method
48
+ )
49
+ covs[method] = cov_
50
+ icovs[method] = icov_
51
+ costs, dual_gap = np.array(costs).T
52
+ # Check that the costs always decrease (doesn't hold if alpha == 0)
53
+ if not alpha == 0:
54
+ # use 1e-12 since the cost can be exactly 0
55
+ assert_array_less(np.diff(costs), 1e-12)
56
+ # Check that the 2 approaches give similar results
57
+ assert_allclose(covs["cd"], covs["lars"], atol=5e-4)
58
+ assert_allclose(icovs["cd"], icovs["lars"], atol=5e-4)
59
+
60
+ # Smoke test the estimator
61
+ model = GraphicalLasso(alpha=0.25).fit(X)
62
+ model.score(X)
63
+ assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4)
64
+ assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4)
65
+
66
+ # For a centered matrix, assume_centered could be chosen True or False
67
+ # Check that this returns indeed the same result for centered data
68
+ Z = X - X.mean(0)
69
+ precs = list()
70
+ for assume_centered in (False, True):
71
+ prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_
72
+ precs.append(prec_)
73
+ assert_array_almost_equal(precs[0], precs[1])
74
+
75
+
76
+ def test_graphical_lasso_when_alpha_equals_0():
77
+ """Test graphical_lasso's early return condition when alpha=0."""
78
+ X = np.random.randn(100, 10)
79
+ emp_cov = empirical_covariance(X, assume_centered=True)
80
+
81
+ model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov)
82
+ assert_allclose(model.precision_, np.linalg.inv(emp_cov))
83
+
84
+ _, precision = graphical_lasso(emp_cov, alpha=0)
85
+ assert_allclose(precision, np.linalg.inv(emp_cov))
86
+
87
+
88
+ @pytest.mark.parametrize("mode", ["cd", "lars"])
89
+ def test_graphical_lasso_n_iter(mode):
90
+ X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0)
91
+ emp_cov = empirical_covariance(X)
92
+
93
+ _, _, n_iter = graphical_lasso(
94
+ emp_cov, 0.2, mode=mode, max_iter=2, return_n_iter=True
95
+ )
96
+ assert n_iter == 2
97
+
98
+
99
+ def test_graphical_lasso_iris():
100
+ # Hard-coded solution from R glasso package for alpha=1.0
101
+ # (need to set penalize.diagonal to FALSE)
102
+ cov_R = np.array(
103
+ [
104
+ [0.68112222, 0.0000000, 0.265820, 0.02464314],
105
+ [0.00000000, 0.1887129, 0.000000, 0.00000000],
106
+ [0.26582000, 0.0000000, 3.095503, 0.28697200],
107
+ [0.02464314, 0.0000000, 0.286972, 0.57713289],
108
+ ]
109
+ )
110
+ icov_R = np.array(
111
+ [
112
+ [1.5190747, 0.000000, -0.1304475, 0.0000000],
113
+ [0.0000000, 5.299055, 0.0000000, 0.0000000],
114
+ [-0.1304475, 0.000000, 0.3498624, -0.1683946],
115
+ [0.0000000, 0.000000, -0.1683946, 1.8164353],
116
+ ]
117
+ )
118
+ X = datasets.load_iris().data
119
+ emp_cov = empirical_covariance(X)
120
+ for method in ("cd", "lars"):
121
+ cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method)
122
+ assert_array_almost_equal(cov, cov_R)
123
+ assert_array_almost_equal(icov, icov_R)
124
+
125
+
126
+ def test_graph_lasso_2D():
127
+ # Hard-coded solution from Python skggm package
128
+ # obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
129
+ cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]])
130
+
131
+ icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]])
132
+ X = datasets.load_iris().data[:, 2:]
133
+ emp_cov = empirical_covariance(X)
134
+ for method in ("cd", "lars"):
135
+ cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method)
136
+ assert_array_almost_equal(cov, cov_skggm)
137
+ assert_array_almost_equal(icov, icov_skggm)
138
+
139
+
140
+ def test_graphical_lasso_iris_singular():
141
+ # Small subset of rows to test the rank-deficient case
142
+ # Need to choose samples such that none of the variances are zero
143
+ indices = np.arange(10, 13)
144
+
145
+ # Hard-coded solution from R glasso package for alpha=0.01
146
+ cov_R = np.array(
147
+ [
148
+ [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
149
+ [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
150
+ [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
151
+ [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222],
152
+ ]
153
+ )
154
+ icov_R = np.array(
155
+ [
156
+ [24.42244057, -16.831679593, 0.0, 0.0],
157
+ [-16.83168201, 24.351841681, -6.206896552, -12.5],
158
+ [0.0, -6.206896171, 153.103448276, 0.0],
159
+ [0.0, -12.499999143, 0.0, 462.5],
160
+ ]
161
+ )
162
+ X = datasets.load_iris().data[indices, :]
163
+ emp_cov = empirical_covariance(X)
164
+ for method in ("cd", "lars"):
165
+ cov, icov = graphical_lasso(
166
+ emp_cov, alpha=0.01, return_costs=False, mode=method
167
+ )
168
+ assert_array_almost_equal(cov, cov_R, decimal=5)
169
+ assert_array_almost_equal(icov, icov_R, decimal=5)
170
+
171
+
172
+ def test_graphical_lasso_cv(random_state=1):
173
+ # Sample data from a sparse multivariate normal
174
+ dim = 5
175
+ n_samples = 6
176
+ random_state = check_random_state(random_state)
177
+ prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state)
178
+ cov = linalg.inv(prec)
179
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
180
+ # Capture stdout, to smoke test the verbose mode
181
+ orig_stdout = sys.stdout
182
+ try:
183
+ sys.stdout = StringIO()
184
+ # We need verbose very high so that Parallel prints on stdout
185
+ GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
186
+ finally:
187
+ sys.stdout = orig_stdout
188
+
189
+
190
+ @pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"])
191
+ def test_graphical_lasso_cv_alphas_iterable(alphas_container_type):
192
+ """Check that we can pass an array-like to `alphas`.
193
+
194
+ Non-regression test for:
195
+ https://github.com/scikit-learn/scikit-learn/issues/22489
196
+ """
197
+ true_cov = np.array(
198
+ [
199
+ [0.8, 0.0, 0.2, 0.0],
200
+ [0.0, 0.4, 0.0, 0.0],
201
+ [0.2, 0.0, 0.3, 0.1],
202
+ [0.0, 0.0, 0.1, 0.7],
203
+ ]
204
+ )
205
+ rng = np.random.RandomState(0)
206
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
207
+ alphas = _convert_container([0.02, 0.03], alphas_container_type)
208
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
209
+
210
+
211
+ @pytest.mark.parametrize(
212
+ "alphas,err_type,err_msg",
213
+ [
214
+ ([-0.02, 0.03], ValueError, "must be > 0"),
215
+ ([0, 0.03], ValueError, "must be > 0"),
216
+ (["not_number", 0.03], TypeError, "must be an instance of float"),
217
+ ],
218
+ )
219
+ def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg):
220
+ """Check that if an array-like containing a value
221
+ outside of (0, inf] is passed to `alphas`, a ValueError is raised.
222
+ Check if a string is passed, a TypeError is raised.
223
+ """
224
+ true_cov = np.array(
225
+ [
226
+ [0.8, 0.0, 0.2, 0.0],
227
+ [0.0, 0.4, 0.0, 0.0],
228
+ [0.2, 0.0, 0.3, 0.1],
229
+ [0.0, 0.0, 0.1, 0.7],
230
+ ]
231
+ )
232
+ rng = np.random.RandomState(0)
233
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
234
+
235
+ with pytest.raises(err_type, match=err_msg):
236
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
237
+
238
+
239
+ def test_graphical_lasso_cv_scores():
240
+ splits = 4
241
+ n_alphas = 5
242
+ n_refinements = 3
243
+ true_cov = np.array(
244
+ [
245
+ [0.8, 0.0, 0.2, 0.0],
246
+ [0.0, 0.4, 0.0, 0.0],
247
+ [0.2, 0.0, 0.3, 0.1],
248
+ [0.0, 0.0, 0.1, 0.7],
249
+ ]
250
+ )
251
+ rng = np.random.RandomState(0)
252
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
253
+ cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit(
254
+ X
255
+ )
256
+
257
+ cv_results = cov.cv_results_
258
+ # alpha and one for each split
259
+
260
+ total_alphas = n_refinements * n_alphas + 1
261
+ keys = ["alphas"]
262
+ split_keys = [f"split{i}_test_score" for i in range(splits)]
263
+ for key in keys + split_keys:
264
+ assert key in cv_results
265
+ assert len(cv_results[key]) == total_alphas
266
+
267
+ cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
268
+ expected_mean = cv_scores.mean(axis=0)
269
+ expected_std = cv_scores.std(axis=0)
270
+
271
+ assert_allclose(cov.cv_results_["mean_test_score"], expected_mean)
272
+ assert_allclose(cov.cv_results_["std_test_score"], expected_std)
273
+
274
+
275
+ # TODO(1.5): remove in 1.5
276
+ def test_graphical_lasso_cov_init_deprecation():
277
+ """Check that we raise a deprecation warning if providing `cov_init` in
278
+ `graphical_lasso`."""
279
+ rng, dim, n_samples = np.random.RandomState(0), 20, 100
280
+ prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=0)
281
+ cov = linalg.inv(prec)
282
+ X = rng.multivariate_normal(np.zeros(dim), cov, size=n_samples)
283
+
284
+ emp_cov = empirical_covariance(X)
285
+ with pytest.warns(FutureWarning, match="cov_init parameter is deprecated"):
286
+ graphical_lasso(emp_cov, alpha=0.1, cov_init=emp_cov)
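
These tests exercise two entry points: the graphical_lasso function, which takes an empirical covariance matrix and returns the regularized covariance/precision pair, and the GraphicalLassoCV estimator, which cross-validates alpha on the raw samples. A short sketch of that call pattern (not part of the committed file; it only assumes the packaged scikit-learn):

import numpy as np
from sklearn.covariance import GraphicalLassoCV, empirical_covariance, graphical_lasso

rng = np.random.RandomState(0)
true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
                     [0.0, 0.4, 0.0, 0.0],
                     [0.2, 0.0, 0.3, 0.1],
                     [0.0, 0.0, 0.1, 0.7]])
X = rng.multivariate_normal(mean=np.zeros(4), cov=true_cov, size=200)

# Functional interface: operates on a precomputed empirical covariance.
emp_cov = empirical_covariance(X)
cov, icov = graphical_lasso(emp_cov, alpha=0.05, mode="cd")

# Estimator interface: selects alpha by cross-validation on the samples themselves.
model = GraphicalLassoCV(alphas=4, cv=3).fit(X)
print(model.alpha_, model.covariance_.shape)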
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py ADDED
@@ -0,0 +1,171 @@
1
+ # Author: Alexandre Gramfort <[email protected]>
2
+ # Gael Varoquaux <[email protected]>
3
+ # Virgile Fritsch <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ import itertools
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ from sklearn import datasets
13
+ from sklearn.covariance import MinCovDet, empirical_covariance, fast_mcd
14
+ from sklearn.utils._testing import assert_array_almost_equal
15
+
16
+ X = datasets.load_iris().data
17
+ X_1d = X[:, 0]
18
+ n_samples, n_features = X.shape
19
+
20
+
21
+ def test_mcd(global_random_seed):
22
+ # Tests the FastMCD algorithm implementation
23
+ # Small data set
24
+ # test without outliers (random independent normal data)
25
+ launch_mcd_on_dataset(100, 5, 0, 0.02, 0.1, 75, global_random_seed)
26
+ # test with a contaminated data set (medium contamination)
27
+ launch_mcd_on_dataset(100, 5, 20, 0.3, 0.3, 65, global_random_seed)
28
+ # test with a contaminated data set (strong contamination)
29
+ launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50, global_random_seed)
30
+
31
+ # Medium data set
32
+ launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540, global_random_seed)
33
+
34
+ # Large data set
35
+ launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870, global_random_seed)
36
+
37
+ # 1D data set
38
+ launch_mcd_on_dataset(500, 1, 100, 0.02, 0.02, 350, global_random_seed)
39
+
40
+
41
+ def test_fast_mcd_on_invalid_input():
42
+ X = np.arange(100)
43
+ msg = "Expected 2D array, got 1D array instead"
44
+ with pytest.raises(ValueError, match=msg):
45
+ fast_mcd(X)
46
+
47
+
48
+ def test_mcd_class_on_invalid_input():
49
+ X = np.arange(100)
50
+ mcd = MinCovDet()
51
+ msg = "Expected 2D array, got 1D array instead"
52
+ with pytest.raises(ValueError, match=msg):
53
+ mcd.fit(X)
54
+
55
+
56
+ def launch_mcd_on_dataset(
57
+ n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support, seed
58
+ ):
59
+ rand_gen = np.random.RandomState(seed)
60
+ data = rand_gen.randn(n_samples, n_features)
61
+ # add some outliers
62
+ outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
63
+ outliers_offset = 10.0 * (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
64
+ data[outliers_index] += outliers_offset
65
+ inliers_mask = np.ones(n_samples).astype(bool)
66
+ inliers_mask[outliers_index] = False
67
+
68
+ pure_data = data[inliers_mask]
69
+ # compute MCD by fitting an object
70
+ mcd_fit = MinCovDet(random_state=seed).fit(data)
71
+ T = mcd_fit.location_
72
+ S = mcd_fit.covariance_
73
+ H = mcd_fit.support_
74
+ # compare with the estimates learnt from the inliers
75
+ error_location = np.mean((pure_data.mean(0) - T) ** 2)
76
+ assert error_location < tol_loc
77
+ error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
78
+ assert error_cov < tol_cov
79
+ assert np.sum(H) >= tol_support
80
+ assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
81
+
82
+
83
+ def test_mcd_issue1127():
84
+ # Check that the code does not break with X.shape = (3, 1)
85
+ # (i.e. n_support = n_samples)
86
+ rnd = np.random.RandomState(0)
87
+ X = rnd.normal(size=(3, 1))
88
+ mcd = MinCovDet()
89
+ mcd.fit(X)
90
+
91
+
92
+ def test_mcd_issue3367(global_random_seed):
93
+ # Check that MCD completes when the covariance matrix is singular
94
+ # i.e. one of the rows and columns are all zeros
95
+ rand_gen = np.random.RandomState(global_random_seed)
96
+
97
+ # Think of these as the values for X and Y -> 10 values between -5 and 5
98
+ data_values = np.linspace(-5, 5, 10).tolist()
99
+ # Get the cartesian product of all possible coordinate pairs from above set
100
+ data = np.array(list(itertools.product(data_values, data_values)))
101
+
102
+ # Add a third column that's all zeros to make our data a set of point
103
+ # within a plane, which means that the covariance matrix will be singular
104
+ data = np.hstack((data, np.zeros((data.shape[0], 1))))
105
+
106
+ # The below line of code should raise an exception if the covariance matrix
107
+ # is singular. As a further test, since we have points in XYZ, the
108
+ # principle components (Eigenvectors) of these directly relate to the
109
+ # geometry of the points. Since it's a plane, we should be able to test
110
+ # that the Eigenvector that corresponds to the smallest Eigenvalue is the
111
+ # plane normal, specifically [0, 0, 1], since everything is in the XY plane
112
+ # (as I've set it up above). To do this one would start by:
113
+ #
114
+ # evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
115
+ # normal = evecs[:, np.argmin(evals)]
116
+ #
117
+ # After which we need to assert that our `normal` is equal to [0, 0, 1].
118
+ # Do note that there is floating point error associated with this, so it's
119
+ # best to subtract the two and then compare some small tolerance (e.g.
120
+ # 1e-12).
121
+ MinCovDet(random_state=rand_gen).fit(data)
122
+
123
+
124
+ def test_mcd_support_covariance_is_zero():
125
+ # Check that MCD returns a ValueError with informative message when the
126
+ # covariance of the support data is equal to 0.
127
+ X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1])
128
+ X_1 = X_1.reshape(-1, 1)
129
+ X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3])
130
+ X_2 = X_2.reshape(-1, 1)
131
+ msg = (
132
+ "The covariance matrix of the support data is equal to 0, try to "
133
+ "increase support_fraction"
134
+ )
135
+ for X in [X_1, X_2]:
136
+ with pytest.raises(ValueError, match=msg):
137
+ MinCovDet().fit(X)
138
+
139
+
140
+ def test_mcd_increasing_det_warning(global_random_seed):
141
+ # Check that a warning is raised if we observe increasing determinants
142
+ # during the c_step. In theory the sequence of determinants should be
143
+ # decreasing. Increasing determinants are likely due to ill-conditioned
144
+ # covariance matrices that result in poor precision matrices.
145
+
146
+ X = [
147
+ [5.1, 3.5, 1.4, 0.2],
148
+ [4.9, 3.0, 1.4, 0.2],
149
+ [4.7, 3.2, 1.3, 0.2],
150
+ [4.6, 3.1, 1.5, 0.2],
151
+ [5.0, 3.6, 1.4, 0.2],
152
+ [4.6, 3.4, 1.4, 0.3],
153
+ [5.0, 3.4, 1.5, 0.2],
154
+ [4.4, 2.9, 1.4, 0.2],
155
+ [4.9, 3.1, 1.5, 0.1],
156
+ [5.4, 3.7, 1.5, 0.2],
157
+ [4.8, 3.4, 1.6, 0.2],
158
+ [4.8, 3.0, 1.4, 0.1],
159
+ [4.3, 3.0, 1.1, 0.1],
160
+ [5.1, 3.5, 1.4, 0.3],
161
+ [5.7, 3.8, 1.7, 0.3],
162
+ [5.4, 3.4, 1.7, 0.2],
163
+ [4.6, 3.6, 1.0, 0.2],
164
+ [5.0, 3.0, 1.6, 0.2],
165
+ [5.2, 3.5, 1.5, 0.2],
166
+ ]
167
+
168
+ mcd = MinCovDet(support_fraction=0.5, random_state=global_random_seed)
169
+ warn_msg = "Determinant has increased"
170
+ with pytest.warns(RuntimeWarning, match=warn_msg):
171
+ mcd.fit(X)
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_base.py ADDED
@@ -0,0 +1,266 @@
1
+ """Generic feature selection mixin"""
2
+
3
+ # Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+ from abc import ABCMeta, abstractmethod
8
+ from operator import attrgetter
9
+
10
+ import numpy as np
11
+ from scipy.sparse import csc_matrix, issparse
12
+
13
+ from ..base import TransformerMixin
14
+ from ..utils import (
15
+ _is_pandas_df,
16
+ _safe_indexing,
17
+ check_array,
18
+ safe_sqr,
19
+ )
20
+ from ..utils._set_output import _get_output_config
21
+ from ..utils._tags import _safe_tags
22
+ from ..utils.validation import _check_feature_names_in, check_is_fitted
23
+
24
+
25
+ class SelectorMixin(TransformerMixin, metaclass=ABCMeta):
26
+ """
27
+ Transformer mixin that performs feature selection given a support mask
28
+
29
+ This mixin provides a feature selector implementation with `transform` and
30
+ `inverse_transform` functionality given an implementation of
31
+ `_get_support_mask`.
32
+
33
+ Examples
34
+ --------
35
+ >>> import numpy as np
36
+ >>> from sklearn.datasets import load_iris
37
+ >>> from sklearn.base import BaseEstimator
38
+ >>> from sklearn.feature_selection import SelectorMixin
39
+ >>> class FeatureSelector(SelectorMixin, BaseEstimator):
40
+ ... def fit(self, X, y=None):
41
+ ... self.n_features_in_ = X.shape[1]
42
+ ... return self
43
+ ... def _get_support_mask(self):
44
+ ... mask = np.zeros(self.n_features_in_, dtype=bool)
45
+ ... mask[:2] = True # select the first two features
46
+ ... return mask
47
+ >>> X, y = load_iris(return_X_y=True)
48
+ >>> FeatureSelector().fit_transform(X, y).shape
49
+ (150, 2)
50
+ """
51
+
52
+ def get_support(self, indices=False):
53
+ """
54
+ Get a mask, or integer index, of the features selected.
55
+
56
+ Parameters
57
+ ----------
58
+ indices : bool, default=False
59
+ If True, the return value will be an array of integers, rather
60
+ than a boolean mask.
61
+
62
+ Returns
63
+ -------
64
+ support : array
65
+ An index that selects the retained features from a feature vector.
66
+ If `indices` is False, this is a boolean array of shape
67
+ [# input features], in which an element is True iff its
68
+ corresponding feature is selected for retention. If `indices` is
69
+ True, this is an integer array of shape [# output features] whose
70
+ values are indices into the input feature vector.
71
+ """
72
+ mask = self._get_support_mask()
73
+ return mask if not indices else np.where(mask)[0]
74
+
75
+ @abstractmethod
76
+ def _get_support_mask(self):
77
+ """
78
+ Get the boolean mask indicating which features are selected
79
+
80
+ Returns
81
+ -------
82
+ support : boolean array of shape [# input features]
83
+ An element is True iff its corresponding feature is selected for
84
+ retention.
85
+ """
86
+
87
+ def transform(self, X):
88
+ """Reduce X to the selected features.
89
+
90
+ Parameters
91
+ ----------
92
+ X : array of shape [n_samples, n_features]
93
+ The input samples.
94
+
95
+ Returns
96
+ -------
97
+ X_r : array of shape [n_samples, n_selected_features]
98
+ The input samples with only the selected features.
99
+ """
100
+ # Preserve X when X is a dataframe and the output is configured to
101
+ # be pandas.
102
+ output_config_dense = _get_output_config("transform", estimator=self)["dense"]
103
+ preserve_X = output_config_dense != "default" and _is_pandas_df(X)
104
+
105
+ # note: we use _safe_tags instead of _get_tags because this is a
106
+ # public Mixin.
107
+ X = self._validate_data(
108
+ X,
109
+ dtype=None,
110
+ accept_sparse="csr",
111
+ force_all_finite=not _safe_tags(self, key="allow_nan"),
112
+ cast_to_ndarray=not preserve_X,
113
+ reset=False,
114
+ )
115
+ return self._transform(X)
116
+
117
+ def _transform(self, X):
118
+ """Reduce X to the selected features."""
119
+ mask = self.get_support()
120
+ if not mask.any():
121
+ warnings.warn(
122
+ (
123
+ "No features were selected: either the data is"
124
+ " too noisy or the selection test too strict."
125
+ ),
126
+ UserWarning,
127
+ )
128
+ if hasattr(X, "iloc"):
129
+ return X.iloc[:, :0]
130
+ return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0))
131
+ return _safe_indexing(X, mask, axis=1)
132
+
133
+ def inverse_transform(self, X):
134
+ """Reverse the transformation operation.
135
+
136
+ Parameters
137
+ ----------
138
+ X : array of shape [n_samples, n_selected_features]
139
+ The input samples.
140
+
141
+ Returns
142
+ -------
143
+ X_r : array of shape [n_samples, n_original_features]
144
+ `X` with columns of zeros inserted where features would have
145
+ been removed by :meth:`transform`.
146
+ """
147
+ if issparse(X):
148
+ X = X.tocsc()
149
+ # insert additional entries in indptr:
150
+ # e.g. if transform changed indptr from [0 2 6 7] to [0 2 3]
151
+ # col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3]
152
+ it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
153
+ col_nonzeros = it.ravel()
154
+ indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
155
+ Xt = csc_matrix(
156
+ (X.data, X.indices, indptr),
157
+ shape=(X.shape[0], len(indptr) - 1),
158
+ dtype=X.dtype,
159
+ )
160
+ return Xt
161
+
162
+ support = self.get_support()
163
+ X = check_array(X, dtype=None)
164
+ if support.sum() != X.shape[1]:
165
+ raise ValueError("X has a different shape than during fitting.")
166
+
167
+ if X.ndim == 1:
168
+ X = X[None, :]
169
+ Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
170
+ Xt[:, support] = X
171
+ return Xt
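+ # --- Illustrative note (not part of the upstream file) -------------------
+ # With a (made-up) selector whose support mask is [True, False, True],
+ # `transform` keeps columns 0 and 2 and `inverse_transform` re-inserts a
+ # column of zeros where column 1 was dropped:
+ #
+ #   transform([[1, 2, 3]])           # -> array([[1, 3]])
+ #   inverse_transform([[1, 3]])      # -> array([[1, 0, 3]])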
172
+
173
+ def get_feature_names_out(self, input_features=None):
174
+ """Mask feature names according to selected features.
175
+
176
+ Parameters
177
+ ----------
178
+ input_features : array-like of str or None, default=None
179
+ Input features.
180
+
181
+ - If `input_features` is `None`, then `feature_names_in_` is
182
+ used as feature names in. If `feature_names_in_` is not defined,
183
+ then the following input feature names are generated:
184
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
185
+ - If `input_features` is an array-like, then `input_features` must
186
+ match `feature_names_in_` if `feature_names_in_` is defined.
187
+
188
+ Returns
189
+ -------
190
+ feature_names_out : ndarray of str objects
191
+ Transformed feature names.
192
+ """
193
+ check_is_fitted(self)
194
+ input_features = _check_feature_names_in(self, input_features)
195
+ return input_features[self.get_support()]
196
+
197
+
198
+ def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1):
199
+ """
200
+ Retrieve and aggregate (ndim > 1) the feature importances
201
+ from an estimator. Also optionally applies transformation.
202
+
203
+ Parameters
204
+ ----------
205
+ estimator : estimator
206
+ A scikit-learn estimator from which we want to get the feature
207
+ importances.
208
+
209
+ getter : "auto", str or callable
210
+ An attribute or a callable to get the feature importance. If `"auto"`,
211
+ `estimator` is expected to expose `coef_` or `feature_importances_`.
212
+
213
+ transform_func : {"norm", "square"}, default=None
214
+ The transform to apply to the feature importances. By default (`None`)
215
+ no transformation is applied.
216
+
217
+ norm_order : int, default=1
218
+ The norm order to apply when `transform_func="norm"`. Only applied
219
+ when `importances.ndim > 1`.
220
+
221
+ Returns
222
+ -------
223
+ importances : ndarray of shape (n_features,)
224
+ The feature importances, optionally transformed.
225
+ """
226
+ if isinstance(getter, str):
227
+ if getter == "auto":
228
+ if hasattr(estimator, "coef_"):
229
+ getter = attrgetter("coef_")
230
+ elif hasattr(estimator, "feature_importances_"):
231
+ getter = attrgetter("feature_importances_")
232
+ else:
233
+ raise ValueError(
234
+ "when `importance_getter=='auto'`, the underlying "
235
+ f"estimator {estimator.__class__.__name__} should have "
236
+ "`coef_` or `feature_importances_` attribute. Either "
237
+ "pass a fitted estimator to feature selector or call fit "
238
+ "before calling transform."
239
+ )
240
+ else:
241
+ getter = attrgetter(getter)
242
+ elif not callable(getter):
243
+ raise ValueError("`importance_getter` has to be a string or `callable`")
244
+
245
+ importances = getter(estimator)
246
+
247
+ if transform_func is None:
248
+ return importances
249
+ elif transform_func == "norm":
250
+ if importances.ndim == 1:
251
+ importances = np.abs(importances)
252
+ else:
253
+ importances = np.linalg.norm(importances, axis=0, ord=norm_order)
254
+ elif transform_func == "square":
255
+ if importances.ndim == 1:
256
+ importances = safe_sqr(importances)
257
+ else:
258
+ importances = safe_sqr(importances).sum(axis=0)
259
+ else:
260
+ raise ValueError(
261
+ "Valid values for `transform_func` are "
262
+ "None, 'norm' and 'square'. Only these "
263
+ "transformations are currently supported."
264
+ )
265
+
266
+ return importances
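+ # --- Illustrative note (not part of the upstream file) -------------------
+ # How the aggregation above behaves for a 2-D `coef_` (e.g. a multiclass
+ # linear model); the numbers are made up for illustration only:
+ #
+ #   coef = np.array([[0.5, -1.0, 0.0],
+ #                    [2.0,  0.0, 0.1]])   # shape (n_classes, n_features)
+ #   np.linalg.norm(coef, axis=0, ord=1)   # "norm", norm_order=1 -> [2.5, 1.0, 0.1]
+ #   safe_sqr(coef).sum(axis=0)            # "square"             -> [4.25, 1.0, 0.01]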
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py ADDED
@@ -0,0 +1,522 @@
1
+ # Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
2
+ # License: BSD 3 clause
3
+
4
+ from copy import deepcopy
5
+ from numbers import Integral, Real
6
+
7
+ import numpy as np
8
+
9
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone
10
+ from ..exceptions import NotFittedError
11
+ from ..utils._param_validation import HasMethods, Interval, Options
12
+ from ..utils._tags import _safe_tags
13
+ from ..utils.metadata_routing import (
14
+ MetadataRouter,
15
+ MethodMapping,
16
+ _routing_enabled,
17
+ process_routing,
18
+ )
19
+ from ..utils.metaestimators import available_if
20
+ from ..utils.validation import _num_features, check_is_fitted, check_scalar
21
+ from ._base import SelectorMixin, _get_feature_importances
22
+
23
+
24
+ def _calculate_threshold(estimator, importances, threshold):
25
+ """Interpret the threshold value"""
26
+
27
+ if threshold is None:
28
+ # determine default from estimator
29
+ est_name = estimator.__class__.__name__
30
+ is_l1_penalized = hasattr(estimator, "penalty") and estimator.penalty == "l1"
31
+ is_lasso = "Lasso" in est_name
32
+ is_elasticnet_l1_penalized = "ElasticNet" in est_name and (
33
+ (hasattr(estimator, "l1_ratio_") and np.isclose(estimator.l1_ratio_, 1.0))
34
+ or (hasattr(estimator, "l1_ratio") and np.isclose(estimator.l1_ratio, 1.0))
35
+ )
36
+ if is_l1_penalized or is_lasso or is_elasticnet_l1_penalized:
37
+ # the natural default threshold is 0 when l1 penalty was used
38
+ threshold = 1e-5
39
+ else:
40
+ threshold = "mean"
41
+
42
+ if isinstance(threshold, str):
43
+ if "*" in threshold:
44
+ scale, reference = threshold.split("*")
45
+ scale = float(scale.strip())
46
+ reference = reference.strip()
47
+
48
+ if reference == "median":
49
+ reference = np.median(importances)
50
+ elif reference == "mean":
51
+ reference = np.mean(importances)
52
+ else:
53
+ raise ValueError("Unknown reference: " + reference)
54
+
55
+ threshold = scale * reference
56
+
57
+ elif threshold == "median":
58
+ threshold = np.median(importances)
59
+
60
+ elif threshold == "mean":
61
+ threshold = np.mean(importances)
62
+
63
+ else:
64
+ raise ValueError(
65
+ "Expected threshold='mean' or threshold='median', got %s" % threshold
66
+ )
67
+
68
+ else:
69
+ threshold = float(threshold)
70
+
71
+ return threshold
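+ # --- Illustrative note (not part of the upstream file) -------------------
+ # The string grammar accepted above is either "mean", "median" or
+ # "<scale>*<reference>" with the same two references. With made-up
+ # importances [0.0, 0.1, 0.2, 0.7] (and any estimator `est`, since the
+ # estimator is only inspected when `threshold` is None):
+ #
+ #   _calculate_threshold(est, importances, "mean")       # -> 0.25
+ #   _calculate_threshold(est, importances, "median")     # -> 0.15
+ #   _calculate_threshold(est, importances, "1.25*mean")  # -> 0.3125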
72
+
73
+
74
+ def _estimator_has(attr):
75
+ """Check if we can delegate a method to the underlying estimator.
76
+
77
+ First, we check the fitted `estimator_` if available, otherwise we check the
78
+ unfitted `estimator`. We raise the original `AttributeError` if `attr` does
79
+ not exist. This function is used together with `available_if`.
80
+ """
81
+
82
+ def check(self):
83
+ if hasattr(self, "estimator_"):
84
+ getattr(self.estimator_, attr)
85
+ else:
86
+ getattr(self.estimator, attr)
87
+
88
+ return True
89
+
90
+ return check
91
+
92
+
93
+ class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
94
+ """Meta-transformer for selecting features based on importance weights.
95
+
96
+ .. versionadded:: 0.17
97
+
98
+ Read more in the :ref:`User Guide <select_from_model>`.
99
+
100
+ Parameters
101
+ ----------
102
+ estimator : object
103
+ The base estimator from which the transformer is built.
104
+ This can be either a fitted (if ``prefit`` is set to True)
105
+ or a non-fitted estimator. The estimator should have a
106
+ ``feature_importances_`` or ``coef_`` attribute after fitting.
107
+ Otherwise, the ``importance_getter`` parameter should be used.
108
+
109
+ threshold : str or float, default=None
110
+ The threshold value to use for feature selection. Features whose
111
+ absolute importance value is greater or equal are kept while the others
112
+ are discarded. If "median" (resp. "mean"), then the ``threshold`` value
113
+ is the median (resp. the mean) of the feature importances. A scaling
114
+ factor (e.g., "1.25*mean") may also be used. If None and if the
115
+ estimator has a parameter penalty set to l1, either explicitly
116
+ or implicitly (e.g., Lasso), the threshold used is 1e-5.
117
+ Otherwise, "mean" is used by default.
118
+
119
+ prefit : bool, default=False
120
+ Whether a prefit model is expected to be passed into the constructor
121
+ directly or not.
122
+ If `True`, `estimator` must be a fitted estimator.
123
+ If `False`, `estimator` is fitted and updated by calling
124
+ `fit` and `partial_fit`, respectively.
125
+
126
+ norm_order : non-zero int, inf, -inf, default=1
127
+ Order of the norm used to filter the vectors of coefficients below
128
+ ``threshold`` in the case where the ``coef_`` attribute of the
129
+ estimator is of dimension 2.
130
+
131
+ max_features : int, callable, default=None
132
+ The maximum number of features to select.
133
+
134
+ - If an integer, then it specifies the maximum number of features to
135
+ allow.
136
+ - If a callable, then it specifies how to calculate the maximum number of
137
+ features allowed by using the output of `max_features(X)`.
138
+ - If `None`, then all features are kept.
139
+
140
+ To only select based on ``max_features``, set ``threshold=-np.inf``.
141
+
142
+ .. versionadded:: 0.20
143
+ .. versionchanged:: 1.1
144
+ `max_features` accepts a callable.
145
+
146
+ importance_getter : str or callable, default='auto'
147
+ If 'auto', uses the feature importance either through a ``coef_``
148
+ attribute or ``feature_importances_`` attribute of estimator.
149
+
150
+ Also accepts a string that specifies an attribute name/path
151
+ for extracting feature importance (implemented with `attrgetter`).
152
+ For example, give `regressor_.coef_` in case of
153
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
154
+ `named_steps.clf.feature_importances_` in case of
155
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
156
+
157
+ If `callable`, overrides the default feature importance getter.
158
+ The callable is passed with the fitted estimator and it should
159
+ return importance for each feature.
160
+
161
+ .. versionadded:: 0.24
162
+
163
+ Attributes
164
+ ----------
165
+ estimator_ : estimator
166
+ The base estimator from which the transformer is built. This attribute
167
+ exists only when `fit` has been called.
168
+
169
+ - If `prefit=True`, it is a deep copy of `estimator`.
170
+ - If `prefit=False`, it is a clone of `estimator` and fit on the data
171
+ passed to `fit` or `partial_fit`.
172
+
173
+ n_features_in_ : int
174
+ Number of features seen during :term:`fit`. Only defined if the
175
+ underlying estimator exposes such an attribute when fit.
176
+
177
+ .. versionadded:: 0.24
178
+
179
+ max_features_ : int
180
+ Maximum number of features calculated during :term:`fit`. Only defined
181
+ if the ``max_features`` is not `None`.
182
+
183
+ - If `max_features` is an `int`, then `max_features_ = max_features`.
184
+ - If `max_features` is a callable, then `max_features_ = max_features(X)`.
185
+
186
+ .. versionadded:: 1.1
187
+
188
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
189
+ Names of features seen during :term:`fit`. Defined only when `X`
190
+ has feature names that are all strings.
191
+
192
+ .. versionadded:: 1.0
193
+
194
+ threshold_ : float
195
+ The threshold value used for feature selection.
196
+
197
+ See Also
198
+ --------
199
+ RFE : Recursive feature elimination based on importance weights.
200
+ RFECV : Recursive feature elimination with built-in cross-validated
201
+ selection of the best number of features.
202
+ SequentialFeatureSelector : Sequential cross-validation based feature
203
+ selection. Does not rely on importance weights.
204
+
205
+ Notes
206
+ -----
207
+ Allows NaN/Inf in the input if the underlying estimator does as well.
208
+
209
+ Examples
210
+ --------
211
+ >>> from sklearn.feature_selection import SelectFromModel
212
+ >>> from sklearn.linear_model import LogisticRegression
213
+ >>> X = [[ 0.87, -1.34, 0.31 ],
214
+ ... [-2.79, -0.02, -0.85 ],
215
+ ... [-1.34, -0.48, -2.55 ],
216
+ ... [ 1.92, 1.48, 0.65 ]]
217
+ >>> y = [0, 1, 0, 1]
218
+ >>> selector = SelectFromModel(estimator=LogisticRegression()).fit(X, y)
219
+ >>> selector.estimator_.coef_
220
+ array([[-0.3252..., 0.8345..., 0.4976...]])
221
+ >>> selector.threshold_
222
+ 0.55249...
223
+ >>> selector.get_support()
224
+ array([False, True, False])
225
+ >>> selector.transform(X)
226
+ array([[-1.34],
227
+ [-0.02],
228
+ [-0.48],
229
+ [ 1.48]])
230
+
231
+ Using a callable to create a selector that can use no more than half
232
+ of the input features.
233
+
234
+ >>> def half_callable(X):
235
+ ... return round(len(X[0]) / 2)
236
+ >>> half_selector = SelectFromModel(estimator=LogisticRegression(),
237
+ ... max_features=half_callable)
238
+ >>> _ = half_selector.fit(X, y)
239
+ >>> half_selector.max_features_
240
+ 2
241
+ """
242
+
243
+ _parameter_constraints: dict = {
244
+ "estimator": [HasMethods("fit")],
245
+ "threshold": [Interval(Real, None, None, closed="both"), str, None],
246
+ "prefit": ["boolean"],
247
+ "norm_order": [
248
+ Interval(Integral, None, -1, closed="right"),
249
+ Interval(Integral, 1, None, closed="left"),
250
+ Options(Real, {np.inf, -np.inf}),
251
+ ],
252
+ "max_features": [Interval(Integral, 0, None, closed="left"), callable, None],
253
+ "importance_getter": [str, callable],
254
+ }
255
+
256
+ def __init__(
257
+ self,
258
+ estimator,
259
+ *,
260
+ threshold=None,
261
+ prefit=False,
262
+ norm_order=1,
263
+ max_features=None,
264
+ importance_getter="auto",
265
+ ):
266
+ self.estimator = estimator
267
+ self.threshold = threshold
268
+ self.prefit = prefit
269
+ self.importance_getter = importance_getter
270
+ self.norm_order = norm_order
271
+ self.max_features = max_features
272
+
273
+ def _get_support_mask(self):
274
+ estimator = getattr(self, "estimator_", self.estimator)
275
+ max_features = getattr(self, "max_features_", self.max_features)
276
+
277
+ if self.prefit:
278
+ try:
279
+ check_is_fitted(self.estimator)
280
+ except NotFittedError as exc:
281
+ raise NotFittedError(
282
+ "When `prefit=True`, `estimator` is expected to be a fitted "
283
+ "estimator."
284
+ ) from exc
285
+ if callable(max_features):
286
+ # This branch is executed when `transform` is called directly and thus
287
+ # `max_features_` is not set and we fall back to using `self.max_features`
288
+ # that is not validated
289
+ raise NotFittedError(
290
+ "When `prefit=True` and `max_features` is a callable, call `fit` "
291
+ "before calling `transform`."
292
+ )
293
+ elif max_features is not None and not isinstance(max_features, Integral):
294
+ raise ValueError(
295
+ f"`max_features` must be an integer. Got `max_features={max_features}` "
296
+ "instead."
297
+ )
298
+
299
+ scores = _get_feature_importances(
300
+ estimator=estimator,
301
+ getter=self.importance_getter,
302
+ transform_func="norm",
303
+ norm_order=self.norm_order,
304
+ )
305
+ threshold = _calculate_threshold(estimator, scores, self.threshold)
306
+ if self.max_features is not None:
307
+ mask = np.zeros_like(scores, dtype=bool)
308
+ candidate_indices = np.argsort(-scores, kind="mergesort")[:max_features]
309
+ mask[candidate_indices] = True
310
+ else:
311
+ mask = np.ones_like(scores, dtype=bool)
312
+ mask[scores < threshold] = False
313
+ return mask
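+ # --- Illustrative note (not part of the upstream file) -------------------
+ # How `threshold` and `max_features` combine in the mask above, with
+ # made-up scores [0.1, 0.4, 0.3, 0.2], threshold=0.25 and max_features=3:
+ #   * top-3 candidates by score: features 1, 2 and 3
+ #   * the threshold then drops feature 3 (0.2 < 0.25), keeping 1 and 2
+ # Passing `threshold=-np.inf` keeps every `max_features` candidate.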
314
+
315
+ def _check_max_features(self, X):
316
+ if self.max_features is not None:
317
+ n_features = _num_features(X)
318
+
319
+ if callable(self.max_features):
320
+ max_features = self.max_features(X)
321
+ else: # int
322
+ max_features = self.max_features
323
+
324
+ check_scalar(
325
+ max_features,
326
+ "max_features",
327
+ Integral,
328
+ min_val=0,
329
+ max_val=n_features,
330
+ )
331
+ self.max_features_ = max_features
332
+
333
+ @_fit_context(
334
+ # SelectFromModel.estimator is not validated yet
335
+ prefer_skip_nested_validation=False
336
+ )
337
+ def fit(self, X, y=None, **fit_params):
338
+ """Fit the SelectFromModel meta-transformer.
339
+
340
+ Parameters
341
+ ----------
342
+ X : array-like of shape (n_samples, n_features)
343
+ The training input samples.
344
+
345
+ y : array-like of shape (n_samples,), default=None
346
+ The target values (integers that correspond to classes in
347
+ classification, real numbers in regression).
348
+
349
+ **fit_params : dict
350
+ - If `enable_metadata_routing=False` (default):
351
+
352
+ Parameters directly passed to the `fit` method of the
353
+ sub-estimator. They are ignored if `prefit=True`.
354
+
355
+ - If `enable_metadata_routing=True`:
356
+
357
+ Parameters safely routed to the `fit` method of the
358
+ sub-estimator. They are ignored if `prefit=True`.
359
+
360
+ .. versionchanged:: 1.4
361
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
362
+ more details.
363
+
364
+ Returns
365
+ -------
366
+ self : object
367
+ Fitted estimator.
368
+ """
369
+ self._check_max_features(X)
370
+
371
+ if self.prefit:
372
+ try:
373
+ check_is_fitted(self.estimator)
374
+ except NotFittedError as exc:
375
+ raise NotFittedError(
376
+ "When `prefit=True`, `estimator` is expected to be a fitted "
377
+ "estimator."
378
+ ) from exc
379
+ self.estimator_ = deepcopy(self.estimator)
380
+ else:
381
+ if _routing_enabled():
382
+ routed_params = process_routing(self, "fit", **fit_params)
383
+ self.estimator_ = clone(self.estimator)
384
+ self.estimator_.fit(X, y, **routed_params.estimator.fit)
385
+ else:
386
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
387
+ self.estimator_ = clone(self.estimator)
388
+ self.estimator_.fit(X, y, **fit_params)
389
+
390
+ if hasattr(self.estimator_, "feature_names_in_"):
391
+ self.feature_names_in_ = self.estimator_.feature_names_in_
392
+ else:
393
+ self._check_feature_names(X, reset=True)
394
+
395
+ return self
396
+
397
+ @property
398
+ def threshold_(self):
399
+ """Threshold value used for feature selection."""
400
+ scores = _get_feature_importances(
401
+ estimator=self.estimator_,
402
+ getter=self.importance_getter,
403
+ transform_func="norm",
404
+ norm_order=self.norm_order,
405
+ )
406
+ return _calculate_threshold(self.estimator, scores, self.threshold)
407
+
408
+ @available_if(_estimator_has("partial_fit"))
409
+ @_fit_context(
410
+ # SelectFromModel.estimator is not validated yet
411
+ prefer_skip_nested_validation=False
412
+ )
413
+ def partial_fit(self, X, y=None, **partial_fit_params):
414
+ """Fit the SelectFromModel meta-transformer only once.
415
+
416
+ Parameters
417
+ ----------
418
+ X : array-like of shape (n_samples, n_features)
419
+ The training input samples.
420
+
421
+ y : array-like of shape (n_samples,), default=None
422
+ The target values (integers that correspond to classes in
423
+ classification, real numbers in regression).
424
+
425
+ **partial_fit_params : dict
426
+ - If `enable_metadata_routing=False` (default):
427
+
428
+ Parameters directly passed to the `partial_fit` method of the
429
+ sub-estimator.
430
+
431
+ - If `enable_metadata_routing=True`:
432
+
433
+ Parameters passed to the `partial_fit` method of the
434
+ sub-estimator. They are ignored if `prefit=True`.
435
+
436
+ .. versionchanged:: 1.4
437
+ `**partial_fit_params` are routed to the sub-estimator, if
438
+ `enable_metadata_routing=True` is set via
439
+ :func:`~sklearn.set_config`, which allows for aliasing.
440
+
441
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
442
+ more details.
443
+
444
+ Returns
445
+ -------
446
+ self : object
447
+ Fitted estimator.
448
+ """
449
+ first_call = not hasattr(self, "estimator_")
450
+
451
+ if first_call:
452
+ self._check_max_features(X)
453
+
454
+ if self.prefit:
455
+ if first_call:
456
+ try:
457
+ check_is_fitted(self.estimator)
458
+ except NotFittedError as exc:
459
+ raise NotFittedError(
460
+ "When `prefit=True`, `estimator` is expected to be a fitted "
461
+ "estimator."
462
+ ) from exc
463
+ self.estimator_ = deepcopy(self.estimator)
464
+ return self
465
+
466
+ if first_call:
467
+ self.estimator_ = clone(self.estimator)
468
+ if _routing_enabled():
469
+ routed_params = process_routing(self, "partial_fit", **partial_fit_params)
470
+ self.estimator_ = clone(self.estimator)
471
+ self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit)
472
+ else:
473
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
474
+ self.estimator_.partial_fit(X, y, **partial_fit_params)
475
+
476
+ if hasattr(self.estimator_, "feature_names_in_"):
477
+ self.feature_names_in_ = self.estimator_.feature_names_in_
478
+ else:
479
+ self._check_feature_names(X, reset=first_call)
480
+
481
+ return self
482
+
483
+ @property
484
+ def n_features_in_(self):
485
+ """Number of features seen during `fit`."""
486
+ # For consistency with other estimators we raise an AttributeError so
487
+ # that hasattr() fails if the estimator isn't fitted.
488
+ try:
489
+ check_is_fitted(self)
490
+ except NotFittedError as nfe:
491
+ raise AttributeError(
492
+ "{} object has no n_features_in_ attribute.".format(
493
+ self.__class__.__name__
494
+ )
495
+ ) from nfe
496
+
497
+ return self.estimator_.n_features_in_
498
+
499
+ def get_metadata_routing(self):
500
+ """Get metadata routing of this object.
501
+
502
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
503
+ mechanism works.
504
+
505
+ .. versionadded:: 1.4
506
+
507
+ Returns
508
+ -------
509
+ routing : MetadataRouter
510
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
511
+ routing information.
512
+ """
513
+ router = MetadataRouter(owner=self.__class__.__name__).add(
514
+ estimator=self.estimator,
515
+ method_mapping=MethodMapping()
516
+ .add(callee="partial_fit", caller="partial_fit")
517
+ .add(callee="fit", caller="fit"),
518
+ )
519
+ return router
520
+
521
+ def _more_tags(self):
522
+ return {"allow_nan": _safe_tags(self.estimator, key="allow_nan")}
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py ADDED
@@ -0,0 +1,514 @@
1
+ # Author: Nikolay Mayorov <[email protected]>
2
+ # License: 3-clause BSD
3
+
4
+ from numbers import Integral
5
+
6
+ import numpy as np
7
+ from scipy.sparse import issparse
8
+ from scipy.special import digamma
9
+
10
+ from ..metrics.cluster import mutual_info_score
11
+ from ..neighbors import KDTree, NearestNeighbors
12
+ from ..preprocessing import scale
13
+ from ..utils import check_random_state
14
+ from ..utils._param_validation import Interval, StrOptions, validate_params
15
+ from ..utils.multiclass import check_classification_targets
16
+ from ..utils.validation import check_array, check_X_y
17
+
18
+
19
+ def _compute_mi_cc(x, y, n_neighbors):
20
+ """Compute mutual information between two continuous variables.
21
+
22
+ Parameters
23
+ ----------
24
+ x, y : ndarray, shape (n_samples,)
25
+ Samples of two continuous random variables, must have an identical
26
+ shape.
27
+
28
+ n_neighbors : int
29
+ Number of nearest neighbors to search for each point, see [1]_.
30
+
31
+ Returns
32
+ -------
33
+ mi : float
34
+ Estimated mutual information in nat units. If it turned out to be
35
+ negative, it is replaced by 0.
36
+
37
+ Notes
38
+ -----
39
+ True mutual information can't be negative. If its estimate by a numerical
40
+ method is negative, it means (providing the method is adequate) that the
41
+ mutual information is close to 0 and replacing it by 0 is a reasonable
42
+ strategy.
43
+
44
+ References
45
+ ----------
46
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
47
+ information". Phys. Rev. E 69, 2004.
48
+ """
49
+ n_samples = x.size
50
+
51
+ x = x.reshape((-1, 1))
52
+ y = y.reshape((-1, 1))
53
+ xy = np.hstack((x, y))
54
+
55
+ # Here we rely on NearestNeighbors to select the fastest algorithm.
56
+ nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors)
57
+
58
+ nn.fit(xy)
59
+ radius = nn.kneighbors()[0]
60
+ radius = np.nextafter(radius[:, -1], 0)
61
+
62
+ # KDTree is explicitly fit to allow for the querying of number of
63
+ # neighbors within a specified radius
64
+ kd = KDTree(x, metric="chebyshev")
65
+ nx = kd.query_radius(x, radius, count_only=True, return_distance=False)
66
+ nx = np.array(nx) - 1.0
67
+
68
+ kd = KDTree(y, metric="chebyshev")
69
+ ny = kd.query_radius(y, radius, count_only=True, return_distance=False)
70
+ ny = np.array(ny) - 1.0
71
+
72
+ mi = (
73
+ digamma(n_samples)
74
+ + digamma(n_neighbors)
75
+ - np.mean(digamma(nx + 1))
76
+ - np.mean(digamma(ny + 1))
77
+ )
78
+
79
+ return max(0, mi)
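+ # --- Illustrative note (not part of the upstream file) -------------------
+ # The expression above is the Kraskov-Stoegbauer-Grassberger (KSG)
+ # estimator from reference [1]:
+ #
+ #   I(X; Y) ~= psi(k) + psi(N) - mean(psi(n_x + 1) + psi(n_y + 1))
+ #
+ # where psi is the digamma function, N is `n_samples`, k is `n_neighbors`,
+ # and n_x / n_y (the arrays `nx` / `ny`) count, for each sample, the points
+ # that fall within the Chebyshev distance to its k-th joint neighbor.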
80
+
81
+
82
+ def _compute_mi_cd(c, d, n_neighbors):
83
+ """Compute mutual information between continuous and discrete variables.
84
+
85
+ Parameters
86
+ ----------
87
+ c : ndarray, shape (n_samples,)
88
+ Samples of a continuous random variable.
89
+
90
+ d : ndarray, shape (n_samples,)
91
+ Samples of a discrete random variable.
92
+
93
+ n_neighbors : int
94
+ Number of nearest neighbors to search for each point, see [1]_.
95
+
96
+ Returns
97
+ -------
98
+ mi : float
99
+ Estimated mutual information in nat units. If it turned out to be
100
+ negative, it is replaced by 0.
101
+
102
+ Notes
103
+ -----
104
+ True mutual information can't be negative. If its estimate by a numerical
105
+ method is negative, it means (providing the method is adequate) that the
106
+ mutual information is close to 0 and replacing it by 0 is a reasonable
107
+ strategy.
108
+
109
+ References
110
+ ----------
111
+ .. [1] B. C. Ross "Mutual Information between Discrete and Continuous
112
+ Data Sets". PLoS ONE 9(2), 2014.
113
+ """
114
+ n_samples = c.shape[0]
115
+ c = c.reshape((-1, 1))
116
+
117
+ radius = np.empty(n_samples)
118
+ label_counts = np.empty(n_samples)
119
+ k_all = np.empty(n_samples)
120
+ nn = NearestNeighbors()
121
+ for label in np.unique(d):
122
+ mask = d == label
123
+ count = np.sum(mask)
124
+ if count > 1:
125
+ k = min(n_neighbors, count - 1)
126
+ nn.set_params(n_neighbors=k)
127
+ nn.fit(c[mask])
128
+ r = nn.kneighbors()[0]
129
+ radius[mask] = np.nextafter(r[:, -1], 0)
130
+ k_all[mask] = k
131
+ label_counts[mask] = count
132
+
133
+ # Ignore points with unique labels.
134
+ mask = label_counts > 1
135
+ n_samples = np.sum(mask)
136
+ label_counts = label_counts[mask]
137
+ k_all = k_all[mask]
138
+ c = c[mask]
139
+ radius = radius[mask]
140
+
141
+ kd = KDTree(c)
142
+ m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)
143
+ m_all = np.array(m_all)
144
+
145
+ mi = (
146
+ digamma(n_samples)
147
+ + np.mean(digamma(k_all))
148
+ - np.mean(digamma(label_counts))
149
+ - np.mean(digamma(m_all))
150
+ )
151
+
152
+ return max(0, mi)
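+ # --- Illustrative note (not part of the upstream file) -------------------
+ # The expression above is the Ross estimator from reference [1]:
+ #
+ #   I(X; D) ~= psi(N) + mean(psi(k)) - mean(psi(N_d)) - mean(psi(m))
+ #
+ # where N is the number of retained samples, k the per-point neighbor count
+ # (`k_all`), N_d the number of samples sharing the point's label
+ # (`label_counts`) and m the neighbor count over all labels within the same
+ # radius (`m_all`).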
153
+
154
+
155
+ def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
156
+ """Compute mutual information between two variables.
157
+
158
+ This is a simple wrapper which selects a proper function to call based on
159
+ whether `x` and `y` are discrete or not.
160
+ """
161
+ if x_discrete and y_discrete:
162
+ return mutual_info_score(x, y)
163
+ elif x_discrete and not y_discrete:
164
+ return _compute_mi_cd(y, x, n_neighbors)
165
+ elif not x_discrete and y_discrete:
166
+ return _compute_mi_cd(x, y, n_neighbors)
167
+ else:
168
+ return _compute_mi_cc(x, y, n_neighbors)
169
+
170
+
171
+ def _iterate_columns(X, columns=None):
172
+ """Iterate over columns of a matrix.
173
+
174
+ Parameters
175
+ ----------
176
+ X : ndarray or csc_matrix, shape (n_samples, n_features)
177
+ Matrix over which to iterate.
178
+
179
+ columns : iterable or None, default=None
180
+ Indices of columns to iterate over. If None, iterate over all columns.
181
+
182
+ Yields
183
+ ------
184
+ x : ndarray, shape (n_samples,)
185
+ Columns of `X` in dense format.
186
+ """
187
+ if columns is None:
188
+ columns = range(X.shape[1])
189
+
190
+ if issparse(X):
191
+ for i in columns:
192
+ x = np.zeros(X.shape[0])
193
+ start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]
194
+ x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
195
+ yield x
196
+ else:
197
+ for i in columns:
198
+ yield X[:, i]
199
+
200
+
201
+ def _estimate_mi(
202
+ X,
203
+ y,
204
+ discrete_features="auto",
205
+ discrete_target=False,
206
+ n_neighbors=3,
207
+ copy=True,
208
+ random_state=None,
209
+ ):
210
+ """Estimate mutual information between the features and the target.
211
+
212
+ Parameters
213
+ ----------
214
+ X : array-like or sparse matrix, shape (n_samples, n_features)
215
+ Feature matrix.
216
+
217
+ y : array-like of shape (n_samples,)
218
+ Target vector.
219
+
220
+ discrete_features : {'auto', bool, array-like}, default='auto'
221
+ If bool, then determines whether to consider all features discrete
222
+ or continuous. If array, then it should be either a boolean mask
223
+ with shape (n_features,) or array with indices of discrete features.
224
+ If 'auto', it is assigned to False for dense `X` and to True for
225
+ sparse `X`.
226
+
227
+ discrete_target : bool, default=False
228
+ Whether to consider `y` as a discrete variable.
229
+
230
+ n_neighbors : int, default=3
231
+ Number of neighbors to use for MI estimation for continuous variables,
232
+ see [1]_ and [2]_. Higher values reduce variance of the estimation, but
233
+ could introduce a bias.
234
+
235
+ copy : bool, default=True
236
+ Whether to make a copy of the given data. If set to False, the initial
237
+ data will be overwritten.
238
+
239
+ random_state : int, RandomState instance or None, default=None
240
+ Determines random number generation for adding small noise to
241
+ continuous variables in order to remove repeated values.
242
+ Pass an int for reproducible results across multiple function calls.
243
+ See :term:`Glossary <random_state>`.
244
+
245
+ Returns
246
+ -------
247
+ mi : ndarray, shape (n_features,)
248
+ Estimated mutual information between each feature and the target in
249
+ nat units. A negative value will be replaced by 0.
250
+
251
+ References
252
+ ----------
253
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
254
+ information". Phys. Rev. E 69, 2004.
255
+ .. [2] B. C. Ross "Mutual Information between Discrete and Continuous
256
+ Data Sets". PLoS ONE 9(2), 2014.
257
+ """
258
+ X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target)
259
+ n_samples, n_features = X.shape
260
+
261
+ if isinstance(discrete_features, (str, bool)):
262
+ if isinstance(discrete_features, str):
263
+ if discrete_features == "auto":
264
+ discrete_features = issparse(X)
265
+ else:
266
+ raise ValueError("Invalid string value for discrete_features.")
267
+ discrete_mask = np.empty(n_features, dtype=bool)
268
+ discrete_mask.fill(discrete_features)
269
+ else:
270
+ discrete_features = check_array(discrete_features, ensure_2d=False)
271
+ if discrete_features.dtype != "bool":
272
+ discrete_mask = np.zeros(n_features, dtype=bool)
273
+ discrete_mask[discrete_features] = True
274
+ else:
275
+ discrete_mask = discrete_features
276
+
277
+ continuous_mask = ~discrete_mask
278
+ if np.any(continuous_mask) and issparse(X):
279
+ raise ValueError("Sparse matrix `X` can't have continuous features.")
280
+
281
+ rng = check_random_state(random_state)
282
+ if np.any(continuous_mask):
283
+ X = X.astype(np.float64, copy=copy)
284
+ X[:, continuous_mask] = scale(
285
+ X[:, continuous_mask], with_mean=False, copy=False
286
+ )
287
+
288
+ # Add small noise to continuous features as advised in Kraskov et al.
289
+ means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))
290
+ X[:, continuous_mask] += (
291
+ 1e-10
292
+ * means
293
+ * rng.standard_normal(size=(n_samples, np.sum(continuous_mask)))
294
+ )
295
+
296
+ if not discrete_target:
297
+ y = scale(y, with_mean=False)
298
+ y += (
299
+ 1e-10
300
+ * np.maximum(1, np.mean(np.abs(y)))
301
+ * rng.standard_normal(size=n_samples)
302
+ )
303
+
304
+ mi = [
305
+ _compute_mi(x, y, discrete_feature, discrete_target, n_neighbors)
306
+ for x, discrete_feature in zip(_iterate_columns(X), discrete_mask)
307
+ ]
308
+
309
+ return np.array(mi)
310
+
311
+
312
+ @validate_params(
313
+ {
314
+ "X": ["array-like", "sparse matrix"],
315
+ "y": ["array-like"],
316
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
317
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
318
+ "copy": ["boolean"],
319
+ "random_state": ["random_state"],
320
+ },
321
+ prefer_skip_nested_validation=True,
322
+ )
323
+ def mutual_info_regression(
324
+ X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
325
+ ):
326
+ """Estimate mutual information for a continuous target variable.
327
+
328
+ Mutual information (MI) [1]_ between two random variables is a non-negative
329
+ value, which measures the dependency between the variables. It is equal
330
+ to zero if and only if two random variables are independent, and higher
331
+ values mean higher dependency.
332
+
333
+ The function relies on nonparametric methods based on entropy estimation
334
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
335
+ methods are based on the idea originally proposed in [4]_.
336
+
337
+ It can be used for univariate feature selection, read more in the
338
+ :ref:`User Guide <univariate_feature_selection>`.
339
+
340
+ Parameters
341
+ ----------
342
+ X : array-like or sparse matrix, shape (n_samples, n_features)
343
+ Feature matrix.
344
+
345
+ y : array-like of shape (n_samples,)
346
+ Target vector.
347
+
348
+ discrete_features : {'auto', bool, array-like}, default='auto'
349
+ If bool, then determines whether to consider all features discrete
350
+ or continuous. If array, then it should be either a boolean mask
351
+ with shape (n_features,) or array with indices of discrete features.
352
+ If 'auto', it is assigned to False for dense `X` and to True for
353
+ sparse `X`.
354
+
355
+ n_neighbors : int, default=3
356
+ Number of neighbors to use for MI estimation for continuous variables,
357
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
358
+ could introduce a bias.
359
+
360
+ copy : bool, default=True
361
+ Whether to make a copy of the given data. If set to False, the initial
362
+ data will be overwritten.
363
+
364
+ random_state : int, RandomState instance or None, default=None
365
+ Determines random number generation for adding small noise to
366
+ continuous variables in order to remove repeated values.
367
+ Pass an int for reproducible results across multiple function calls.
368
+ See :term:`Glossary <random_state>`.
369
+
370
+ Returns
371
+ -------
372
+ mi : ndarray, shape (n_features,)
373
+ Estimated mutual information between each feature and the target in
374
+ nat units.
375
+
376
+ Notes
377
+ -----
378
+ 1. The term "discrete features" is used instead of naming them
379
+ "categorical", because it describes the essence more accurately.
380
+ For example, pixel intensities of an image are discrete features
381
+ (but hardly categorical) and you will get better results if you mark them
382
+ as such. Also note that treating a continuous variable as discrete and
383
+ vice versa will usually give incorrect results, so be attentive about
384
+ that.
385
+ 2. True mutual information can't be negative. If its estimate turns out
386
+ to be negative, it is replaced by zero.
387
+
388
+ References
389
+ ----------
390
+ .. [1] `Mutual Information
391
+ <https://en.wikipedia.org/wiki/Mutual_information>`_
392
+ on Wikipedia.
393
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
394
+ information". Phys. Rev. E 69, 2004.
395
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
396
+ Data Sets". PLoS ONE 9(2), 2014.
397
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
398
+ of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
399
+
400
+ Examples
401
+ --------
402
+ >>> from sklearn.datasets import make_regression
403
+ >>> from sklearn.feature_selection import mutual_info_regression
404
+ >>> X, y = make_regression(
405
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
406
+ ... )
407
+ >>> mutual_info_regression(X, y)
408
+ array([0.1..., 2.6... , 0.0...])
409
+ """
410
+ return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state)
411
+
412
+
413
+ @validate_params(
414
+ {
415
+ "X": ["array-like", "sparse matrix"],
416
+ "y": ["array-like"],
417
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
418
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
419
+ "copy": ["boolean"],
420
+ "random_state": ["random_state"],
421
+ },
422
+ prefer_skip_nested_validation=True,
423
+ )
424
+ def mutual_info_classif(
425
+ X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
426
+ ):
427
+ """Estimate mutual information for a discrete target variable.
428
+
429
+ Mutual information (MI) [1]_ between two random variables is a non-negative
430
+ value, which measures the dependency between the variables. It is equal
431
+ to zero if and only if two random variables are independent, and higher
432
+ values mean higher dependency.
433
+
434
+ The function relies on nonparametric methods based on entropy estimation
435
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
436
+ methods are based on the idea originally proposed in [4]_.
437
+
438
+ It can be used for univariate feature selection, read more in the
439
+ :ref:`User Guide <univariate_feature_selection>`.
440
+
441
+ Parameters
442
+ ----------
443
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
444
+ Feature matrix.
445
+
446
+ y : array-like of shape (n_samples,)
447
+ Target vector.
448
+
449
+ discrete_features : 'auto', bool or array-like, default='auto'
450
+ If bool, then determines whether to consider all features discrete
451
+ or continuous. If array, then it should be either a boolean mask
452
+ with shape (n_features,) or array with indices of discrete features.
453
+ If 'auto', it is assigned to False for dense `X` and to True for
454
+ sparse `X`.
455
+
456
+ n_neighbors : int, default=3
457
+ Number of neighbors to use for MI estimation for continuous variables,
458
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
459
+ could introduce a bias.
460
+
461
+ copy : bool, default=True
462
+ Whether to make a copy of the given data. If set to False, the initial
463
+ data will be overwritten.
464
+
465
+ random_state : int, RandomState instance or None, default=None
466
+ Determines random number generation for adding small noise to
467
+ continuous variables in order to remove repeated values.
468
+ Pass an int for reproducible results across multiple function calls.
469
+ See :term:`Glossary <random_state>`.
470
+
471
+ Returns
472
+ -------
473
+ mi : ndarray, shape (n_features,)
474
+ Estimated mutual information between each feature and the target in
475
+ nat units.
476
+
477
+ Notes
478
+ -----
479
+ 1. The term "discrete features" is used instead of naming them
480
+ "categorical", because it describes the essence more accurately.
481
+ For example, pixel intensities of an image are discrete features
482
+ (but hardly categorical) and you will get better results if you mark them
483
+ as such. Also note that treating a continuous variable as discrete and
484
+ vice versa will usually give incorrect results, so be attentive about
485
+ that.
486
+ 2. True mutual information can't be negative. If its estimate turns out
487
+ to be negative, it is replaced by zero.
488
+
489
+ References
490
+ ----------
491
+ .. [1] `Mutual Information
492
+ <https://en.wikipedia.org/wiki/Mutual_information>`_
493
+ on Wikipedia.
494
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
495
+ information". Phys. Rev. E 69, 2004.
496
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
497
+ Data Sets". PLoS ONE 9(2), 2014.
498
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
499
+ of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
500
+
501
+ Examples
502
+ --------
503
+ >>> from sklearn.datasets import make_classification
504
+ >>> from sklearn.feature_selection import mutual_info_classif
505
+ >>> X, y = make_classification(
506
+ ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
507
+ ... shuffle=False, random_state=42
508
+ ... )
509
+ >>> mutual_info_classif(X, y)
510
+ array([0.58..., 0.10..., 0.19..., 0.09... , 0. ,
511
+ 0. , 0. , 0. , 0. , 0. ])
512
+ """
513
+ check_classification_targets(y)
514
+ return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy, random_state)
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py ADDED
@@ -0,0 +1,792 @@
1
+ # Authors: Alexandre Gramfort <[email protected]>
2
+ # Vincent Michel <[email protected]>
3
+ # Gilles Louppe <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ """Recursive feature elimination for feature ranking"""
8
+
9
+ from numbers import Integral
10
+
11
+ import numpy as np
12
+ from joblib import effective_n_jobs
13
+
14
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
15
+ from ..metrics import check_scoring
16
+ from ..model_selection import check_cv
17
+ from ..model_selection._validation import _score
18
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt
19
+ from ..utils.metadata_routing import (
20
+ _raise_for_unsupported_routing,
21
+ _RoutingNotSupportedMixin,
22
+ )
23
+ from ..utils.metaestimators import _safe_split, available_if
24
+ from ..utils.parallel import Parallel, delayed
25
+ from ..utils.validation import check_is_fitted
26
+ from ._base import SelectorMixin, _get_feature_importances
27
+
28
+
29
+ def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
30
+ """
31
+ Return the score for a fit across one fold.
32
+ """
33
+ X_train, y_train = _safe_split(estimator, X, y, train)
34
+ X_test, y_test = _safe_split(estimator, X, y, test, train)
35
+ return rfe._fit(
36
+ X_train,
37
+ y_train,
38
+ lambda estimator, features: _score(
39
+ # TODO(SLEP6): pass score_params here
40
+ estimator,
41
+ X_test[:, features],
42
+ y_test,
43
+ scorer,
44
+ score_params=None,
45
+ ),
46
+ ).scores_
47
+
48
+
49
+ def _estimator_has(attr):
50
+ """Check if we can delegate a method to the underlying estimator.
51
+
52
+ First, we check the fitted `estimator_` if available, otherwise we check the
53
+ unfitted `estimator`. We raise the original `AttributeError` if `attr` does
54
+ not exist. This function is used together with `available_if`.
55
+ """
56
+
57
+ def check(self):
58
+ if hasattr(self, "estimator_"):
59
+ getattr(self.estimator_, attr)
60
+ else:
61
+ getattr(self.estimator, attr)
62
+
63
+ return True
64
+
65
+ return check
66
+
67
+
68
+ class RFE(_RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator):
69
+ """Feature ranking with recursive feature elimination.
70
+
71
+ Given an external estimator that assigns weights to features (e.g., the
72
+ coefficients of a linear model), the goal of recursive feature elimination
73
+ (RFE) is to select features by recursively considering smaller and smaller
74
+ sets of features. First, the estimator is trained on the initial set of
75
+ features and the importance of each feature is obtained either through
76
+ any specific attribute or callable.
77
+ Then, the least important features are pruned from current set of features.
78
+ That procedure is recursively repeated on the pruned set until the desired
79
+ number of features to select is eventually reached.
80
+
81
+ Read more in the :ref:`User Guide <rfe>`.
82
+
83
+ Parameters
84
+ ----------
85
+ estimator : ``Estimator`` instance
86
+ A supervised learning estimator with a ``fit`` method that provides
87
+ information about feature importance
88
+ (e.g. `coef_`, `feature_importances_`).
89
+
90
+ n_features_to_select : int or float, default=None
91
+ The number of features to select. If `None`, half of the features are
92
+ selected. If integer, the parameter is the absolute number of features
93
+ to select. If float between 0 and 1, it is the fraction of features to
94
+ select.
95
+
96
+ .. versionchanged:: 0.24
97
+ Added float values for fractions.
98
+
99
+ step : int or float, default=1
100
+ If greater than or equal to 1, then ``step`` corresponds to the
101
+ (integer) number of features to remove at each iteration.
102
+ If within (0.0, 1.0), then ``step`` corresponds to the percentage
103
+ (rounded down) of features to remove at each iteration.
104
+
105
+ verbose : int, default=0
106
+ Controls verbosity of output.
107
+
108
+ importance_getter : str or callable, default='auto'
109
+ If 'auto', uses the feature importance either through a `coef_`
110
+ or `feature_importances_` attributes of estimator.
111
+
112
+ Also accepts a string that specifies an attribute name/path
113
+ for extracting feature importance (implemented with `attrgetter`).
114
+ For example, give `regressor_.coef_` in case of
115
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
116
+ `named_steps.clf.feature_importances_` in case of
117
+ class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
118
+
119
+ If `callable`, overrides the default feature importance getter.
120
+ The callable is passed with the fitted estimator and it should
121
+ return importance for each feature.
122
+
123
+ .. versionadded:: 0.24
124
+
125
+ Attributes
126
+ ----------
127
+ classes_ : ndarray of shape (n_classes,)
128
+ The classes labels. Only available when `estimator` is a classifier.
129
+
130
+ estimator_ : ``Estimator`` instance
131
+ The fitted estimator used to select features.
132
+
133
+ n_features_ : int
134
+ The number of selected features.
135
+
136
+ n_features_in_ : int
137
+ Number of features seen during :term:`fit`. Only defined if the
138
+ underlying estimator exposes such an attribute when fit.
139
+
140
+ .. versionadded:: 0.24
141
+
142
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
143
+ Names of features seen during :term:`fit`. Defined only when `X`
144
+ has feature names that are all strings.
145
+
146
+ .. versionadded:: 1.0
147
+
148
+ ranking_ : ndarray of shape (n_features,)
149
+ The feature ranking, such that ``ranking_[i]`` corresponds to the
150
+ ranking position of the i-th feature. Selected (i.e., estimated
151
+ best) features are assigned rank 1.
152
+
153
+ support_ : ndarray of shape (n_features,)
154
+ The mask of selected features.
155
+
156
+ See Also
157
+ --------
158
+ RFECV : Recursive feature elimination with built-in cross-validated
159
+ selection of the best number of features.
160
+ SelectFromModel : Feature selection based on thresholds of importance
161
+ weights.
162
+ SequentialFeatureSelector : Sequential cross-validation based feature
163
+ selection. Does not rely on importance weights.
164
+
165
+ Notes
166
+ -----
167
+ Allows NaN/Inf in the input if the underlying estimator does as well.
168
+
169
+ References
170
+ ----------
171
+
172
+ .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
173
+ for cancer classification using support vector machines",
174
+ Mach. Learn., 46(1-3), 389--422, 2002.
175
+
176
+ Examples
177
+ --------
178
+ The following example shows how to retrieve the 5 most informative
179
+ features in the Friedman #1 dataset.
180
+
181
+ >>> from sklearn.datasets import make_friedman1
182
+ >>> from sklearn.feature_selection import RFE
183
+ >>> from sklearn.svm import SVR
184
+ >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
185
+ >>> estimator = SVR(kernel="linear")
186
+ >>> selector = RFE(estimator, n_features_to_select=5, step=1)
187
+ >>> selector = selector.fit(X, y)
188
+ >>> selector.support_
189
+ array([ True, True, True, True, True, False, False, False, False,
190
+ False])
191
+ >>> selector.ranking_
192
+ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
193
+ """
194
+
195
+ _parameter_constraints: dict = {
196
+ "estimator": [HasMethods(["fit"])],
197
+ "n_features_to_select": [
198
+ None,
199
+ Interval(RealNotInt, 0, 1, closed="right"),
200
+ Interval(Integral, 0, None, closed="neither"),
201
+ ],
202
+ "step": [
203
+ Interval(Integral, 0, None, closed="neither"),
204
+ Interval(RealNotInt, 0, 1, closed="neither"),
205
+ ],
206
+ "verbose": ["verbose"],
207
+ "importance_getter": [str, callable],
208
+ }
209
+
210
+ def __init__(
211
+ self,
212
+ estimator,
213
+ *,
214
+ n_features_to_select=None,
215
+ step=1,
216
+ verbose=0,
217
+ importance_getter="auto",
218
+ ):
219
+ self.estimator = estimator
220
+ self.n_features_to_select = n_features_to_select
221
+ self.step = step
222
+ self.importance_getter = importance_getter
223
+ self.verbose = verbose
224
+
225
+ @property
226
+ def _estimator_type(self):
227
+ return self.estimator._estimator_type
228
+
229
+ @property
230
+ def classes_(self):
231
+ """Classes labels available when `estimator` is a classifier.
232
+
233
+ Returns
234
+ -------
235
+ ndarray of shape (n_classes,)
236
+ """
237
+ return self.estimator_.classes_
238
+
239
+ @_fit_context(
240
+ # RFE.estimator is not validated yet
241
+ prefer_skip_nested_validation=False
242
+ )
243
+ def fit(self, X, y, **fit_params):
244
+ """Fit the RFE model and then the underlying estimator on the selected features.
245
+
246
+ Parameters
247
+ ----------
248
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
249
+ The training input samples.
250
+
251
+ y : array-like of shape (n_samples,)
252
+ The target values.
253
+
254
+ **fit_params : dict
255
+ Additional parameters passed to the `fit` method of the underlying
256
+ estimator.
257
+
258
+ Returns
259
+ -------
260
+ self : object
261
+ Fitted estimator.
262
+ """
263
+ _raise_for_unsupported_routing(self, "fit", **fit_params)
264
+ return self._fit(X, y, **fit_params)
265
+
266
+ def _fit(self, X, y, step_score=None, **fit_params):
267
+ # Parameter step_score controls the calculation of self.scores_
268
+ # step_score is not exposed to users
269
+ # and is used when implementing RFECV
270
+ # self.scores_ will not be calculated when calling _fit through fit
271
+
272
+ X, y = self._validate_data(
273
+ X,
274
+ y,
275
+ accept_sparse="csc",
276
+ ensure_min_features=2,
277
+ force_all_finite=False,
278
+ multi_output=True,
279
+ )
280
+
281
+ # Initialization
282
+ n_features = X.shape[1]
283
+ if self.n_features_to_select is None:
284
+ n_features_to_select = n_features // 2
285
+ elif isinstance(self.n_features_to_select, Integral): # int
286
+ n_features_to_select = self.n_features_to_select
287
+ else: # float
288
+ n_features_to_select = int(n_features * self.n_features_to_select)
289
+
290
+ if 0.0 < self.step < 1.0:
291
+ step = int(max(1, self.step * n_features))
292
+ else:
293
+ step = int(self.step)
294
+
295
+ support_ = np.ones(n_features, dtype=bool)
296
+ ranking_ = np.ones(n_features, dtype=int)
297
+
298
+ if step_score:
299
+ self.scores_ = []
300
+
301
+ # Elimination
302
+ while np.sum(support_) > n_features_to_select:
303
+ # Remaining features
304
+ features = np.arange(n_features)[support_]
305
+
306
+ # Rank the remaining features
307
+ estimator = clone(self.estimator)
308
+ if self.verbose > 0:
309
+ print("Fitting estimator with %d features." % np.sum(support_))
310
+
311
+ estimator.fit(X[:, features], y, **fit_params)
312
+
313
+ # Get importance and rank them
314
+ importances = _get_feature_importances(
315
+ estimator,
316
+ self.importance_getter,
317
+ transform_func="square",
318
+ )
319
+ ranks = np.argsort(importances)
320
+
321
+ # for sparse case ranks is matrix
322
+ ranks = np.ravel(ranks)
323
+
324
+ # Eliminate the worse features
325
+ threshold = min(step, np.sum(support_) - n_features_to_select)
326
+
327
+ # Compute step score on the previous selection iteration
328
+ # because 'estimator' must use features
329
+ # that have not been eliminated yet
330
+ if step_score:
331
+ self.scores_.append(step_score(estimator, features))
332
+ support_[features[ranks][:threshold]] = False
333
+ ranking_[np.logical_not(support_)] += 1
334
+
335
+ # Set final attributes
336
+ features = np.arange(n_features)[support_]
337
+ self.estimator_ = clone(self.estimator)
338
+ self.estimator_.fit(X[:, features], y, **fit_params)
339
+
340
+ # Compute step score when only n_features_to_select features left
341
+ if step_score:
342
+ self.scores_.append(step_score(self.estimator_, features))
343
+ self.n_features_ = support_.sum()
344
+ self.support_ = support_
345
+ self.ranking_ = ranking_
346
+
347
+ return self
348
+
349
+ @available_if(_estimator_has("predict"))
350
+ def predict(self, X):
351
+ """Reduce X to the selected features and predict using the estimator.
352
+
353
+ Parameters
354
+ ----------
355
+ X : array of shape [n_samples, n_features]
356
+ The input samples.
357
+
358
+ Returns
359
+ -------
360
+ y : array of shape [n_samples]
361
+ The predicted target values.
362
+ """
363
+ check_is_fitted(self)
364
+ return self.estimator_.predict(self.transform(X))
365
+
366
+ @available_if(_estimator_has("score"))
367
+ def score(self, X, y, **fit_params):
368
+ """Reduce X to the selected features and return the score of the estimator.
369
+
370
+ Parameters
371
+ ----------
372
+ X : array of shape [n_samples, n_features]
373
+ The input samples.
374
+
375
+ y : array of shape [n_samples]
376
+ The target values.
377
+
378
+ **fit_params : dict
379
+ Parameters to pass to the `score` method of the underlying
380
+ estimator.
381
+
382
+ .. versionadded:: 1.0
383
+
384
+ Returns
385
+ -------
386
+ score : float
387
+ Score of the underlying base estimator computed with the selected
388
+ features returned by `rfe.transform(X)` and `y`.
389
+ """
390
+ check_is_fitted(self)
391
+ return self.estimator_.score(self.transform(X), y, **fit_params)
392
+
393
+ def _get_support_mask(self):
394
+ check_is_fitted(self)
395
+ return self.support_
396
+
397
+ @available_if(_estimator_has("decision_function"))
398
+ def decision_function(self, X):
399
+ """Compute the decision function of ``X``.
400
+
401
+ Parameters
402
+ ----------
403
+ X : {array-like or sparse matrix} of shape (n_samples, n_features)
404
+ The input samples. Internally, it will be converted to
405
+ ``dtype=np.float32`` and if a sparse matrix is provided
406
+ to a sparse ``csr_matrix``.
407
+
408
+ Returns
409
+ -------
410
+ score : array, shape = [n_samples, n_classes] or [n_samples]
411
+ The decision function of the input samples. The order of the
412
+ classes corresponds to that in the attribute :term:`classes_`.
413
+ Regression and binary classification produce an array of shape
414
+ [n_samples].
415
+ """
416
+ check_is_fitted(self)
417
+ return self.estimator_.decision_function(self.transform(X))
418
+
419
+ @available_if(_estimator_has("predict_proba"))
420
+ def predict_proba(self, X):
421
+ """Predict class probabilities for X.
422
+
423
+ Parameters
424
+ ----------
425
+ X : {array-like or sparse matrix} of shape (n_samples, n_features)
426
+ The input samples. Internally, it will be converted to
427
+ ``dtype=np.float32`` and if a sparse matrix is provided
428
+ to a sparse ``csr_matrix``.
429
+
430
+ Returns
431
+ -------
432
+ p : array of shape (n_samples, n_classes)
433
+ The class probabilities of the input samples. The order of the
434
+ classes corresponds to that in the attribute :term:`classes_`.
435
+ """
436
+ check_is_fitted(self)
437
+ return self.estimator_.predict_proba(self.transform(X))
438
+
439
+ @available_if(_estimator_has("predict_log_proba"))
440
+ def predict_log_proba(self, X):
441
+ """Predict class log-probabilities for X.
442
+
443
+ Parameters
444
+ ----------
445
+ X : array of shape [n_samples, n_features]
446
+ The input samples.
447
+
448
+ Returns
449
+ -------
450
+ p : array of shape (n_samples, n_classes)
451
+ The class log-probabilities of the input samples. The order of the
452
+ classes corresponds to that in the attribute :term:`classes_`.
453
+ """
454
+ check_is_fitted(self)
455
+ return self.estimator_.predict_log_proba(self.transform(X))
456
+
457
+ def _more_tags(self):
458
+ tags = {
459
+ "poor_score": True,
460
+ "requires_y": True,
461
+ "allow_nan": True,
462
+ }
463
+
464
+ # Adjust allow_nan if estimator explicitly defines `allow_nan`.
465
+ if hasattr(self.estimator, "_get_tags"):
466
+ tags["allow_nan"] = self.estimator._get_tags()["allow_nan"]
467
+
468
+ return tags
469
+
470
+
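The elimination loop in `_fit` above repeatedly refits a clone of the estimator and drops the lowest-ranked features until only `n_features_to_select` remain. A minimal usage sketch follows; the dataset and the logistic-regression estimator are illustrative choices, not part of this module:

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=20, n_informative=5,
                           random_state=0)
# Keep 5 features, dropping one feature per elimination round.
selector = RFE(LogisticRegression(max_iter=1000), n_features_to_select=5, step=1)
selector.fit(X, y)
print(selector.support_)    # boolean mask of the retained features
print(selector.ranking_)    # rank 1 marks the selected features
X_reduced = selector.transform(X)  # shape (200, 5)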
471
+ class RFECV(RFE):
472
+ """Recursive feature elimination with cross-validation to select features.
473
+
474
+ The number of features selected is tuned automatically by fitting an :class:`RFE`
475
+ selector on the different cross-validation splits (provided by the `cv` parameter).
476
+ The performance of the :class:`RFE` selector is evaluated using `scorer` for
+ different numbers of selected features and aggregated together. Finally, the scores
+ are averaged across folds and the number of features selected is set to the number
+ of features that maximizes the cross-validation score.
480
+ See glossary entry for :term:`cross-validation estimator`.
481
+
482
+ Read more in the :ref:`User Guide <rfe>`.
483
+
484
+ Parameters
485
+ ----------
486
+ estimator : ``Estimator`` instance
487
+ A supervised learning estimator with a ``fit`` method that provides
488
+ information about feature importance either through a ``coef_``
489
+ attribute or through a ``feature_importances_`` attribute.
490
+
491
+ step : int or float, default=1
492
+ If greater than or equal to 1, then ``step`` corresponds to the
493
+ (integer) number of features to remove at each iteration.
494
+ If within (0.0, 1.0), then ``step`` corresponds to the percentage
495
+ (rounded down) of features to remove at each iteration.
496
+ Note that the last iteration may remove fewer than ``step`` features in
497
+ order to reach ``min_features_to_select``.
498
+
499
+ min_features_to_select : int, default=1
500
+ The minimum number of features to be selected. This number of features
501
+ will always be scored, even if the difference between the original
502
+ feature count and ``min_features_to_select`` isn't divisible by
503
+ ``step``.
504
+
505
+ .. versionadded:: 0.20
506
+
507
+ cv : int, cross-validation generator or an iterable, default=None
508
+ Determines the cross-validation splitting strategy.
509
+ Possible inputs for cv are:
510
+
511
+ - None, to use the default 5-fold cross-validation,
512
+ - integer, to specify the number of folds.
513
+ - :term:`CV splitter`,
514
+ - An iterable yielding (train, test) splits as arrays of indices.
515
+
516
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
+ either binary or multiclass,
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
+ cases, :class:`~sklearn.model_selection.KFold` is used.
520
+
521
+ Refer :ref:`User Guide <cross_validation>` for the various
522
+ cross-validation strategies that can be used here.
523
+
524
+ .. versionchanged:: 0.22
525
+ ``cv`` default value of None changed from 3-fold to 5-fold.
526
+
527
+ scoring : str, callable or None, default=None
528
+ A string (see model evaluation documentation) or
529
+ a scorer callable object / function with signature
530
+ ``scorer(estimator, X, y)``.
531
+
532
+ verbose : int, default=0
533
+ Controls verbosity of output.
534
+
535
+ n_jobs : int or None, default=None
536
+ Number of cores to run in parallel while fitting across folds.
537
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
538
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
539
+ for more details.
540
+
541
+ .. versionadded:: 0.18
542
+
543
+ importance_getter : str or callable, default='auto'
544
+ If 'auto', uses the feature importance either through a `coef_`
+ or `feature_importances_` attribute of the estimator.
546
+
547
+ Also accepts a string that specifies an attribute name/path
548
+ for extracting feature importance.
549
+ For example, give `regressor_.coef_` in case of
550
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
551
+ `named_steps.clf.feature_importances_` in case of
552
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
553
+
554
+ If `callable`, overrides the default feature importance getter.
555
+ The callable is passed with the fitted estimator and it should
556
+ return importance for each feature.
557
+
558
+ .. versionadded:: 0.24
559
+
560
+ Attributes
561
+ ----------
562
+ classes_ : ndarray of shape (n_classes,)
563
+ The classes labels. Only available when `estimator` is a classifier.
564
+
565
+ estimator_ : ``Estimator`` instance
566
+ The fitted estimator used to select features.
567
+
568
+ cv_results_ : dict of ndarrays
569
+ A dict with keys:
570
+
571
+ split(k)_test_score : ndarray of shape (n_subsets_of_features,)
572
+ The cross-validation scores for the k-th fold.
573
+
574
+ mean_test_score : ndarray of shape (n_subsets_of_features,)
575
+ Mean of scores over the folds.
576
+
577
+ std_test_score : ndarray of shape (n_subsets_of_features,)
578
+ Standard deviation of scores over the folds.
579
+
580
+ .. versionadded:: 1.0
581
+
582
+ n_features_ : int
583
+ The number of selected features with cross-validation.
584
+
585
+ n_features_in_ : int
586
+ Number of features seen during :term:`fit`. Only defined if the
587
+ underlying estimator exposes such an attribute when fit.
588
+
589
+ .. versionadded:: 0.24
590
+
591
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
592
+ Names of features seen during :term:`fit`. Defined only when `X`
593
+ has feature names that are all strings.
594
+
595
+ .. versionadded:: 1.0
596
+
597
+ ranking_ : ndarray of shape (n_features,)
+ The feature ranking, such that `ranking_[i]` corresponds to the ranking
+ position of the i-th feature. Selected (i.e., estimated best) features
+ are assigned rank 1.
603
+
604
+ support_ : ndarray of shape (n_features,)
605
+ The mask of selected features.
606
+
607
+ See Also
608
+ --------
609
+ RFE : Recursive feature elimination.
610
+
611
+ Notes
612
+ -----
613
+ The size of all values in ``cv_results_`` is equal to
614
+ ``ceil((n_features - min_features_to_select) / step) + 1``,
615
+ where step is the number of features removed at each iteration.
616
+
617
+ Allows NaN/Inf in the input if the underlying estimator does as well.
618
+
619
+ References
620
+ ----------
621
+
622
+ .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
623
+ for cancer classification using support vector machines",
624
+ Mach. Learn., 46(1-3), 389--422, 2002.
625
+
626
+ Examples
627
+ --------
628
+ The following example shows how to retrieve the 5 informative features
+ (not known a priori) in the Friedman #1 dataset.
630
+
631
+ >>> from sklearn.datasets import make_friedman1
632
+ >>> from sklearn.feature_selection import RFECV
633
+ >>> from sklearn.svm import SVR
634
+ >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
635
+ >>> estimator = SVR(kernel="linear")
636
+ >>> selector = RFECV(estimator, step=1, cv=5)
637
+ >>> selector = selector.fit(X, y)
638
+ >>> selector.support_
639
+ array([ True, True, True, True, True, False, False, False, False,
640
+ False])
641
+ >>> selector.ranking_
642
+ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
643
+ """
644
+
645
+ _parameter_constraints: dict = {
646
+ **RFE._parameter_constraints,
647
+ "min_features_to_select": [Interval(Integral, 0, None, closed="neither")],
648
+ "cv": ["cv_object"],
649
+ "scoring": [None, str, callable],
650
+ "n_jobs": [None, Integral],
651
+ }
652
+ _parameter_constraints.pop("n_features_to_select")
653
+
654
+ def __init__(
655
+ self,
656
+ estimator,
657
+ *,
658
+ step=1,
659
+ min_features_to_select=1,
660
+ cv=None,
661
+ scoring=None,
662
+ verbose=0,
663
+ n_jobs=None,
664
+ importance_getter="auto",
665
+ ):
666
+ self.estimator = estimator
667
+ self.step = step
668
+ self.importance_getter = importance_getter
669
+ self.cv = cv
670
+ self.scoring = scoring
671
+ self.verbose = verbose
672
+ self.n_jobs = n_jobs
673
+ self.min_features_to_select = min_features_to_select
674
+
675
+ @_fit_context(
676
+ # RFECV.estimator is not validated yet
677
+ prefer_skip_nested_validation=False
678
+ )
679
+ def fit(self, X, y, groups=None):
680
+ """Fit the RFE model and automatically tune the number of selected features.
681
+
682
+ Parameters
683
+ ----------
684
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
685
+ Training vector, where `n_samples` is the number of samples and
686
+ `n_features` is the total number of features.
687
+
688
+ y : array-like of shape (n_samples,)
689
+ Target values (integers for classification, real numbers for
690
+ regression).
691
+
692
+ groups : array-like of shape (n_samples,) or None, default=None
693
+ Group labels for the samples used while splitting the dataset into
694
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
695
+ instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
696
+
697
+ .. versionadded:: 0.20
698
+
699
+ Returns
700
+ -------
701
+ self : object
702
+ Fitted estimator.
703
+ """
704
+ _raise_for_unsupported_routing(self, "fit", groups=groups)
705
+ X, y = self._validate_data(
706
+ X,
707
+ y,
708
+ accept_sparse="csr",
709
+ ensure_min_features=2,
710
+ force_all_finite=False,
711
+ multi_output=True,
712
+ )
713
+
714
+ # Initialization
715
+ cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
716
+ scorer = check_scoring(self.estimator, scoring=self.scoring)
717
+ n_features = X.shape[1]
718
+
719
+ if 0.0 < self.step < 1.0:
720
+ step = int(max(1, self.step * n_features))
721
+ else:
722
+ step = int(self.step)
723
+
724
+ # Build an RFE object, which will evaluate and score each possible
725
+ # feature count, down to self.min_features_to_select
726
+ rfe = RFE(
727
+ estimator=self.estimator,
728
+ n_features_to_select=self.min_features_to_select,
729
+ importance_getter=self.importance_getter,
730
+ step=self.step,
731
+ verbose=self.verbose,
732
+ )
733
+
734
+ # Determine the number of subsets of features by fitting across
735
+ # the train folds and choosing the "features_to_select" parameter
736
+ # that gives the least averaged error across all folds.
737
+
738
+ # Note that joblib raises a non-picklable error for bound methods
739
+ # even if n_jobs is set to 1 with the default multiprocessing
740
+ # backend.
741
+ # This branching ensures that user code that sets n_jobs to 1
+ # and provides bound methods as scorers is not broken by the
+ # addition of the n_jobs parameter in version 0.18.
745
+
746
+ if effective_n_jobs(self.n_jobs) == 1:
747
+ parallel, func = list, _rfe_single_fit
748
+ else:
749
+ parallel = Parallel(n_jobs=self.n_jobs)
750
+ func = delayed(_rfe_single_fit)
751
+
752
+ scores = parallel(
753
+ func(rfe, self.estimator, X, y, train, test, scorer)
754
+ for train, test in cv.split(X, y, groups)
755
+ )
756
+
757
+ scores = np.array(scores)
758
+ scores_sum = np.sum(scores, axis=0)
759
+ scores_sum_rev = scores_sum[::-1]
760
+ argmax_idx = len(scores_sum) - np.argmax(scores_sum_rev) - 1
761
+ n_features_to_select = max(
762
+ n_features - (argmax_idx * step), self.min_features_to_select
763
+ )
764
+
765
+ # Re-execute an elimination with best_k over the whole set
766
+ rfe = RFE(
767
+ estimator=self.estimator,
768
+ n_features_to_select=n_features_to_select,
769
+ step=self.step,
770
+ importance_getter=self.importance_getter,
771
+ verbose=self.verbose,
772
+ )
773
+
774
+ rfe.fit(X, y)
775
+
776
+ # Set final attributes
777
+ self.support_ = rfe.support_
778
+ self.n_features_ = rfe.n_features_
779
+ self.ranking_ = rfe.ranking_
780
+ self.estimator_ = clone(self.estimator)
781
+ self.estimator_.fit(self._transform(X), y)
782
+
783
+ # reverse to stay consistent with before
784
+ scores_rev = scores[:, ::-1]
785
+ self.cv_results_ = {}
786
+ self.cv_results_["mean_test_score"] = np.mean(scores_rev, axis=0)
787
+ self.cv_results_["std_test_score"] = np.std(scores_rev, axis=0)
788
+
789
+ for i in range(scores.shape[0]):
790
+ self.cv_results_[f"split{i}_test_score"] = scores_rev[i]
791
+
792
+ return self
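Because `cv_results_` holds one entry per candidate feature count (ordered from `min_features_to_select` upwards, as described in the class Notes), the selected size can be checked against the fold-averaged scores. A small sketch, reusing the linear SVR from the docstring example; the dataset size and parameters are illustrative:

from sklearn.datasets import make_friedman1
from sklearn.feature_selection import RFECV
from sklearn.svm import SVR

X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
selector = RFECV(SVR(kernel="linear"), step=1, cv=5, min_features_to_select=1)
selector.fit(X, y)

# One mean score per candidate number of features; with step=1 and
# min_features_to_select=1, entry i corresponds to i+1 features.
for n, score in enumerate(selector.cv_results_["mean_test_score"], start=1):
    print(n, round(score, 3))
print("chosen:", selector.n_features_)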
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py ADDED
@@ -0,0 +1,300 @@
1
+ """
2
+ Sequential feature selection
3
+ """
4
+ from numbers import Integral, Real
5
+
6
+ import numpy as np
7
+
8
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
9
+ from ..metrics import get_scorer_names
10
+ from ..model_selection import check_cv, cross_val_score
11
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
12
+ from ..utils._tags import _safe_tags
13
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
14
+ from ..utils.validation import check_is_fitted
15
+ from ._base import SelectorMixin
16
+
17
+
18
+ class SequentialFeatureSelector(
19
+ _RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator
20
+ ):
21
+ """Transformer that performs Sequential Feature Selection.
22
+
23
+ This Sequential Feature Selector adds (forward selection) or
24
+ removes (backward selection) features to form a feature subset in a
25
+ greedy fashion. At each stage, this estimator chooses the best feature to
26
+ add or remove based on the cross-validation score of an estimator. In
27
+ the case of unsupervised learning, this Sequential Feature Selector
28
+ looks only at the features (X), not the desired outputs (y).
29
+
30
+ Read more in the :ref:`User Guide <sequential_feature_selection>`.
31
+
32
+ .. versionadded:: 0.24
33
+
34
+ Parameters
35
+ ----------
36
+ estimator : estimator instance
37
+ An unfitted estimator.
38
+
39
+ n_features_to_select : "auto", int or float, default="auto"
40
+ If `"auto"`, the behaviour depends on the `tol` parameter:
41
+
42
+ - if `tol` is not `None`, then features are selected while the score
43
+ change does not exceed `tol`.
44
+ - otherwise, half of the features are selected.
45
+
46
+ If integer, the parameter is the absolute number of features to select.
47
+ If float between 0 and 1, it is the fraction of features to select.
48
+
49
+ .. versionadded:: 1.1
50
+ The option `"auto"` was added in version 1.1.
51
+
52
+ .. versionchanged:: 1.3
53
+ The default changed from `"warn"` to `"auto"` in 1.3.
54
+
55
+ tol : float, default=None
56
+ If the score does not improve by at least `tol` between two
+ consecutive feature additions or removals, the selection stops.
58
+
59
+ `tol` can be negative when removing features using `direction="backward"`.
60
+ It can be useful to reduce the number of features at the cost of a small
61
+ decrease in the score.
62
+
63
+ `tol` is enabled only when `n_features_to_select` is `"auto"`.
64
+
65
+ .. versionadded:: 1.1
66
+
67
+ direction : {'forward', 'backward'}, default='forward'
68
+ Whether to perform forward selection or backward selection.
69
+
70
+ scoring : str or callable, default=None
71
+ A single str (see :ref:`scoring_parameter`) or a callable
72
+ (see :ref:`scoring`) to evaluate the predictions on the test set.
73
+
74
+ NOTE that when using a custom scorer, it should return a single
75
+ value.
76
+
77
+ If None, the estimator's score method is used.
78
+
79
+ cv : int, cross-validation generator or an iterable, default=None
80
+ Determines the cross-validation splitting strategy.
81
+ Possible inputs for cv are:
82
+
83
+ - None, to use the default 5-fold cross validation,
84
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
85
+ - :term:`CV splitter`,
86
+ - An iterable yielding (train, test) splits as arrays of indices.
87
+
88
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
89
+ either binary or multiclass,
90
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
91
+ cases, :class:`~sklearn.model_selection.KFold` is used. These splitters
92
+ are instantiated with `shuffle=False` so the splits will be the same
93
+ across calls.
94
+
95
+ Refer :ref:`User Guide <cross_validation>` for the various
96
+ cross-validation strategies that can be used here.
97
+
98
+ n_jobs : int, default=None
99
+ Number of jobs to run in parallel. When evaluating a new feature to
100
+ add or remove, the cross-validation procedure is parallel over the
101
+ folds.
102
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
103
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
104
+ for more details.
105
+
106
+ Attributes
107
+ ----------
108
+ n_features_in_ : int
109
+ Number of features seen during :term:`fit`. Only defined if the
110
+ underlying estimator exposes such an attribute when fit.
111
+
112
+ .. versionadded:: 0.24
113
+
114
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
115
+ Names of features seen during :term:`fit`. Defined only when `X`
116
+ has feature names that are all strings.
117
+
118
+ .. versionadded:: 1.0
119
+
120
+ n_features_to_select_ : int
121
+ The number of features that were selected.
122
+
123
+ support_ : ndarray of shape (n_features,), dtype=bool
124
+ The mask of selected features.
125
+
126
+ See Also
127
+ --------
128
+ GenericUnivariateSelect : Univariate feature selector with configurable
129
+ strategy.
130
+ RFE : Recursive feature elimination based on importance weights.
131
+ RFECV : Recursive feature elimination based on importance weights, with
132
+ automatic selection of the number of features.
133
+ SelectFromModel : Feature selection based on thresholds of importance
134
+ weights.
135
+
136
+ Examples
137
+ --------
138
+ >>> from sklearn.feature_selection import SequentialFeatureSelector
139
+ >>> from sklearn.neighbors import KNeighborsClassifier
140
+ >>> from sklearn.datasets import load_iris
141
+ >>> X, y = load_iris(return_X_y=True)
142
+ >>> knn = KNeighborsClassifier(n_neighbors=3)
143
+ >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3)
144
+ >>> sfs.fit(X, y)
145
+ SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3),
146
+ n_features_to_select=3)
147
+ >>> sfs.get_support()
148
+ array([ True, False, True, True])
149
+ >>> sfs.transform(X).shape
150
+ (150, 3)
151
+ """
152
+
153
+ _parameter_constraints: dict = {
154
+ "estimator": [HasMethods(["fit"])],
155
+ "n_features_to_select": [
156
+ StrOptions({"auto"}),
157
+ Interval(RealNotInt, 0, 1, closed="right"),
158
+ Interval(Integral, 0, None, closed="neither"),
159
+ ],
160
+ "tol": [None, Interval(Real, None, None, closed="neither")],
161
+ "direction": [StrOptions({"forward", "backward"})],
162
+ "scoring": [None, StrOptions(set(get_scorer_names())), callable],
163
+ "cv": ["cv_object"],
164
+ "n_jobs": [None, Integral],
165
+ }
166
+
167
+ def __init__(
168
+ self,
169
+ estimator,
170
+ *,
171
+ n_features_to_select="auto",
172
+ tol=None,
173
+ direction="forward",
174
+ scoring=None,
175
+ cv=5,
176
+ n_jobs=None,
177
+ ):
178
+ self.estimator = estimator
179
+ self.n_features_to_select = n_features_to_select
180
+ self.tol = tol
181
+ self.direction = direction
182
+ self.scoring = scoring
183
+ self.cv = cv
184
+ self.n_jobs = n_jobs
185
+
186
+ @_fit_context(
187
+ # SequentialFeatureSelector.estimator is not validated yet
188
+ prefer_skip_nested_validation=False
189
+ )
190
+ def fit(self, X, y=None):
191
+ """Learn the features to select from X.
192
+
193
+ Parameters
194
+ ----------
195
+ X : array-like of shape (n_samples, n_features)
196
+ Training vectors, where `n_samples` is the number of samples and
197
+ `n_features` is the number of predictors.
198
+
199
+ y : array-like of shape (n_samples,), default=None
200
+ Target values. This parameter may be ignored for
201
+ unsupervised learning.
202
+
203
+ Returns
204
+ -------
205
+ self : object
206
+ Returns the instance itself.
207
+ """
208
+ tags = self._get_tags()
209
+ X = self._validate_data(
210
+ X,
211
+ accept_sparse="csc",
212
+ ensure_min_features=2,
213
+ force_all_finite=not tags.get("allow_nan", True),
214
+ )
215
+ n_features = X.shape[1]
216
+
217
+ if self.n_features_to_select == "auto":
218
+ if self.tol is not None:
219
+ # With auto feature selection, `n_features_to_select_` will be updated
220
+ # to `support_.sum()` after features are selected.
221
+ self.n_features_to_select_ = n_features - 1
222
+ else:
223
+ self.n_features_to_select_ = n_features // 2
224
+ elif isinstance(self.n_features_to_select, Integral):
225
+ if self.n_features_to_select >= n_features:
226
+ raise ValueError("n_features_to_select must be < n_features.")
227
+ self.n_features_to_select_ = self.n_features_to_select
228
+ elif isinstance(self.n_features_to_select, Real):
229
+ self.n_features_to_select_ = int(n_features * self.n_features_to_select)
230
+
231
+ if self.tol is not None and self.tol < 0 and self.direction == "forward":
232
+ raise ValueError("tol must be positive when doing forward selection")
233
+
234
+ cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
235
+
236
+ cloned_estimator = clone(self.estimator)
237
+
238
+ # the current mask corresponds to the set of features:
239
+ # - that we have already *selected* if we do forward selection
240
+ # - that we have already *excluded* if we do backward selection
241
+ current_mask = np.zeros(shape=n_features, dtype=bool)
242
+ n_iterations = (
243
+ self.n_features_to_select_
244
+ if self.n_features_to_select == "auto" or self.direction == "forward"
245
+ else n_features - self.n_features_to_select_
246
+ )
247
+
248
+ old_score = -np.inf
249
+ is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
250
+ for _ in range(n_iterations):
251
+ new_feature_idx, new_score = self._get_best_new_feature_score(
252
+ cloned_estimator, X, y, cv, current_mask
253
+ )
254
+ if is_auto_select and ((new_score - old_score) < self.tol):
255
+ break
256
+
257
+ old_score = new_score
258
+ current_mask[new_feature_idx] = True
259
+
260
+ if self.direction == "backward":
261
+ current_mask = ~current_mask
262
+
263
+ self.support_ = current_mask
264
+ self.n_features_to_select_ = self.support_.sum()
265
+
266
+ return self
267
+
268
+ def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask):
269
+ # Return the best new feature and its score to add to the current_mask,
270
+ # i.e. return the best new feature and its score to add (resp. remove)
271
+ # when doing forward selection (resp. backward selection).
272
+ # Feature will be added if the current score and past score are greater
273
+ # than tol when n_feature is auto,
274
+ candidate_feature_indices = np.flatnonzero(~current_mask)
275
+ scores = {}
276
+ for feature_idx in candidate_feature_indices:
277
+ candidate_mask = current_mask.copy()
278
+ candidate_mask[feature_idx] = True
279
+ if self.direction == "backward":
280
+ candidate_mask = ~candidate_mask
281
+ X_new = X[:, candidate_mask]
282
+ scores[feature_idx] = cross_val_score(
283
+ estimator,
284
+ X_new,
285
+ y,
286
+ cv=cv,
287
+ scoring=self.scoring,
288
+ n_jobs=self.n_jobs,
289
+ ).mean()
290
+ new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx])
291
+ return new_feature_idx, scores[new_feature_idx]
292
+
293
+ def _get_support_mask(self):
294
+ check_is_fitted(self)
295
+ return self.support_
296
+
297
+ def _more_tags(self):
298
+ return {
299
+ "allow_nan": _safe_tags(self.estimator, key="allow_nan"),
300
+ }
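The loop in `fit` above adds (or, with `direction='backward'`, removes) one feature per iteration and, when `n_features_to_select='auto'` and `tol` is set, stops as soon as the cross-validated score no longer improves by at least `tol`. A minimal sketch of that stopping behaviour; the diabetes dataset, ridge estimator and `tol` value are illustrative:

from sklearn.datasets import load_diabetes
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import Ridge

X, y = load_diabetes(return_X_y=True)
# Keep adding features only while each addition improves the mean CV score
# by at least `tol`; the final count is reported in n_features_to_select_.
sfs = SequentialFeatureSelector(Ridge(), n_features_to_select="auto",
                                tol=1e-3, direction="forward", cv=5)
sfs.fit(X, y)
print(sfs.n_features_to_select_)
print(sfs.get_support())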
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py ADDED
@@ -0,0 +1,1161 @@
1
+ """Univariate features selection."""
2
+
3
+ # Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
4
+ # L. Buitinck, A. Joly
5
+ # License: BSD 3 clause
6
+
7
+
8
+ import warnings
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ from scipy import special, stats
13
+ from scipy.sparse import issparse
14
+
15
+ from ..base import BaseEstimator, _fit_context
16
+ from ..preprocessing import LabelBinarizer
17
+ from ..utils import as_float_array, check_array, check_X_y, safe_mask, safe_sqr
18
+ from ..utils._param_validation import Interval, StrOptions, validate_params
19
+ from ..utils.extmath import row_norms, safe_sparse_dot
20
+ from ..utils.validation import check_is_fitted
21
+ from ._base import SelectorMixin
22
+
23
+
24
+ def _clean_nans(scores):
25
+ """
26
+ Fixes Issue #1240: NaNs can't be properly compared, so change them to the
27
+ smallest value of scores's dtype. -inf seems to be unreliable.
28
+ """
29
+ # XXX where should this function be called? fit? scoring functions
30
+ # themselves?
31
+ scores = as_float_array(scores, copy=True)
32
+ scores[np.isnan(scores)] = np.finfo(scores.dtype).min
33
+ return scores
34
+
35
+
36
+ ######################################################################
37
+ # Scoring functions
38
+
39
+
40
+ # The following function is a rewriting of scipy.stats.f_oneway
41
+ # Contrary to the scipy.stats.f_oneway implementation it does not
42
+ # copy the data while keeping the inputs unchanged.
43
+ def f_oneway(*args):
44
+ """Perform a 1-way ANOVA.
45
+
46
+ The one-way ANOVA tests the null hypothesis that 2 or more groups have
47
+ the same population mean. The test is applied to samples from two or
48
+ more groups, possibly with differing sizes.
49
+
50
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
51
+
52
+ Parameters
53
+ ----------
54
+ *args : {array-like, sparse matrix}
55
+ Sample1, sample2... The sample measurements should be given as
56
+ arguments.
57
+
58
+ Returns
59
+ -------
60
+ f_statistic : float
61
+ The computed F-value of the test.
62
+ p_value : float
63
+ The associated p-value from the F-distribution.
64
+
65
+ Notes
66
+ -----
67
+ The ANOVA test has important assumptions that must be satisfied in order
68
+ for the associated p-value to be valid.
69
+
70
+ 1. The samples are independent
71
+ 2. Each sample is from a normally distributed population
72
+ 3. The population standard deviations of the groups are all equal. This
73
+ property is known as homoscedasticity.
74
+
75
+ If these assumptions are not true for a given set of data, it may still be
76
+ possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
77
+ with some loss of power.
78
+
79
+ The algorithm is from Heiman[2], pp.394-7.
80
+
81
+ See ``scipy.stats.f_oneway`` that should give the same results while
82
+ being less efficient.
83
+
84
+ References
85
+ ----------
86
+ .. [1] Lowry, Richard. "Concepts and Applications of Inferential
87
+ Statistics". Chapter 14.
88
+ http://vassarstats.net/textbook
89
+
90
+ .. [2] Heiman, G.W. Research Methods in Statistics. 2002.
91
+ """
92
+ n_classes = len(args)
93
+ args = [as_float_array(a) for a in args]
94
+ n_samples_per_class = np.array([a.shape[0] for a in args])
95
+ n_samples = np.sum(n_samples_per_class)
96
+ ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
97
+ sums_args = [np.asarray(a.sum(axis=0)) for a in args]
98
+ square_of_sums_alldata = sum(sums_args) ** 2
99
+ square_of_sums_args = [s**2 for s in sums_args]
100
+ sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
101
+ ssbn = 0.0
102
+ for k, _ in enumerate(args):
103
+ ssbn += square_of_sums_args[k] / n_samples_per_class[k]
104
+ ssbn -= square_of_sums_alldata / float(n_samples)
105
+ sswn = sstot - ssbn
106
+ dfbn = n_classes - 1
107
+ dfwn = n_samples - n_classes
108
+ msb = ssbn / float(dfbn)
109
+ msw = sswn / float(dfwn)
110
+ constant_features_idx = np.where(msw == 0.0)[0]
111
+ if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size:
112
+ warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
113
+ f = msb / msw
114
+ # flatten matrix to vector in sparse case
115
+ f = np.asarray(f).ravel()
116
+ prob = special.fdtrc(dfbn, dfwn, f)
117
+ return f, prob
118
+
119
+
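On dense data the moment-based computation above should agree with `scipy.stats.f_oneway`; a small consistency sketch with made-up groups (the group sizes and means are arbitrary):

import numpy as np
from scipy import stats
from sklearn.feature_selection import f_classif

rng = np.random.RandomState(0)
# Three hypothetical groups of a single measurement.
a, b, c = rng.normal(0, 1, 30), rng.normal(0.5, 1, 25), rng.normal(1.0, 1, 40)

# scipy's reference implementation on the raw samples...
f_ref, p_ref = stats.f_oneway(a, b, c)

# ...and the selector-oriented path: stack the samples into a single-feature
# X with a group label per row and let f_classif split by class internally.
X = np.concatenate([a, b, c]).reshape(-1, 1)
y = np.concatenate([np.zeros(30), np.ones(25), np.full(40, 2)])
f_skl, p_skl = f_classif(X, y)
assert np.allclose(f_ref, f_skl[0]) and np.allclose(p_ref, p_skl[0])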
120
+ @validate_params(
121
+ {
122
+ "X": ["array-like", "sparse matrix"],
123
+ "y": ["array-like"],
124
+ },
125
+ prefer_skip_nested_validation=True,
126
+ )
127
+ def f_classif(X, y):
128
+ """Compute the ANOVA F-value for the provided sample.
129
+
130
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
131
+
132
+ Parameters
133
+ ----------
134
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
135
+ The set of regressors that will be tested sequentially.
136
+
137
+ y : array-like of shape (n_samples,)
138
+ The target vector.
139
+
140
+ Returns
141
+ -------
142
+ f_statistic : ndarray of shape (n_features,)
143
+ F-statistic for each feature.
144
+
145
+ p_values : ndarray of shape (n_features,)
146
+ P-values associated with the F-statistic.
147
+
148
+ See Also
149
+ --------
150
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
151
+ f_regression : F-value between label/feature for regression tasks.
152
+
153
+ Examples
154
+ --------
155
+ >>> from sklearn.datasets import make_classification
156
+ >>> from sklearn.feature_selection import f_classif
157
+ >>> X, y = make_classification(
158
+ ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
159
+ ... shuffle=False, random_state=42
160
+ ... )
161
+ >>> f_statistic, p_values = f_classif(X, y)
162
+ >>> f_statistic
163
+ array([2.2...e+02, 7.0...e-01, 1.6...e+00, 9.3...e-01,
164
+ 5.4...e+00, 3.2...e-01, 4.7...e-02, 5.7...e-01,
165
+ 7.5...e-01, 8.9...e-02])
166
+ >>> p_values
167
+ array([7.1...e-27, 4.0...e-01, 1.9...e-01, 3.3...e-01,
168
+ 2.2...e-02, 5.7...e-01, 8.2...e-01, 4.5...e-01,
169
+ 3.8...e-01, 7.6...e-01])
170
+ """
171
+ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"])
172
+ args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
173
+ return f_oneway(*args)
174
+
175
+
176
+ def _chisquare(f_obs, f_exp):
177
+ """Fast replacement for scipy.stats.chisquare.
178
+
179
+ Version from https://github.com/scipy/scipy/pull/2525 with additional
180
+ optimizations.
181
+ """
182
+ f_obs = np.asarray(f_obs, dtype=np.float64)
183
+
184
+ k = len(f_obs)
185
+ # Reuse f_obs for chi-squared statistics
186
+ chisq = f_obs
187
+ chisq -= f_exp
188
+ chisq **= 2
189
+ with np.errstate(invalid="ignore"):
190
+ chisq /= f_exp
191
+ chisq = chisq.sum(axis=0)
192
+ return chisq, special.chdtrc(k - 1, chisq)
193
+
194
+
195
+ @validate_params(
196
+ {
197
+ "X": ["array-like", "sparse matrix"],
198
+ "y": ["array-like"],
199
+ },
200
+ prefer_skip_nested_validation=True,
201
+ )
202
+ def chi2(X, y):
203
+ """Compute chi-squared stats between each non-negative feature and class.
204
+
205
+ This score can be used to select the `n_features` features with the
206
+ highest values for the test chi-squared statistic from X, which must
207
+ contain only **non-negative features** such as booleans or frequencies
208
+ (e.g., term counts in document classification), relative to the classes.
209
+
210
+ Recall that the chi-square test measures dependence between stochastic
211
+ variables, so using this function "weeds out" the features that are the
212
+ most likely to be independent of class and therefore irrelevant for
213
+ classification.
214
+
215
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
216
+
217
+ Parameters
218
+ ----------
219
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
220
+ Sample vectors.
221
+
222
+ y : array-like of shape (n_samples,)
223
+ Target vector (class labels).
224
+
225
+ Returns
226
+ -------
227
+ chi2 : ndarray of shape (n_features,)
228
+ Chi2 statistics for each feature.
229
+
230
+ p_values : ndarray of shape (n_features,)
231
+ P-values for each feature.
232
+
233
+ See Also
234
+ --------
235
+ f_classif : ANOVA F-value between label/feature for classification tasks.
236
+ f_regression : F-value between label/feature for regression tasks.
237
+
238
+ Notes
239
+ -----
240
+ Complexity of this algorithm is O(n_classes * n_features).
241
+
242
+ Examples
243
+ --------
244
+ >>> import numpy as np
245
+ >>> from sklearn.feature_selection import chi2
246
+ >>> X = np.array([[1, 1, 3],
247
+ ... [0, 1, 5],
248
+ ... [5, 4, 1],
249
+ ... [6, 6, 2],
250
+ ... [1, 4, 0],
251
+ ... [0, 0, 0]])
252
+ >>> y = np.array([1, 1, 0, 0, 2, 2])
253
+ >>> chi2_stats, p_values = chi2(X, y)
254
+ >>> chi2_stats
255
+ array([15.3..., 6.5 , 8.9...])
256
+ >>> p_values
257
+ array([0.0004..., 0.0387..., 0.0116... ])
258
+ """
259
+
260
+ # XXX: we might want to do some of the following in logspace instead for
261
+ # numerical stability.
262
+ # Converting X to float allows getting better performance for the
263
+ # safe_sparse_dot call made below.
264
+ X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32))
265
+ if np.any((X.data if issparse(X) else X) < 0):
266
+ raise ValueError("Input X must be non-negative.")
267
+
268
+ # Use a sparse representation for Y by default to reduce memory usage when
269
+ # y has many unique classes.
270
+ Y = LabelBinarizer(sparse_output=True).fit_transform(y)
271
+ if Y.shape[1] == 1:
272
+ Y = Y.toarray()
273
+ Y = np.append(1 - Y, Y, axis=1)
274
+
275
+ observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
276
+
277
+ if issparse(observed):
278
+ # convert back to a dense array before calling _chisquare
279
+ # XXX: could _chisquare be reimplement to accept sparse matrices for
280
+ # cases where both n_classes and n_features are large (and X is
281
+ # sparse)?
282
+ observed = observed.toarray()
283
+
284
+ feature_count = X.sum(axis=0).reshape(1, -1)
285
+ class_prob = Y.mean(axis=0).reshape(1, -1)
286
+ expected = np.dot(class_prob.T, feature_count)
287
+
288
+ return _chisquare(observed, expected)
289
+
290
+
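The contingency construction above amounts to a class-by-feature table of observed counts plus an outer product of class probabilities and per-feature totals for the expected counts. A small dense re-derivation with made-up non-negative counts (illustrative only):

import numpy as np
from scipy.stats import chi2 as chi2_dist
from sklearn.feature_selection import chi2

X = np.array([[1, 1, 3],
              [0, 1, 5],
              [5, 4, 1],
              [6, 6, 2]], dtype=float)
y = np.array([1, 1, 0, 0])

# Observed counts: per-class sums of each feature.
Y = np.column_stack([(y == 0), (y == 1)]).astype(float)
observed = Y.T @ X
# Expected counts: class probabilities times the per-feature totals.
expected = np.outer(Y.mean(axis=0), X.sum(axis=0))

stat = ((observed - expected) ** 2 / expected).sum(axis=0)
pval = chi2_dist.sf(stat, df=Y.shape[1] - 1)
stat_ref, pval_ref = chi2(X, y)
assert np.allclose(stat, stat_ref) and np.allclose(pval, pval_ref)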
291
+ @validate_params(
292
+ {
293
+ "X": ["array-like", "sparse matrix"],
294
+ "y": ["array-like"],
295
+ "center": ["boolean"],
296
+ "force_finite": ["boolean"],
297
+ },
298
+ prefer_skip_nested_validation=True,
299
+ )
300
+ def r_regression(X, y, *, center=True, force_finite=True):
301
+ """Compute Pearson's r for each features and the target.
302
+
303
+ Pearson's r is also known as the Pearson correlation coefficient.
304
+
305
+ Linear model for testing the individual effect of each of many regressors.
306
+ This is a scoring function to be used in a feature selection procedure, not
307
+ a free standing feature selection procedure.
308
+
309
+ The cross correlation between each regressor and the target is computed
310
+ as::
311
+
312
+ E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
313
+
314
+ For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
315
+
316
+ .. versionadded:: 1.0
317
+
318
+ Parameters
319
+ ----------
320
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
321
+ The data matrix.
322
+
323
+ y : array-like of shape (n_samples,)
324
+ The target vector.
325
+
326
+ center : bool, default=True
327
+ Whether or not to center the data matrix `X` and the target vector `y`.
328
+ By default, `X` and `y` will be centered.
329
+
330
+ force_finite : bool, default=True
331
+ Whether or not to force the Pearson's R correlation to be finite.
332
+ In the particular case where some features in `X` or the target `y`
333
+ are constant, the Pearson's R correlation is not defined. When
334
+ `force_finite=False`, a correlation of `np.nan` is returned to
335
+ acknowledge this case. When `force_finite=True`, this value will be
336
+ forced to a minimal correlation of `0.0`.
337
+
338
+ .. versionadded:: 1.1
339
+
340
+ Returns
341
+ -------
342
+ correlation_coefficient : ndarray of shape (n_features,)
343
+ Pearson's R correlation coefficients of features.
344
+
345
+ See Also
346
+ --------
347
+ f_regression: Univariate linear regression tests returning f-statistic
348
+ and p-values.
349
+ mutual_info_regression: Mutual information for a continuous target.
350
+ f_classif: ANOVA F-value between label/feature for classification tasks.
351
+ chi2: Chi-squared stats of non-negative features for classification tasks.
352
+
353
+ Examples
354
+ --------
355
+ >>> from sklearn.datasets import make_regression
356
+ >>> from sklearn.feature_selection import r_regression
357
+ >>> X, y = make_regression(
358
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
359
+ ... )
360
+ >>> r_regression(X, y)
361
+ array([-0.15..., 1. , -0.22...])
362
+ """
363
+ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64)
364
+ n_samples = X.shape[0]
365
+
366
+ # Compute centered values
367
+ # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
368
+ # need not center X
369
+ if center:
370
+ y = y - np.mean(y)
371
+ # TODO: for Scipy <= 1.10, `isspmatrix(X)` returns `True` for sparse arrays.
372
+ # Here, we check the output of the `.mean` operation that returns a `np.matrix`
373
+ # for sparse matrices while a `np.array` for dense and sparse arrays.
374
+ # We can reconsider using `isspmatrix` when the minimum version is
375
+ # SciPy >= 1.11
376
+ X_means = X.mean(axis=0)
377
+ X_means = X_means.getA1() if isinstance(X_means, np.matrix) else X_means
378
+ # Compute the scaled standard deviations via moments
379
+ X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means**2)
380
+ else:
381
+ X_norms = row_norms(X.T)
382
+
383
+ correlation_coefficient = safe_sparse_dot(y, X)
384
+ with np.errstate(divide="ignore", invalid="ignore"):
385
+ correlation_coefficient /= X_norms
386
+ correlation_coefficient /= np.linalg.norm(y)
387
+
388
+ if force_finite and not np.isfinite(correlation_coefficient).all():
389
+ # case where the target or some features are constant
390
+ # the correlation coefficient(s) is/are set to the minimum (i.e. 0.0)
391
+ nan_mask = np.isnan(correlation_coefficient)
392
+ correlation_coefficient[nan_mask] = 0.0
393
+ return correlation_coefficient
394
+
395
+
396
+ @validate_params(
397
+ {
398
+ "X": ["array-like", "sparse matrix"],
399
+ "y": ["array-like"],
400
+ "center": ["boolean"],
401
+ "force_finite": ["boolean"],
402
+ },
403
+ prefer_skip_nested_validation=True,
404
+ )
405
+ def f_regression(X, y, *, center=True, force_finite=True):
406
+ """Univariate linear regression tests returning F-statistic and p-values.
407
+
408
+ Quick linear model for testing the effect of a single regressor,
409
+ sequentially for many regressors.
410
+
411
+ This is done in 2 steps:
412
+
413
+ 1. The cross correlation between each regressor and the target is computed
414
+ using :func:`r_regression` as::
415
+
416
+ E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
417
+
418
+ 2. It is converted to an F score and then to a p-value.
419
+
420
+ :func:`f_regression` is derived from :func:`r_regression` and will rank
421
+ features in the same order if all the features are positively correlated
422
+ with the target.
423
+
424
+ Note however that contrary to :func:`f_regression`, :func:`r_regression`
425
+ values lie in [-1, 1] and can thus be negative. :func:`f_regression` is
426
+ therefore recommended as a feature selection criterion to identify
427
+ potentially predictive feature for a downstream classifier, irrespective of
428
+ the sign of the association with the target variable.
429
+
430
+ Furthermore :func:`f_regression` returns p-values while
431
+ :func:`r_regression` does not.
432
+
433
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
434
+
435
+ Parameters
436
+ ----------
437
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
438
+ The data matrix.
439
+
440
+ y : array-like of shape (n_samples,)
441
+ The target vector.
442
+
443
+ center : bool, default=True
444
+ Whether or not to center the data matrix `X` and the target vector `y`.
445
+ By default, `X` and `y` will be centered.
446
+
447
+ force_finite : bool, default=True
448
+ Whether or not to force the F-statistics and associated p-values to
449
+ be finite. There are two cases where the F-statistic is expected to not
450
+ be finite:
451
+
452
+ - when the target `y` or some features in `X` are constant. In this
453
+ case, the Pearson's R correlation is not defined leading to obtain
454
+ `np.nan` values in the F-statistic and p-value. When
455
+ `force_finite=True`, the F-statistic is set to `0.0` and the
456
+ associated p-value is set to `1.0`.
457
+ - when a feature in `X` is perfectly correlated (or
458
+ anti-correlated) with the target `y`. In this case, the F-statistic
459
+ is expected to be `np.inf`. When `force_finite=True`, the F-statistic
460
+ is set to `np.finfo(dtype).max` and the associated p-value is set to
461
+ `0.0`.
462
+
463
+ .. versionadded:: 1.1
464
+
465
+ Returns
466
+ -------
467
+ f_statistic : ndarray of shape (n_features,)
468
+ F-statistic for each feature.
469
+
470
+ p_values : ndarray of shape (n_features,)
471
+ P-values associated with the F-statistic.
472
+
473
+ See Also
474
+ --------
475
+ r_regression: Pearson's R between label/feature for regression tasks.
476
+ f_classif: ANOVA F-value between label/feature for classification tasks.
477
+ chi2: Chi-squared stats of non-negative features for classification tasks.
478
+ SelectKBest: Select features based on the k highest scores.
479
+ SelectFpr: Select features based on a false positive rate test.
480
+ SelectFdr: Select features based on an estimated false discovery rate.
481
+ SelectFwe: Select features based on family-wise error rate.
482
+ SelectPercentile: Select features based on percentile of the highest
483
+ scores.
484
+
485
+ Examples
486
+ --------
487
+ >>> from sklearn.datasets import make_regression
488
+ >>> from sklearn.feature_selection import f_regression
489
+ >>> X, y = make_regression(
490
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
491
+ ... )
492
+ >>> f_statistic, p_values = f_regression(X, y)
493
+ >>> f_statistic
494
+ array([1.2...+00, 2.6...+13, 2.6...+00])
495
+ >>> p_values
496
+ array([2.7..., 1.5..., 1.0...])
497
+ """
498
+ correlation_coefficient = r_regression(
499
+ X, y, center=center, force_finite=force_finite
500
+ )
501
+ deg_of_freedom = y.size - (2 if center else 1)
502
+
503
+ corr_coef_squared = correlation_coefficient**2
504
+
505
+ with np.errstate(divide="ignore", invalid="ignore"):
506
+ f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom
507
+ p_values = stats.f.sf(f_statistic, 1, deg_of_freedom)
508
+
509
+ if force_finite and not np.isfinite(f_statistic).all():
510
+ # case where there is a perfect (anti-)correlation
511
+ # f-statistics can be set to the maximum and p-values to zero
512
+ mask_inf = np.isinf(f_statistic)
513
+ f_statistic[mask_inf] = np.finfo(f_statistic.dtype).max
514
+ # case where the target or some features are constant
515
+ # f-statistics would be minimum and thus p-values large
516
+ mask_nan = np.isnan(f_statistic)
517
+ f_statistic[mask_nan] = 0.0
518
+ p_values[mask_nan] = 1.0
519
+ return f_statistic, p_values
520
+
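The two-step procedure documented above (Pearson's r, then an F statistic with 1 and n_samples - 2 degrees of freedom in the centred case) can be reproduced directly; a short sketch with synthetic data:

import numpy as np
from scipy import stats
from sklearn.feature_selection import f_regression, r_regression

rng = np.random.RandomState(0)
X = rng.normal(size=(60, 4))
y = X[:, 0] * 2.0 + rng.normal(scale=0.5, size=60)

r = r_regression(X, y)                 # step 1: per-feature Pearson's r
dof = y.size - 2                       # centred case: n_samples - 2
f_manual = r**2 / (1 - r**2) * dof     # step 2: convert r to an F statistic
p_manual = stats.f.sf(f_manual, 1, dof)

f_ref, p_ref = f_regression(X, y)
assert np.allclose(f_manual, f_ref) and np.allclose(p_manual, p_ref)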
521
+
522
+ ######################################################################
523
+ # Base classes
524
+
525
+
526
+ class _BaseFilter(SelectorMixin, BaseEstimator):
527
+ """Initialize the univariate feature selection.
528
+
529
+ Parameters
530
+ ----------
531
+ score_func : callable
532
+ Function taking two arrays X and y, and returning a pair of arrays
533
+ (scores, pvalues) or a single array with scores.
534
+ """
535
+
536
+ _parameter_constraints: dict = {"score_func": [callable]}
537
+
538
+ def __init__(self, score_func):
539
+ self.score_func = score_func
540
+
541
+ @_fit_context(prefer_skip_nested_validation=True)
542
+ def fit(self, X, y=None):
543
+ """Run score function on (X, y) and get the appropriate features.
544
+
545
+ Parameters
546
+ ----------
547
+ X : array-like of shape (n_samples, n_features)
548
+ The training input samples.
549
+
550
+ y : array-like of shape (n_samples,) or None
551
+ The target values (class labels in classification, real numbers in
552
+ regression). If the selector is unsupervised then `y` can be set to `None`.
553
+
554
+ Returns
555
+ -------
556
+ self : object
557
+ Returns the instance itself.
558
+ """
559
+ if y is None:
560
+ X = self._validate_data(X, accept_sparse=["csr", "csc"])
561
+ else:
562
+ X, y = self._validate_data(
563
+ X, y, accept_sparse=["csr", "csc"], multi_output=True
564
+ )
565
+
566
+ self._check_params(X, y)
567
+ score_func_ret = self.score_func(X, y)
568
+ if isinstance(score_func_ret, (list, tuple)):
569
+ self.scores_, self.pvalues_ = score_func_ret
570
+ self.pvalues_ = np.asarray(self.pvalues_)
571
+ else:
572
+ self.scores_ = score_func_ret
573
+ self.pvalues_ = None
574
+
575
+ self.scores_ = np.asarray(self.scores_)
576
+
577
+ return self
578
+
579
+ def _check_params(self, X, y):
580
+ pass
581
+
582
+ def _more_tags(self):
583
+ return {"requires_y": True}
584
+
585
+
586
+ ######################################################################
587
+ # Specific filters
588
+ ######################################################################
589
+ class SelectPercentile(_BaseFilter):
590
+ """Select features according to a percentile of the highest scores.
591
+
592
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
593
+
594
+ Parameters
595
+ ----------
596
+ score_func : callable, default=f_classif
597
+ Function taking two arrays X and y, and returning a pair of arrays
598
+ (scores, pvalues) or a single array with scores.
599
+ Default is f_classif (see below "See Also"). The default function only
600
+ works with classification tasks.
601
+
602
+ .. versionadded:: 0.18
603
+
604
+ percentile : int, default=10
605
+ Percent of features to keep.
606
+
607
+ Attributes
608
+ ----------
609
+ scores_ : array-like of shape (n_features,)
610
+ Scores of features.
611
+
612
+ pvalues_ : array-like of shape (n_features,)
613
+ p-values of feature scores, None if `score_func` returned only scores.
614
+
615
+ n_features_in_ : int
616
+ Number of features seen during :term:`fit`.
617
+
618
+ .. versionadded:: 0.24
619
+
620
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
621
+ Names of features seen during :term:`fit`. Defined only when `X`
622
+ has feature names that are all strings.
623
+
624
+ .. versionadded:: 1.0
625
+
626
+ See Also
627
+ --------
628
+ f_classif : ANOVA F-value between label/feature for classification tasks.
629
+ mutual_info_classif : Mutual information for a discrete target.
630
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
631
+ f_regression : F-value between label/feature for regression tasks.
632
+ mutual_info_regression : Mutual information for a continuous target.
633
+ SelectKBest : Select features based on the k highest scores.
634
+ SelectFpr : Select features based on a false positive rate test.
635
+ SelectFdr : Select features based on an estimated false discovery rate.
636
+ SelectFwe : Select features based on family-wise error rate.
637
+ GenericUnivariateSelect : Univariate feature selector with configurable
638
+ mode.
639
+
640
+ Notes
641
+ -----
642
+ Ties between features with equal scores will be broken in an unspecified
643
+ way.
644
+
645
+ This filter supports unsupervised feature selection that only requests `X` for
646
+ computing the scores.
647
+
648
+ Examples
649
+ --------
650
+ >>> from sklearn.datasets import load_digits
651
+ >>> from sklearn.feature_selection import SelectPercentile, chi2
652
+ >>> X, y = load_digits(return_X_y=True)
653
+ >>> X.shape
654
+ (1797, 64)
655
+ >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y)
656
+ >>> X_new.shape
657
+ (1797, 7)
658
+ """
659
+
660
+ _parameter_constraints: dict = {
661
+ **_BaseFilter._parameter_constraints,
662
+ "percentile": [Interval(Real, 0, 100, closed="both")],
663
+ }
664
+
665
+ def __init__(self, score_func=f_classif, *, percentile=10):
666
+ super().__init__(score_func=score_func)
667
+ self.percentile = percentile
668
+
669
+ def _get_support_mask(self):
670
+ check_is_fitted(self)
671
+
672
+ # Cater for NaNs
673
+ if self.percentile == 100:
674
+ return np.ones(len(self.scores_), dtype=bool)
675
+ elif self.percentile == 0:
676
+ return np.zeros(len(self.scores_), dtype=bool)
677
+
678
+ scores = _clean_nans(self.scores_)
679
+ threshold = np.percentile(scores, 100 - self.percentile)
680
+ mask = scores > threshold
681
+ ties = np.where(scores == threshold)[0]
682
+ if len(ties):
683
+ max_feats = int(len(scores) * self.percentile / 100)
684
+ kept_ties = ties[: max_feats - mask.sum()]
685
+ mask[kept_ties] = True
686
+ return mask
687
+
688
+ def _more_tags(self):
689
+ return {"requires_y": False}
690
+
691
+
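The `_get_support_mask` logic above keeps scores strictly above the `100 - percentile` percentile and then admits just enough tied scores to stay within the requested fraction. A small numpy sketch of that rule with hypothetical scores:

import numpy as np

scores = np.array([0.9, 0.9, 0.9, 0.9, 0.1, 0.1])  # hypothetical feature scores
percentile = 50                                     # keep ~50% of the features

threshold = np.percentile(scores, 100 - percentile)
mask = scores > threshold
# Break ties at the threshold, keeping at most percentile% of the features:
# here all four top scores tie, but only three features are admitted.
ties = np.where(scores == threshold)[0]
if len(ties):
    max_feats = int(len(scores) * percentile / 100)
    mask[ties[: max_feats - mask.sum()]] = True
print(mask)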
692
+ class SelectKBest(_BaseFilter):
693
+ """Select features according to the k highest scores.
694
+
695
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
696
+
697
+ Parameters
698
+ ----------
699
+ score_func : callable, default=f_classif
700
+ Function taking two arrays X and y, and returning a pair of arrays
701
+ (scores, pvalues) or a single array with scores.
702
+ Default is f_classif (see below "See Also"). The default function only
703
+ works with classification tasks.
704
+
705
+ .. versionadded:: 0.18
706
+
707
+ k : int or "all", default=10
708
+ Number of top features to select.
709
+ The "all" option bypasses selection, for use in a parameter search.
710
+
711
+ Attributes
712
+ ----------
713
+ scores_ : array-like of shape (n_features,)
714
+ Scores of features.
715
+
716
+ pvalues_ : array-like of shape (n_features,)
717
+ p-values of feature scores, None if `score_func` returned only scores.
718
+
719
+ n_features_in_ : int
720
+ Number of features seen during :term:`fit`.
721
+
722
+ .. versionadded:: 0.24
723
+
724
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
725
+ Names of features seen during :term:`fit`. Defined only when `X`
726
+ has feature names that are all strings.
727
+
728
+ .. versionadded:: 1.0
729
+
730
+ See Also
731
+ --------
732
+ f_classif: ANOVA F-value between label/feature for classification tasks.
733
+ mutual_info_classif: Mutual information for a discrete target.
734
+ chi2: Chi-squared stats of non-negative features for classification tasks.
735
+ f_regression: F-value between label/feature for regression tasks.
736
+ mutual_info_regression: Mutual information for a continuous target.
737
+ SelectPercentile: Select features based on percentile of the highest
738
+ scores.
739
+ SelectFpr : Select features based on a false positive rate test.
740
+ SelectFdr : Select features based on an estimated false discovery rate.
741
+ SelectFwe : Select features based on family-wise error rate.
742
+ GenericUnivariateSelect : Univariate feature selector with configurable
743
+ mode.
744
+
745
+ Notes
746
+ -----
747
+ Ties between features with equal scores will be broken in an unspecified
748
+ way.
749
+
750
+ This filter supports unsupervised feature selection that only requests `X` for
751
+ computing the scores.
752
+
753
+ Examples
754
+ --------
755
+ >>> from sklearn.datasets import load_digits
756
+ >>> from sklearn.feature_selection import SelectKBest, chi2
757
+ >>> X, y = load_digits(return_X_y=True)
758
+ >>> X.shape
759
+ (1797, 64)
760
+ >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y)
761
+ >>> X_new.shape
762
+ (1797, 20)
763
+ """
764
+
765
+ _parameter_constraints: dict = {
766
+ **_BaseFilter._parameter_constraints,
767
+ "k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")],
768
+ }
769
+
770
+ def __init__(self, score_func=f_classif, *, k=10):
771
+ super().__init__(score_func=score_func)
772
+ self.k = k
773
+
774
+ def _check_params(self, X, y):
775
+ if not isinstance(self.k, str) and self.k > X.shape[1]:
776
+ warnings.warn(
777
+ f"k={self.k} is greater than n_features={X.shape[1]}. "
778
+ "All the features will be returned."
779
+ )
780
+
781
+ def _get_support_mask(self):
782
+ check_is_fitted(self)
783
+
784
+ if self.k == "all":
785
+ return np.ones(self.scores_.shape, dtype=bool)
786
+ elif self.k == 0:
787
+ return np.zeros(self.scores_.shape, dtype=bool)
788
+ else:
789
+ scores = _clean_nans(self.scores_)
790
+ mask = np.zeros(scores.shape, dtype=bool)
791
+
792
+ # Request a stable sort. Mergesort takes more memory (~40MB per
793
+ # megafeature on x86-64).
794
+ mask[np.argsort(scores, kind="mergesort")[-self.k :]] = 1
795
+ return mask
796
+
797
+ def _more_tags(self):
798
+ return {"requires_y": False}
799
+
800
+
801
+ class SelectFpr(_BaseFilter):
802
+ """Filter: Select the pvalues below alpha based on a FPR test.
803
+
804
+ FPR test stands for False Positive Rate test. It controls the total
805
+ amount of false detections.
806
+
807
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
808
+
809
+ Parameters
810
+ ----------
811
+ score_func : callable, default=f_classif
812
+ Function taking two arrays X and y, and returning a pair of arrays
813
+ (scores, pvalues).
814
+ Default is f_classif (see below "See Also"). The default function only
815
+ works with classification tasks.
816
+
817
+ alpha : float, default=5e-2
818
+ Features with p-values less than `alpha` are selected.
819
+
820
+ Attributes
821
+ ----------
822
+ scores_ : array-like of shape (n_features,)
823
+ Scores of features.
824
+
825
+ pvalues_ : array-like of shape (n_features,)
826
+ p-values of feature scores.
827
+
828
+ n_features_in_ : int
829
+ Number of features seen during :term:`fit`.
830
+
831
+ .. versionadded:: 0.24
832
+
833
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
834
+ Names of features seen during :term:`fit`. Defined only when `X`
835
+ has feature names that are all strings.
836
+
837
+ .. versionadded:: 1.0
838
+
839
+ See Also
840
+ --------
841
+ f_classif : ANOVA F-value between label/feature for classification tasks.
842
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
843
+ mutual_info_classif: Mutual information for a discrete target.
844
+ f_regression : F-value between label/feature for regression tasks.
845
+ mutual_info_regression : Mutual information for a continuous target.
846
+ SelectPercentile : Select features based on percentile of the highest
847
+ scores.
848
+ SelectKBest : Select features based on the k highest scores.
849
+ SelectFdr : Select features based on an estimated false discovery rate.
850
+ SelectFwe : Select features based on family-wise error rate.
851
+ GenericUnivariateSelect : Univariate feature selector with configurable
852
+ mode.
853
+
854
+ Examples
855
+ --------
856
+ >>> from sklearn.datasets import load_breast_cancer
857
+ >>> from sklearn.feature_selection import SelectFpr, chi2
858
+ >>> X, y = load_breast_cancer(return_X_y=True)
859
+ >>> X.shape
860
+ (569, 30)
861
+ >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y)
862
+ >>> X_new.shape
863
+ (569, 16)
864
+ """
865
+
866
+ _parameter_constraints: dict = {
867
+ **_BaseFilter._parameter_constraints,
868
+ "alpha": [Interval(Real, 0, 1, closed="both")],
869
+ }
870
+
871
+ def __init__(self, score_func=f_classif, *, alpha=5e-2):
872
+ super().__init__(score_func=score_func)
873
+ self.alpha = alpha
874
+
875
+ def _get_support_mask(self):
876
+ check_is_fitted(self)
877
+
878
+ return self.pvalues_ < self.alpha
879
+
880
+
881
+ class SelectFdr(_BaseFilter):
882
+ """Filter: Select the p-values for an estimated false discovery rate.
883
+
884
+ This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
885
+ on the expected false discovery rate.
886
+
887
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
888
+
889
+ Parameters
890
+ ----------
891
+ score_func : callable, default=f_classif
892
+ Function taking two arrays X and y, and returning a pair of arrays
893
+ (scores, pvalues).
894
+ Default is f_classif (see below "See Also"). The default function only
895
+ works with classification tasks.
896
+
897
+ alpha : float, default=5e-2
898
+ The highest uncorrected p-value for features to keep.
899
+
900
+ Attributes
901
+ ----------
902
+ scores_ : array-like of shape (n_features,)
903
+ Scores of features.
904
+
905
+ pvalues_ : array-like of shape (n_features,)
906
+ p-values of feature scores.
907
+
908
+ n_features_in_ : int
909
+ Number of features seen during :term:`fit`.
910
+
911
+ .. versionadded:: 0.24
912
+
913
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
914
+ Names of features seen during :term:`fit`. Defined only when `X`
915
+ has feature names that are all strings.
916
+
917
+ .. versionadded:: 1.0
918
+
919
+ See Also
920
+ --------
921
+ f_classif : ANOVA F-value between label/feature for classification tasks.
922
+ mutual_info_classif : Mutual information for a discrete target.
923
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
924
+ f_regression : F-value between label/feature for regression tasks.
925
+ mutual_info_regression : Mutual information for a continuous target.
926
+ SelectPercentile : Select features based on percentile of the highest
927
+ scores.
928
+ SelectKBest : Select features based on the k highest scores.
929
+ SelectFpr : Select features based on a false positive rate test.
930
+ SelectFwe : Select features based on family-wise error rate.
931
+ GenericUnivariateSelect : Univariate feature selector with configurable
932
+ mode.
933
+
934
+ References
935
+ ----------
936
+ https://en.wikipedia.org/wiki/False_discovery_rate
937
+
938
+ Examples
939
+ --------
940
+ >>> from sklearn.datasets import load_breast_cancer
941
+ >>> from sklearn.feature_selection import SelectFdr, chi2
942
+ >>> X, y = load_breast_cancer(return_X_y=True)
943
+ >>> X.shape
944
+ (569, 30)
945
+ >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y)
946
+ >>> X_new.shape
947
+ (569, 16)
948
+ """
949
+
950
+ _parameter_constraints: dict = {
951
+ **_BaseFilter._parameter_constraints,
952
+ "alpha": [Interval(Real, 0, 1, closed="both")],
953
+ }
954
+
955
+ def __init__(self, score_func=f_classif, *, alpha=5e-2):
956
+ super().__init__(score_func=score_func)
957
+ self.alpha = alpha
958
+
959
+ def _get_support_mask(self):
960
+ check_is_fitted(self)
961
+
962
+ n_features = len(self.pvalues_)
963
+ sv = np.sort(self.pvalues_)
964
+ selected = sv[
965
+ sv <= float(self.alpha) / n_features * np.arange(1, n_features + 1)
966
+ ]
967
+ if selected.size == 0:
968
+ return np.zeros_like(self.pvalues_, dtype=bool)
969
+ return self.pvalues_ <= selected.max()
970
+
971
+
972
+ class SelectFwe(_BaseFilter):
973
+ """Filter: Select the p-values corresponding to Family-wise error rate.
974
+
975
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
976
+
977
+ Parameters
978
+ ----------
979
+ score_func : callable, default=f_classif
980
+ Function taking two arrays X and y, and returning a pair of arrays
981
+ (scores, pvalues).
982
+ Default is f_classif (see below "See Also"). The default function only
983
+ works with classification tasks.
984
+
985
+ alpha : float, default=5e-2
986
+ The highest uncorrected p-value for features to keep.
987
+
988
+ Attributes
989
+ ----------
990
+ scores_ : array-like of shape (n_features,)
991
+ Scores of features.
992
+
993
+ pvalues_ : array-like of shape (n_features,)
994
+ p-values of feature scores.
995
+
996
+ n_features_in_ : int
997
+ Number of features seen during :term:`fit`.
998
+
999
+ .. versionadded:: 0.24
1000
+
1001
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1002
+ Names of features seen during :term:`fit`. Defined only when `X`
1003
+ has feature names that are all strings.
1004
+
1005
+ .. versionadded:: 1.0
1006
+
1007
+ See Also
1008
+ --------
1009
+ f_classif : ANOVA F-value between label/feature for classification tasks.
1010
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
1011
+ f_regression : F-value between label/feature for regression tasks.
1012
+ SelectPercentile : Select features based on percentile of the highest
1013
+ scores.
1014
+ SelectKBest : Select features based on the k highest scores.
1015
+ SelectFpr : Select features based on a false positive rate test.
1016
+ SelectFdr : Select features based on an estimated false discovery rate.
1017
+ GenericUnivariateSelect : Univariate feature selector with configurable
1018
+ mode.
1019
+
1020
+ Examples
1021
+ --------
1022
+ >>> from sklearn.datasets import load_breast_cancer
1023
+ >>> from sklearn.feature_selection import SelectFwe, chi2
1024
+ >>> X, y = load_breast_cancer(return_X_y=True)
1025
+ >>> X.shape
1026
+ (569, 30)
1027
+ >>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)
1028
+ >>> X_new.shape
1029
+ (569, 15)
1030
+ """
1031
+
1032
+ _parameter_constraints: dict = {
1033
+ **_BaseFilter._parameter_constraints,
1034
+ "alpha": [Interval(Real, 0, 1, closed="both")],
1035
+ }
1036
+
1037
+ def __init__(self, score_func=f_classif, *, alpha=5e-2):
1038
+ super().__init__(score_func=score_func)
1039
+ self.alpha = alpha
1040
+
1041
+ def _get_support_mask(self):
1042
+ check_is_fitted(self)
1043
+
1044
+ return self.pvalues_ < self.alpha / len(self.pvalues_)
1045
+
1046
+
1047
+ ######################################################################
1048
+ # Generic filter
1049
+ ######################################################################
1050
+
1051
+
1052
+ # TODO this class should fit on either p-values or scores,
1053
+ # depending on the mode.
1054
+ class GenericUnivariateSelect(_BaseFilter):
1055
+ """Univariate feature selector with configurable strategy.
1056
+
1057
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
1058
+
1059
+ Parameters
1060
+ ----------
1061
+ score_func : callable, default=f_classif
1062
+ Function taking two arrays X and y, and returning a pair of arrays
1063
+ (scores, pvalues). For modes 'percentile' or 'k_best' it can return
1064
+ a single array of scores.
1065
+
1066
+ mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
1067
+ Feature selection mode. Note that the `'percentile'` and `'k_best'`
1068
+ modes support unsupervised feature selection (when `y` is `None`).
1069
+
1070
+ param : "all", float or int, default=1e-5
1071
+ Parameter of the corresponding mode.
1072
+
1073
+ Attributes
1074
+ ----------
1075
+ scores_ : array-like of shape (n_features,)
1076
+ Scores of features.
1077
+
1078
+ pvalues_ : array-like of shape (n_features,)
1079
+ p-values of feature scores, None if `score_func` returned scores only.
1080
+
1081
+ n_features_in_ : int
1082
+ Number of features seen during :term:`fit`.
1083
+
1084
+ .. versionadded:: 0.24
1085
+
1086
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1087
+ Names of features seen during :term:`fit`. Defined only when `X`
1088
+ has feature names that are all strings.
1089
+
1090
+ .. versionadded:: 1.0
1091
+
1092
+ See Also
1093
+ --------
1094
+ f_classif : ANOVA F-value between label/feature for classification tasks.
1095
+ mutual_info_classif : Mutual information for a discrete target.
1096
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
1097
+ f_regression : F-value between label/feature for regression tasks.
1098
+ mutual_info_regression : Mutual information for a continuous target.
1099
+ SelectPercentile : Select features based on percentile of the highest
1100
+ scores.
1101
+ SelectKBest : Select features based on the k highest scores.
1102
+ SelectFpr : Select features based on a false positive rate test.
1103
+ SelectFdr : Select features based on an estimated false discovery rate.
1104
+ SelectFwe : Select features based on family-wise error rate.
1105
+
1106
+ Examples
1107
+ --------
1108
+ >>> from sklearn.datasets import load_breast_cancer
1109
+ >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2
1110
+ >>> X, y = load_breast_cancer(return_X_y=True)
1111
+ >>> X.shape
1112
+ (569, 30)
1113
+ >>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20)
1114
+ >>> X_new = transformer.fit_transform(X, y)
1115
+ >>> X_new.shape
1116
+ (569, 20)
1117
+ """
1118
+
1119
+ _selection_modes: dict = {
1120
+ "percentile": SelectPercentile,
1121
+ "k_best": SelectKBest,
1122
+ "fpr": SelectFpr,
1123
+ "fdr": SelectFdr,
1124
+ "fwe": SelectFwe,
1125
+ }
1126
+
1127
+ _parameter_constraints: dict = {
1128
+ **_BaseFilter._parameter_constraints,
1129
+ "mode": [StrOptions(set(_selection_modes.keys()))],
1130
+ "param": [Interval(Real, 0, None, closed="left"), StrOptions({"all"})],
1131
+ }
1132
+
1133
+ def __init__(self, score_func=f_classif, *, mode="percentile", param=1e-5):
1134
+ super().__init__(score_func=score_func)
1135
+ self.mode = mode
1136
+ self.param = param
1137
+
1138
+ def _make_selector(self):
1139
+ selector = self._selection_modes[self.mode](score_func=self.score_func)
1140
+
1141
+ # Now perform some acrobatics to set the right named parameter in
1142
+ # the selector
1143
+ possible_params = selector._get_param_names()
1144
+ possible_params.remove("score_func")
1145
+ selector.set_params(**{possible_params[0]: self.param})
1146
+
1147
+ return selector
1148
+
1149
+ def _more_tags(self):
1150
+ return {"preserves_dtype": [np.float64, np.float32]}
1151
+
1152
+ def _check_params(self, X, y):
1153
+ self._make_selector()._check_params(X, y)
1154
+
1155
+ def _get_support_mask(self):
1156
+ check_is_fitted(self)
1157
+
1158
+ selector = self._make_selector()
1159
+ selector.pvalues_ = self.pvalues_
1160
+ selector.scores_ = self.scores_
1161
+ return selector._get_support_mask()
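
For reference, a minimal standalone sketch (not taken from the file above; the toy p-values are assumed) of the Benjamini-Hochberg cutoff that SelectFdr._get_support_mask computes. In the estimator, the p-values come from score_func at fit time.

    import numpy as np

    # Toy p-values, assumed for illustration only.
    pvalues = np.array([0.001, 0.008, 0.039, 0.041, 0.3, 0.9])
    alpha = 0.05
    n_features = len(pvalues)

    sv = np.sort(pvalues)
    # Keep the sorted p-values that fall under the BH line alpha * i / n.
    under_line = sv[sv <= alpha / n_features * np.arange(1, n_features + 1)]
    if under_line.size == 0:
        support = np.zeros_like(pvalues, dtype=bool)
    else:
        # Everything up to the largest p-value under the line is selected.
        support = pvalues <= under_line.max()
    print(support)  # [ True  True False False False False]
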
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py ADDED
@@ -0,0 +1,136 @@
1
+ # Author: Lars Buitinck
2
+ # License: 3-clause BSD
3
+ from numbers import Real
4
+
5
+ import numpy as np
6
+
7
+ from ..base import BaseEstimator, _fit_context
8
+ from ..utils._param_validation import Interval
9
+ from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
10
+ from ..utils.validation import check_is_fitted
11
+ from ._base import SelectorMixin
12
+
13
+
14
+ class VarianceThreshold(SelectorMixin, BaseEstimator):
15
+ """Feature selector that removes all low-variance features.
16
+
17
+ This feature selection algorithm looks only at the features (X), not the
18
+ desired outputs (y), and can thus be used for unsupervised learning.
19
+
20
+ Read more in the :ref:`User Guide <variance_threshold>`.
21
+
22
+ Parameters
23
+ ----------
24
+ threshold : float, default=0
25
+ Features with a training-set variance lower than this threshold will
26
+ be removed. The default is to keep all features with non-zero variance,
27
+ i.e. remove the features that have the same value in all samples.
28
+
29
+ Attributes
30
+ ----------
31
+ variances_ : array, shape (n_features,)
32
+ Variances of individual features.
33
+
34
+ n_features_in_ : int
35
+ Number of features seen during :term:`fit`.
36
+
37
+ .. versionadded:: 0.24
38
+
39
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
40
+ Names of features seen during :term:`fit`. Defined only when `X`
41
+ has feature names that are all strings.
42
+
43
+ .. versionadded:: 1.0
44
+
45
+ See Also
46
+ --------
47
+ SelectFromModel: Meta-transformer for selecting features based on
48
+ importance weights.
49
+ SelectPercentile : Select features according to a percentile of the highest
50
+ scores.
51
+ SequentialFeatureSelector : Transformer that performs Sequential Feature
52
+ Selection.
53
+
54
+ Notes
55
+ -----
56
+ Allows NaN in the input.
57
+ Raises ValueError if no feature in X meets the variance threshold.
58
+
59
+ Examples
60
+ --------
61
+ The following dataset has integer features, two of which are the same
62
+ in every sample. These are removed with the default setting for threshold::
63
+
64
+ >>> from sklearn.feature_selection import VarianceThreshold
65
+ >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
66
+ >>> selector = VarianceThreshold()
67
+ >>> selector.fit_transform(X)
68
+ array([[2, 0],
69
+ [1, 4],
70
+ [1, 1]])
71
+ """
72
+
73
+ _parameter_constraints: dict = {
74
+ "threshold": [Interval(Real, 0, None, closed="left")]
75
+ }
76
+
77
+ def __init__(self, threshold=0.0):
78
+ self.threshold = threshold
79
+
80
+ @_fit_context(prefer_skip_nested_validation=True)
81
+ def fit(self, X, y=None):
82
+ """Learn empirical variances from X.
83
+
84
+ Parameters
85
+ ----------
86
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
87
+ Data from which to compute variances, where `n_samples` is
88
+ the number of samples and `n_features` is the number of features.
89
+
90
+ y : any, default=None
91
+ Ignored. This parameter exists only for compatibility with
92
+ sklearn.pipeline.Pipeline.
93
+
94
+ Returns
95
+ -------
96
+ self : object
97
+ Returns the instance itself.
98
+ """
99
+ X = self._validate_data(
100
+ X,
101
+ accept_sparse=("csr", "csc"),
102
+ dtype=np.float64,
103
+ force_all_finite="allow-nan",
104
+ )
105
+
106
+ if hasattr(X, "toarray"): # sparse matrix
107
+ _, self.variances_ = mean_variance_axis(X, axis=0)
108
+ if self.threshold == 0:
109
+ mins, maxes = min_max_axis(X, axis=0)
110
+ peak_to_peaks = maxes - mins
111
+ else:
112
+ self.variances_ = np.nanvar(X, axis=0)
113
+ if self.threshold == 0:
114
+ peak_to_peaks = np.ptp(X, axis=0)
115
+
116
+ if self.threshold == 0:
117
+ # Use peak-to-peak to avoid numeric precision issues
118
+ # for constant features
119
+ compare_arr = np.array([self.variances_, peak_to_peaks])
120
+ self.variances_ = np.nanmin(compare_arr, axis=0)
121
+
122
+ if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
123
+ msg = "No feature in X meets the variance threshold {0:.5f}"
124
+ if X.shape[0] == 1:
125
+ msg += " (X contains only one sample)"
126
+ raise ValueError(msg.format(self.threshold))
127
+
128
+ return self
129
+
130
+ def _get_support_mask(self):
131
+ check_is_fitted(self)
132
+
133
+ return self.variances_ > self.threshold
134
+
135
+ def _more_tags(self):
136
+ return {"allow_nan": True}
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc ADDED
Binary file (2.95 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc ADDED
Binary file (19.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py ADDED
@@ -0,0 +1,153 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_array_equal
4
+
5
+ from sklearn.base import BaseEstimator
6
+ from sklearn.feature_selection._base import SelectorMixin
7
+ from sklearn.utils.fixes import CSC_CONTAINERS
8
+
9
+
10
+ class StepSelector(SelectorMixin, BaseEstimator):
11
+ """Retain every `step` features (beginning with 0).
12
+
13
+ If `step < 1`, then no features are selected.
14
+ """
15
+
16
+ def __init__(self, step=2):
17
+ self.step = step
18
+
19
+ def fit(self, X, y=None):
20
+ X = self._validate_data(X, accept_sparse="csc")
21
+ return self
22
+
23
+ def _get_support_mask(self):
24
+ mask = np.zeros(self.n_features_in_, dtype=bool)
25
+ if self.step >= 1:
26
+ mask[:: self.step] = True
27
+ return mask
28
+
29
+
30
+ support = [True, False] * 5
31
+ support_inds = [0, 2, 4, 6, 8]
32
+ X = np.arange(20).reshape(2, 10)
33
+ Xt = np.arange(0, 20, 2).reshape(2, 5)
34
+ Xinv = X.copy()
35
+ Xinv[:, 1::2] = 0
36
+ y = [0, 1]
37
+ feature_names = list("ABCDEFGHIJ")
38
+ feature_names_t = feature_names[::2]
39
+ feature_names_inv = np.array(feature_names)
40
+ feature_names_inv[1::2] = ""
41
+
42
+
43
+ def test_transform_dense():
44
+ sel = StepSelector()
45
+ Xt_actual = sel.fit(X, y).transform(X)
46
+ Xt_actual2 = StepSelector().fit_transform(X, y)
47
+ assert_array_equal(Xt, Xt_actual)
48
+ assert_array_equal(Xt, Xt_actual2)
49
+
50
+ # Check dtype matches
51
+ assert np.int32 == sel.transform(X.astype(np.int32)).dtype
52
+ assert np.float32 == sel.transform(X.astype(np.float32)).dtype
53
+
54
+ # Check 1d list and other dtype:
55
+ names_t_actual = sel.transform([feature_names])
56
+ assert_array_equal(feature_names_t, names_t_actual.ravel())
57
+
58
+ # Check wrong shape raises error
59
+ with pytest.raises(ValueError):
60
+ sel.transform(np.array([[1], [2]]))
61
+
62
+
63
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
64
+ def test_transform_sparse(csc_container):
65
+ X_sp = csc_container(X)
66
+ sel = StepSelector()
67
+ Xt_actual = sel.fit(X_sp).transform(X_sp)
68
+ Xt_actual2 = sel.fit_transform(X_sp)
69
+ assert_array_equal(Xt, Xt_actual.toarray())
70
+ assert_array_equal(Xt, Xt_actual2.toarray())
71
+
72
+ # Check dtype matches
73
+ assert np.int32 == sel.transform(X_sp.astype(np.int32)).dtype
74
+ assert np.float32 == sel.transform(X_sp.astype(np.float32)).dtype
75
+
76
+ # Check wrong shape raises error
77
+ with pytest.raises(ValueError):
78
+ sel.transform(np.array([[1], [2]]))
79
+
80
+
81
+ def test_inverse_transform_dense():
82
+ sel = StepSelector()
83
+ Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
84
+ assert_array_equal(Xinv, Xinv_actual)
85
+
86
+ # Check dtype matches
87
+ assert np.int32 == sel.inverse_transform(Xt.astype(np.int32)).dtype
88
+ assert np.float32 == sel.inverse_transform(Xt.astype(np.float32)).dtype
89
+
90
+ # Check 1d list and other dtype:
91
+ names_inv_actual = sel.inverse_transform([feature_names_t])
92
+ assert_array_equal(feature_names_inv, names_inv_actual.ravel())
93
+
94
+ # Check wrong shape raises error
95
+ with pytest.raises(ValueError):
96
+ sel.inverse_transform(np.array([[1], [2]]))
97
+
98
+
99
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
100
+ def test_inverse_transform_sparse(csc_container):
101
+ X_sp = csc_container(X)
102
+ Xt_sp = csc_container(Xt)
103
+ sel = StepSelector()
104
+ Xinv_actual = sel.fit(X_sp).inverse_transform(Xt_sp)
105
+ assert_array_equal(Xinv, Xinv_actual.toarray())
106
+
107
+ # Check dtype matches
108
+ assert np.int32 == sel.inverse_transform(Xt_sp.astype(np.int32)).dtype
109
+ assert np.float32 == sel.inverse_transform(Xt_sp.astype(np.float32)).dtype
110
+
111
+ # Check wrong shape raises error
112
+ with pytest.raises(ValueError):
113
+ sel.inverse_transform(np.array([[1], [2]]))
114
+
115
+
116
+ def test_get_support():
117
+ sel = StepSelector()
118
+ sel.fit(X, y)
119
+ assert_array_equal(support, sel.get_support())
120
+ assert_array_equal(support_inds, sel.get_support(indices=True))
121
+
122
+
123
+ def test_output_dataframe():
124
+ """Check output dtypes for dataframes is consistent with the input dtypes."""
125
+ pd = pytest.importorskip("pandas")
126
+
127
+ X = pd.DataFrame(
128
+ {
129
+ "a": pd.Series([1.0, 2.4, 4.5], dtype=np.float32),
130
+ "b": pd.Series(["a", "b", "a"], dtype="category"),
131
+ "c": pd.Series(["j", "b", "b"], dtype="category"),
132
+ "d": pd.Series([3.0, 2.4, 1.2], dtype=np.float64),
133
+ }
134
+ )
135
+
136
+ for step in [2, 3]:
137
+ sel = StepSelector(step=step).set_output(transform="pandas")
138
+ sel.fit(X)
139
+
140
+ output = sel.transform(X)
141
+ for name, dtype in output.dtypes.items():
142
+ assert dtype == X.dtypes[name]
143
+
144
+ # step=0 will select nothing
145
+ sel0 = StepSelector(step=0).set_output(transform="pandas")
146
+ sel0.fit(X, y)
147
+
148
+ msg = "No features were selected"
149
+ with pytest.warns(UserWarning, match=msg):
150
+ output0 = sel0.transform(X)
151
+
152
+ assert_array_equal(output0.index, X.index)
153
+ assert output0.shape == (X.shape[0], 0)
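
For reference, a short standalone sketch of the contract the StepSelector fixture above exercises (the EveryOther class below is hypothetical, not part of scikit-learn): a selector only needs fit, which must record n_features_in_, and _get_support_mask; SelectorMixin then derives get_support, transform, and inverse_transform from the mask.

    import numpy as np
    from sklearn.base import BaseEstimator
    from sklearn.feature_selection._base import SelectorMixin

    class EveryOther(SelectorMixin, BaseEstimator):
        # Hypothetical selector keeping columns 0, 2, 4, ...
        def fit(self, X, y=None):
            self._validate_data(X)  # records n_features_in_
            return self

        def _get_support_mask(self):
            mask = np.zeros(self.n_features_in_, dtype=bool)
            mask[::2] = True
            return mask

    X = np.arange(20).reshape(2, 10)
    sel = EveryOther().fit(X)
    print(sel.get_support(indices=True))                   # [0 2 4 6 8]
    print(sel.transform(X).shape)                          # (2, 5)
    print(sel.inverse_transform(sel.transform(X)).shape)   # (2, 10)
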
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py ADDED
@@ -0,0 +1,93 @@
1
+ """
2
+ Tests for chi2, currently the only feature selection function designed
3
+ specifically to work with sparse matrices.
4
+ """
5
+
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytest
10
+ import scipy.stats
11
+
12
+ from sklearn.feature_selection import SelectKBest, chi2
13
+ from sklearn.feature_selection._univariate_selection import _chisquare
14
+ from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal
15
+ from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS
16
+
17
+ # Feature 0 is highly informative for class 1;
18
+ # feature 1 is the same everywhere;
19
+ # feature 2 is a bit informative for class 2.
20
+ X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]]
21
+ y = [0, 1, 2, 2]
22
+
23
+
24
+ def mkchi2(k):
25
+ """Make k-best chi2 selector"""
26
+ return SelectKBest(chi2, k=k)
27
+
28
+
29
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
30
+ def test_chi2(csr_container):
31
+ # Test Chi2 feature extraction
32
+
33
+ chi2 = mkchi2(k=1).fit(X, y)
34
+ chi2 = mkchi2(k=1).fit(X, y)
35
+ assert_array_equal(chi2.get_support(indices=True), [0])
36
+ assert_array_equal(chi2.transform(X), np.array(X)[:, [0]])
37
+
38
+ chi2 = mkchi2(k=2).fit(X, y)
39
+ assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
40
+
41
+ Xsp = csr_container(X, dtype=np.float64)
42
+ chi2 = mkchi2(k=2).fit(Xsp, y)
43
+ assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
44
+ Xtrans = chi2.transform(Xsp)
45
+ assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])
46
+
47
+ # == doesn't work on scipy.sparse matrices
48
+ Xtrans = Xtrans.toarray()
49
+ Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
50
+ assert_array_almost_equal(Xtrans, Xtrans2)
51
+
52
+
53
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
54
+ def test_chi2_coo(coo_container):
55
+ # Check that chi2 works with a COO matrix
56
+ # (as returned by CountVectorizer, DictVectorizer)
57
+ Xcoo = coo_container(X)
58
+ mkchi2(k=2).fit_transform(Xcoo, y)
59
+ # if we got here without an exception, we're safe
60
+
61
+
62
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
63
+ def test_chi2_negative(csr_container):
64
+ # Check for proper error on negative numbers in the input X.
65
+ X, y = [[0, 1], [-1e-20, 1]], [0, 1]
66
+ for X in (X, np.array(X), csr_container(X)):
67
+ with pytest.raises(ValueError):
68
+ chi2(X, y)
69
+
70
+
71
+ def test_chi2_unused_feature():
72
+ # Unused feature should evaluate to NaN
73
+ # and should issue no runtime warning
74
+ with warnings.catch_warnings(record=True) as warned:
75
+ warnings.simplefilter("always")
76
+ chi, p = chi2([[1, 0], [0, 0]], [1, 0])
77
+ for w in warned:
78
+ if "divide by zero" in repr(w):
79
+ raise AssertionError("Found unexpected warning %s" % w)
80
+ assert_array_equal(chi, [1, np.nan])
81
+ assert_array_equal(p[1], np.nan)
82
+
83
+
84
+ def test_chisquare():
85
+ # Test replacement for scipy.stats.chisquare against the original.
86
+ obs = np.array([[2.0, 2.0], [1.0, 1.0]])
87
+ exp = np.array([[1.5, 1.5], [1.5, 1.5]])
88
+ # call SciPy first because our version overwrites obs
89
+ chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
90
+ chi_our, p_our = _chisquare(obs, exp)
91
+
92
+ assert_array_almost_equal(chi_scp, chi_our)
93
+ assert_array_almost_equal(p_scp, p_our)
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py ADDED
@@ -0,0 +1,1017 @@
1
+ """
2
+ Todo: cross-check the F-value with statsmodels
3
+ """
4
+ import itertools
5
+ import warnings
6
+
7
+ import numpy as np
8
+ import pytest
9
+ from numpy.testing import assert_allclose
10
+ from scipy import sparse, stats
11
+
12
+ from sklearn.datasets import load_iris, make_classification, make_regression
13
+ from sklearn.feature_selection import (
14
+ GenericUnivariateSelect,
15
+ SelectFdr,
16
+ SelectFpr,
17
+ SelectFwe,
18
+ SelectKBest,
19
+ SelectPercentile,
20
+ chi2,
21
+ f_classif,
22
+ f_oneway,
23
+ f_regression,
24
+ mutual_info_classif,
25
+ mutual_info_regression,
26
+ r_regression,
27
+ )
28
+ from sklearn.utils import safe_mask
29
+ from sklearn.utils._testing import (
30
+ _convert_container,
31
+ assert_almost_equal,
32
+ assert_array_almost_equal,
33
+ assert_array_equal,
34
+ ignore_warnings,
35
+ )
36
+ from sklearn.utils.fixes import CSR_CONTAINERS
37
+
38
+ ##############################################################################
39
+ # Test the score functions
40
+
41
+
42
+ def test_f_oneway_vs_scipy_stats():
43
+ # Test that our f_oneway gives the same result as scipy.stats
44
+ rng = np.random.RandomState(0)
45
+ X1 = rng.randn(10, 3)
46
+ X2 = 1 + rng.randn(10, 3)
47
+ f, pv = stats.f_oneway(X1, X2)
48
+ f2, pv2 = f_oneway(X1, X2)
49
+ assert np.allclose(f, f2)
50
+ assert np.allclose(pv, pv2)
51
+
52
+
53
+ def test_f_oneway_ints():
54
+ # Smoke test f_oneway on integers: that it does not raise casting errors
55
+ # with recent numpys
56
+ rng = np.random.RandomState(0)
57
+ X = rng.randint(10, size=(10, 10))
58
+ y = np.arange(10)
59
+ fint, pint = f_oneway(X, y)
60
+
61
+ # test that it gives the same result as with float
62
+ f, p = f_oneway(X.astype(float), y)
63
+ assert_array_almost_equal(f, fint, decimal=4)
64
+ assert_array_almost_equal(p, pint, decimal=4)
65
+
66
+
67
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
68
+ def test_f_classif(csr_container):
69
+ # Test whether the F test yields meaningful results
70
+ # on a simple simulated classification problem
71
+ X, y = make_classification(
72
+ n_samples=200,
73
+ n_features=20,
74
+ n_informative=3,
75
+ n_redundant=2,
76
+ n_repeated=0,
77
+ n_classes=8,
78
+ n_clusters_per_class=1,
79
+ flip_y=0.0,
80
+ class_sep=10,
81
+ shuffle=False,
82
+ random_state=0,
83
+ )
84
+
85
+ F, pv = f_classif(X, y)
86
+ F_sparse, pv_sparse = f_classif(csr_container(X), y)
87
+ assert (F > 0).all()
88
+ assert (pv > 0).all()
89
+ assert (pv < 1).all()
90
+ assert (pv[:5] < 0.05).all()
91
+ assert (pv[5:] > 1.0e-4).all()
92
+ assert_array_almost_equal(F_sparse, F)
93
+ assert_array_almost_equal(pv_sparse, pv)
94
+
95
+
96
+ @pytest.mark.parametrize("center", [True, False])
97
+ def test_r_regression(center):
98
+ X, y = make_regression(
99
+ n_samples=2000, n_features=20, n_informative=5, shuffle=False, random_state=0
100
+ )
101
+
102
+ corr_coeffs = r_regression(X, y, center=center)
103
+ assert (-1 < corr_coeffs).all()
104
+ assert (corr_coeffs < 1).all()
105
+
106
+ sparse_X = _convert_container(X, "sparse")
107
+
108
+ sparse_corr_coeffs = r_regression(sparse_X, y, center=center)
109
+ assert_allclose(sparse_corr_coeffs, corr_coeffs)
110
+
111
+ # Testing against numpy for reference
112
+ Z = np.hstack((X, y[:, np.newaxis]))
113
+ correlation_matrix = np.corrcoef(Z, rowvar=False)
114
+ np_corr_coeffs = correlation_matrix[:-1, -1]
115
+ assert_array_almost_equal(np_corr_coeffs, corr_coeffs, decimal=3)
116
+
117
+
118
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
119
+ def test_f_regression(csr_container):
120
+ # Test whether the F test yields meaningful results
121
+ # on a simple simulated regression problem
122
+ X, y = make_regression(
123
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
124
+ )
125
+
126
+ F, pv = f_regression(X, y)
127
+ assert (F > 0).all()
128
+ assert (pv > 0).all()
129
+ assert (pv < 1).all()
130
+ assert (pv[:5] < 0.05).all()
131
+ assert (pv[5:] > 1.0e-4).all()
132
+
133
+ # with centering, compare with sparse
134
+ F, pv = f_regression(X, y, center=True)
135
+ F_sparse, pv_sparse = f_regression(csr_container(X), y, center=True)
136
+ assert_allclose(F_sparse, F)
137
+ assert_allclose(pv_sparse, pv)
138
+
139
+ # again without centering, compare with sparse
140
+ F, pv = f_regression(X, y, center=False)
141
+ F_sparse, pv_sparse = f_regression(csr_container(X), y, center=False)
142
+ assert_allclose(F_sparse, F)
143
+ assert_allclose(pv_sparse, pv)
144
+
145
+
146
+ def test_f_regression_input_dtype():
147
+ # Test whether f_regression returns the same value
148
+ # for any numeric data_type
149
+ rng = np.random.RandomState(0)
150
+ X = rng.rand(10, 20)
151
+ y = np.arange(10).astype(int)
152
+
153
+ F1, pv1 = f_regression(X, y)
154
+ F2, pv2 = f_regression(X, y.astype(float))
155
+ assert_allclose(F1, F2, 5)
156
+ assert_allclose(pv1, pv2, 5)
157
+
158
+
159
+ def test_f_regression_center():
160
+ # Test whether f_regression preserves dof according to 'center' argument
161
+ # We use two centered variates so we have a simple relationship between
162
+ # F-score with variates centering and F-score without variates centering.
163
+ # Create toy example
164
+ X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
165
+ n_samples = X.size
166
+ Y = np.ones(n_samples)
167
+ Y[::2] *= -1.0
168
+ Y[0] = 0.0 # make the mean of Y zero
169
+
170
+ F1, _ = f_regression(X, Y, center=True)
171
+ F2, _ = f_regression(X, Y, center=False)
172
+ assert_allclose(F1 * (n_samples - 1.0) / (n_samples - 2.0), F2)
173
+ assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
174
+
175
+
176
+ @pytest.mark.parametrize(
177
+ "X, y, expected_corr_coef, force_finite",
178
+ [
179
+ (
180
+ # A feature in X is constant - forcing finite
181
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
182
+ np.array([0, 1, 1, 0]),
183
+ np.array([0.0, 0.32075]),
184
+ True,
185
+ ),
186
+ (
187
+ # The target y is constant - forcing finite
188
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
189
+ np.array([0, 0, 0, 0]),
190
+ np.array([0.0, 0.0]),
191
+ True,
192
+ ),
193
+ (
194
+ # A feature in X is constant - not forcing finite
195
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
196
+ np.array([0, 1, 1, 0]),
197
+ np.array([np.nan, 0.32075]),
198
+ False,
199
+ ),
200
+ (
201
+ # The target y is constant - not forcing finite
202
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
203
+ np.array([0, 0, 0, 0]),
204
+ np.array([np.nan, np.nan]),
205
+ False,
206
+ ),
207
+ ],
208
+ )
209
+ def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite):
210
+ """Check the behaviour of `force_finite` for some corner cases with `r_regression`.
211
+
212
+ Non-regression test for:
213
+ https://github.com/scikit-learn/scikit-learn/issues/15672
214
+ """
215
+ with warnings.catch_warnings():
216
+ warnings.simplefilter("error", RuntimeWarning)
217
+ corr_coef = r_regression(X, y, force_finite=force_finite)
218
+ np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef)
219
+
220
+
221
+ @pytest.mark.parametrize(
222
+ "X, y, expected_f_statistic, expected_p_values, force_finite",
223
+ [
224
+ (
225
+ # A feature in X is constant - forcing finite
226
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
227
+ np.array([0, 1, 1, 0]),
228
+ np.array([0.0, 0.2293578]),
229
+ np.array([1.0, 0.67924985]),
230
+ True,
231
+ ),
232
+ (
233
+ # The target y is constant - forcing finite
234
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
235
+ np.array([0, 0, 0, 0]),
236
+ np.array([0.0, 0.0]),
237
+ np.array([1.0, 1.0]),
238
+ True,
239
+ ),
240
+ (
241
+ # Feature in X correlated with y - forcing finite
242
+ np.array([[0, 1], [1, 0], [2, 10], [3, 4]]),
243
+ np.array([0, 1, 2, 3]),
244
+ np.array([np.finfo(np.float64).max, 0.845433]),
245
+ np.array([0.0, 0.454913]),
246
+ True,
247
+ ),
248
+ (
249
+ # Feature in X anti-correlated with y - forcing finite
250
+ np.array([[3, 1], [2, 0], [1, 10], [0, 4]]),
251
+ np.array([0, 1, 2, 3]),
252
+ np.array([np.finfo(np.float64).max, 0.845433]),
253
+ np.array([0.0, 0.454913]),
254
+ True,
255
+ ),
256
+ (
257
+ # A feature in X is constant - not forcing finite
258
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
259
+ np.array([0, 1, 1, 0]),
260
+ np.array([np.nan, 0.2293578]),
261
+ np.array([np.nan, 0.67924985]),
262
+ False,
263
+ ),
264
+ (
265
+ # The target y is constant - not forcing finite
266
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
267
+ np.array([0, 0, 0, 0]),
268
+ np.array([np.nan, np.nan]),
269
+ np.array([np.nan, np.nan]),
270
+ False,
271
+ ),
272
+ (
273
+ # Feature in X correlated with y - not forcing finite
274
+ np.array([[0, 1], [1, 0], [2, 10], [3, 4]]),
275
+ np.array([0, 1, 2, 3]),
276
+ np.array([np.inf, 0.845433]),
277
+ np.array([0.0, 0.454913]),
278
+ False,
279
+ ),
280
+ (
281
+ # Feature in X anti-correlated with y - not forcing finite
282
+ np.array([[3, 1], [2, 0], [1, 10], [0, 4]]),
283
+ np.array([0, 1, 2, 3]),
284
+ np.array([np.inf, 0.845433]),
285
+ np.array([0.0, 0.454913]),
286
+ False,
287
+ ),
288
+ ],
289
+ )
290
+ def test_f_regression_corner_case(
291
+ X, y, expected_f_statistic, expected_p_values, force_finite
292
+ ):
293
+ """Check the behaviour of `force_finite` for some corner cases with `f_regression`.
294
+
295
+ Non-regression test for:
296
+ https://github.com/scikit-learn/scikit-learn/issues/15672
297
+ """
298
+ with warnings.catch_warnings():
299
+ warnings.simplefilter("error", RuntimeWarning)
300
+ f_statistic, p_values = f_regression(X, y, force_finite=force_finite)
301
+ np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic)
302
+ np.testing.assert_array_almost_equal(p_values, expected_p_values)
303
+
304
+
305
+ def test_f_classif_multi_class():
306
+ # Test whether the F test yields meaningful results
307
+ # on a simple simulated classification problem
308
+ X, y = make_classification(
309
+ n_samples=200,
310
+ n_features=20,
311
+ n_informative=3,
312
+ n_redundant=2,
313
+ n_repeated=0,
314
+ n_classes=8,
315
+ n_clusters_per_class=1,
316
+ flip_y=0.0,
317
+ class_sep=10,
318
+ shuffle=False,
319
+ random_state=0,
320
+ )
321
+
322
+ F, pv = f_classif(X, y)
323
+ assert (F > 0).all()
324
+ assert (pv > 0).all()
325
+ assert (pv < 1).all()
326
+ assert (pv[:5] < 0.05).all()
327
+ assert (pv[5:] > 1.0e-4).all()
328
+
329
+
330
+ def test_select_percentile_classif():
331
+ # Test whether the relative univariate feature selection
332
+ # gets the correct items in a simple classification problem
333
+ # with the percentile heuristic
334
+ X, y = make_classification(
335
+ n_samples=200,
336
+ n_features=20,
337
+ n_informative=3,
338
+ n_redundant=2,
339
+ n_repeated=0,
340
+ n_classes=8,
341
+ n_clusters_per_class=1,
342
+ flip_y=0.0,
343
+ class_sep=10,
344
+ shuffle=False,
345
+ random_state=0,
346
+ )
347
+
348
+ univariate_filter = SelectPercentile(f_classif, percentile=25)
349
+ X_r = univariate_filter.fit(X, y).transform(X)
350
+ X_r2 = (
351
+ GenericUnivariateSelect(f_classif, mode="percentile", param=25)
352
+ .fit(X, y)
353
+ .transform(X)
354
+ )
355
+ assert_array_equal(X_r, X_r2)
356
+ support = univariate_filter.get_support()
357
+ gtruth = np.zeros(20)
358
+ gtruth[:5] = 1
359
+ assert_array_equal(support, gtruth)
360
+
361
+
362
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
363
+ def test_select_percentile_classif_sparse(csr_container):
364
+ # Test whether the relative univariate feature selection
365
+ # gets the correct items in a simple classification problem
366
+ # with the percentile heuristic
367
+ X, y = make_classification(
368
+ n_samples=200,
369
+ n_features=20,
370
+ n_informative=3,
371
+ n_redundant=2,
372
+ n_repeated=0,
373
+ n_classes=8,
374
+ n_clusters_per_class=1,
375
+ flip_y=0.0,
376
+ class_sep=10,
377
+ shuffle=False,
378
+ random_state=0,
379
+ )
380
+ X = csr_container(X)
381
+ univariate_filter = SelectPercentile(f_classif, percentile=25)
382
+ X_r = univariate_filter.fit(X, y).transform(X)
383
+ X_r2 = (
384
+ GenericUnivariateSelect(f_classif, mode="percentile", param=25)
385
+ .fit(X, y)
386
+ .transform(X)
387
+ )
388
+ assert_array_equal(X_r.toarray(), X_r2.toarray())
389
+ support = univariate_filter.get_support()
390
+ gtruth = np.zeros(20)
391
+ gtruth[:5] = 1
392
+ assert_array_equal(support, gtruth)
393
+
394
+ X_r2inv = univariate_filter.inverse_transform(X_r2)
395
+ assert sparse.issparse(X_r2inv)
396
+ support_mask = safe_mask(X_r2inv, support)
397
+ assert X_r2inv.shape == X.shape
398
+ assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
399
+ # Check other columns are empty
400
+ assert X_r2inv.nnz == X_r.nnz
401
+
402
+
403
+ ##############################################################################
404
+ # Test univariate selection in classification settings
405
+
406
+
407
+ def test_select_kbest_classif():
408
+ # Test whether the relative univariate feature selection
409
+ # gets the correct items in a simple classification problem
410
+ # with the k best heuristic
411
+ X, y = make_classification(
412
+ n_samples=200,
413
+ n_features=20,
414
+ n_informative=3,
415
+ n_redundant=2,
416
+ n_repeated=0,
417
+ n_classes=8,
418
+ n_clusters_per_class=1,
419
+ flip_y=0.0,
420
+ class_sep=10,
421
+ shuffle=False,
422
+ random_state=0,
423
+ )
424
+
425
+ univariate_filter = SelectKBest(f_classif, k=5)
426
+ X_r = univariate_filter.fit(X, y).transform(X)
427
+ X_r2 = (
428
+ GenericUnivariateSelect(f_classif, mode="k_best", param=5)
429
+ .fit(X, y)
430
+ .transform(X)
431
+ )
432
+ assert_array_equal(X_r, X_r2)
433
+ support = univariate_filter.get_support()
434
+ gtruth = np.zeros(20)
435
+ gtruth[:5] = 1
436
+ assert_array_equal(support, gtruth)
437
+
438
+
439
+ def test_select_kbest_all():
440
+ # Test whether k="all" correctly returns all features.
441
+ X, y = make_classification(
442
+ n_samples=20, n_features=10, shuffle=False, random_state=0
443
+ )
444
+
445
+ univariate_filter = SelectKBest(f_classif, k="all")
446
+ X_r = univariate_filter.fit(X, y).transform(X)
447
+ assert_array_equal(X, X_r)
448
+ # Non-regression test for:
449
+ # https://github.com/scikit-learn/scikit-learn/issues/24949
450
+ X_r2 = (
451
+ GenericUnivariateSelect(f_classif, mode="k_best", param="all")
452
+ .fit(X, y)
453
+ .transform(X)
454
+ )
455
+ assert_array_equal(X_r, X_r2)
456
+
457
+
458
+ @pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
459
+ def test_select_kbest_zero(dtype_in):
460
+ # Test whether k=0 correctly returns no features.
461
+ X, y = make_classification(
462
+ n_samples=20, n_features=10, shuffle=False, random_state=0
463
+ )
464
+ X = X.astype(dtype_in)
465
+
466
+ univariate_filter = SelectKBest(f_classif, k=0)
467
+ univariate_filter.fit(X, y)
468
+ support = univariate_filter.get_support()
469
+ gtruth = np.zeros(10, dtype=bool)
470
+ assert_array_equal(support, gtruth)
471
+ with pytest.warns(UserWarning, match="No features were selected"):
472
+ X_selected = univariate_filter.transform(X)
473
+ assert X_selected.shape == (20, 0)
474
+ assert X_selected.dtype == dtype_in
475
+
476
+
477
+ def test_select_heuristics_classif():
478
+ # Test whether the relative univariate feature selection
479
+ # gets the correct items in a simple classification problem
480
+ # with the fdr, fwe and fpr heuristics
481
+ X, y = make_classification(
482
+ n_samples=200,
483
+ n_features=20,
484
+ n_informative=3,
485
+ n_redundant=2,
486
+ n_repeated=0,
487
+ n_classes=8,
488
+ n_clusters_per_class=1,
489
+ flip_y=0.0,
490
+ class_sep=10,
491
+ shuffle=False,
492
+ random_state=0,
493
+ )
494
+
495
+ univariate_filter = SelectFwe(f_classif, alpha=0.01)
496
+ X_r = univariate_filter.fit(X, y).transform(X)
497
+ gtruth = np.zeros(20)
498
+ gtruth[:5] = 1
499
+ for mode in ["fdr", "fpr", "fwe"]:
500
+ X_r2 = (
501
+ GenericUnivariateSelect(f_classif, mode=mode, param=0.01)
502
+ .fit(X, y)
503
+ .transform(X)
504
+ )
505
+ assert_array_equal(X_r, X_r2)
506
+ support = univariate_filter.get_support()
507
+ assert_allclose(support, gtruth)
508
+
509
+
510
+ ##############################################################################
511
+ # Test univariate selection in regression settings
512
+
513
+
514
+ def assert_best_scores_kept(score_filter):
515
+ scores = score_filter.scores_
516
+ support = score_filter.get_support()
517
+ assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum() :])
518
+
519
+
520
+ def test_select_percentile_regression():
521
+ # Test whether the relative univariate feature selection
522
+ # gets the correct items in a simple regression problem
523
+ # with the percentile heuristic
524
+ X, y = make_regression(
525
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
526
+ )
527
+
528
+ univariate_filter = SelectPercentile(f_regression, percentile=25)
529
+ X_r = univariate_filter.fit(X, y).transform(X)
530
+ assert_best_scores_kept(univariate_filter)
531
+ X_r2 = (
532
+ GenericUnivariateSelect(f_regression, mode="percentile", param=25)
533
+ .fit(X, y)
534
+ .transform(X)
535
+ )
536
+ assert_array_equal(X_r, X_r2)
537
+ support = univariate_filter.get_support()
538
+ gtruth = np.zeros(20)
539
+ gtruth[:5] = 1
540
+ assert_array_equal(support, gtruth)
541
+ X_2 = X.copy()
542
+ X_2[:, np.logical_not(support)] = 0
543
+ assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
544
+ # Check inverse_transform respects dtype
545
+ assert_array_equal(
546
+ X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool))
547
+ )
548
+
549
+
550
+ def test_select_percentile_regression_full():
551
+ # Test whether the relative univariate feature selection
552
+ # selects all features when '100%' is asked.
553
+ X, y = make_regression(
554
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
555
+ )
556
+
557
+ univariate_filter = SelectPercentile(f_regression, percentile=100)
558
+ X_r = univariate_filter.fit(X, y).transform(X)
559
+ assert_best_scores_kept(univariate_filter)
560
+ X_r2 = (
561
+ GenericUnivariateSelect(f_regression, mode="percentile", param=100)
562
+ .fit(X, y)
563
+ .transform(X)
564
+ )
565
+ assert_array_equal(X_r, X_r2)
566
+ support = univariate_filter.get_support()
567
+ gtruth = np.ones(20)
568
+ assert_array_equal(support, gtruth)
569
+
570
+
571
+ def test_select_kbest_regression():
572
+ # Test whether the relative univariate feature selection
573
+ # gets the correct items in a simple regression problem
574
+ # with the k best heuristic
575
+ X, y = make_regression(
576
+ n_samples=200,
577
+ n_features=20,
578
+ n_informative=5,
579
+ shuffle=False,
580
+ random_state=0,
581
+ noise=10,
582
+ )
583
+
584
+ univariate_filter = SelectKBest(f_regression, k=5)
585
+ X_r = univariate_filter.fit(X, y).transform(X)
586
+ assert_best_scores_kept(univariate_filter)
587
+ X_r2 = (
588
+ GenericUnivariateSelect(f_regression, mode="k_best", param=5)
589
+ .fit(X, y)
590
+ .transform(X)
591
+ )
592
+ assert_array_equal(X_r, X_r2)
593
+ support = univariate_filter.get_support()
594
+ gtruth = np.zeros(20)
595
+ gtruth[:5] = 1
596
+ assert_array_equal(support, gtruth)
597
+
598
+
599
+ def test_select_heuristics_regression():
600
+ # Test whether the relative univariate feature selection
601
+ # gets the correct items in a simple regression problem
602
+ # with the fpr, fdr or fwe heuristics
603
+ X, y = make_regression(
604
+ n_samples=200,
605
+ n_features=20,
606
+ n_informative=5,
607
+ shuffle=False,
608
+ random_state=0,
609
+ noise=10,
610
+ )
611
+
612
+ univariate_filter = SelectFpr(f_regression, alpha=0.01)
613
+ X_r = univariate_filter.fit(X, y).transform(X)
614
+ gtruth = np.zeros(20)
615
+ gtruth[:5] = 1
616
+ for mode in ["fdr", "fpr", "fwe"]:
617
+ X_r2 = (
618
+ GenericUnivariateSelect(f_regression, mode=mode, param=0.01)
619
+ .fit(X, y)
620
+ .transform(X)
621
+ )
622
+ assert_array_equal(X_r, X_r2)
623
+ support = univariate_filter.get_support()
624
+ assert_array_equal(support[:5], np.ones((5,), dtype=bool))
625
+ assert np.sum(support[5:] == 1) < 3
626
+
627
+
628
+ def test_boundary_case_ch2():
629
+ # Test boundary case, and always aim to select 1 feature.
630
+ X = np.array([[10, 20], [20, 20], [20, 30]])
631
+ y = np.array([[1], [0], [0]])
632
+ scores, pvalues = chi2(X, y)
633
+ assert_array_almost_equal(scores, np.array([4.0, 0.71428571]))
634
+ assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
635
+
636
+ filter_fdr = SelectFdr(chi2, alpha=0.1)
637
+ filter_fdr.fit(X, y)
638
+ support_fdr = filter_fdr.get_support()
639
+ assert_array_equal(support_fdr, np.array([True, False]))
640
+
641
+ filter_kbest = SelectKBest(chi2, k=1)
642
+ filter_kbest.fit(X, y)
643
+ support_kbest = filter_kbest.get_support()
644
+ assert_array_equal(support_kbest, np.array([True, False]))
645
+
646
+ filter_percentile = SelectPercentile(chi2, percentile=50)
647
+ filter_percentile.fit(X, y)
648
+ support_percentile = filter_percentile.get_support()
649
+ assert_array_equal(support_percentile, np.array([True, False]))
650
+
651
+ filter_fpr = SelectFpr(chi2, alpha=0.1)
652
+ filter_fpr.fit(X, y)
653
+ support_fpr = filter_fpr.get_support()
654
+ assert_array_equal(support_fpr, np.array([True, False]))
655
+
656
+ filter_fwe = SelectFwe(chi2, alpha=0.1)
657
+ filter_fwe.fit(X, y)
658
+ support_fwe = filter_fwe.get_support()
659
+ assert_array_equal(support_fwe, np.array([True, False]))
660
+
661
+
662
+ @pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1])
663
+ @pytest.mark.parametrize("n_informative", [1, 5, 10])
664
+ def test_select_fdr_regression(alpha, n_informative):
665
+ # Test that fdr heuristic actually has low FDR.
666
+ def single_fdr(alpha, n_informative, random_state):
667
+ X, y = make_regression(
668
+ n_samples=150,
669
+ n_features=20,
670
+ n_informative=n_informative,
671
+ shuffle=False,
672
+ random_state=random_state,
673
+ noise=10,
674
+ )
675
+
676
+ with warnings.catch_warnings(record=True):
677
+ # Warnings can be raised when no features are selected
678
+ # (low alpha or very noisy data)
679
+ univariate_filter = SelectFdr(f_regression, alpha=alpha)
680
+ X_r = univariate_filter.fit(X, y).transform(X)
681
+ X_r2 = (
682
+ GenericUnivariateSelect(f_regression, mode="fdr", param=alpha)
683
+ .fit(X, y)
684
+ .transform(X)
685
+ )
686
+
687
+ assert_array_equal(X_r, X_r2)
688
+ support = univariate_filter.get_support()
689
+ num_false_positives = np.sum(support[n_informative:] == 1)
690
+ num_true_positives = np.sum(support[:n_informative] == 1)
691
+
692
+ if num_false_positives == 0:
693
+ return 0.0
694
+ false_discovery_rate = num_false_positives / (
695
+ num_true_positives + num_false_positives
696
+ )
697
+ return false_discovery_rate
698
+
699
+ # As per Benjamini-Hochberg, the expected false discovery rate
700
+ # should be lower than alpha:
701
+ # FDR = E(FP / (TP + FP)) <= alpha
702
+ false_discovery_rate = np.mean(
703
+ [single_fdr(alpha, n_informative, random_state) for random_state in range(100)]
704
+ )
705
+ assert alpha >= false_discovery_rate
706
+
707
+ # Make sure that the empirical false discovery rate increases
708
+ # with alpha:
709
+ if false_discovery_rate != 0:
710
+ assert false_discovery_rate > alpha / 10
711
+
712
+
713
+ def test_select_fwe_regression():
714
+ # Test whether the relative univariate feature selection
715
+ # gets the correct items in a simple regression problem
716
+ # with the fwe heuristic
717
+ X, y = make_regression(
718
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
719
+ )
720
+
721
+ univariate_filter = SelectFwe(f_regression, alpha=0.01)
722
+ X_r = univariate_filter.fit(X, y).transform(X)
723
+ X_r2 = (
724
+ GenericUnivariateSelect(f_regression, mode="fwe", param=0.01)
725
+ .fit(X, y)
726
+ .transform(X)
727
+ )
728
+ assert_array_equal(X_r, X_r2)
729
+ support = univariate_filter.get_support()
730
+ gtruth = np.zeros(20)
731
+ gtruth[:5] = 1
732
+ assert_array_equal(support[:5], np.ones((5,), dtype=bool))
733
+ assert np.sum(support[5:] == 1) < 2
734
+
735
+
736
+ def test_selectkbest_tiebreaking():
737
+ # Test whether SelectKBest actually selects k features in case of ties.
738
+ # Prior to 0.11, SelectKBest would return more features than requested.
739
+ Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
740
+ y = [1]
741
+ dummy_score = lambda X, y: (X[0], X[0])
742
+ for X in Xs:
743
+ sel = SelectKBest(dummy_score, k=1)
744
+ X1 = ignore_warnings(sel.fit_transform)([X], y)
745
+ assert X1.shape[1] == 1
746
+ assert_best_scores_kept(sel)
747
+
748
+ sel = SelectKBest(dummy_score, k=2)
749
+ X2 = ignore_warnings(sel.fit_transform)([X], y)
750
+ assert X2.shape[1] == 2
751
+ assert_best_scores_kept(sel)
752
+
753
+
754
+ def test_selectpercentile_tiebreaking():
755
+ # Test if SelectPercentile selects the right n_features in case of ties.
756
+ Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
757
+ y = [1]
758
+ dummy_score = lambda X, y: (X[0], X[0])
759
+ for X in Xs:
760
+ sel = SelectPercentile(dummy_score, percentile=34)
761
+ X1 = ignore_warnings(sel.fit_transform)([X], y)
762
+ assert X1.shape[1] == 1
763
+ assert_best_scores_kept(sel)
764
+
765
+ sel = SelectPercentile(dummy_score, percentile=67)
766
+ X2 = ignore_warnings(sel.fit_transform)([X], y)
767
+ assert X2.shape[1] == 2
768
+ assert_best_scores_kept(sel)
769
+
770
+
771
+ def test_tied_pvalues():
772
+ # Test whether k-best and percentiles work with tied pvalues from chi2.
773
+ # chi2 will return the same p-values for the following features, but it
774
+ # will return different scores.
775
+ X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
776
+ y = [0, 1]
777
+
778
+ for perm in itertools.permutations((0, 1, 2)):
779
+ X = X0[:, perm]
780
+ Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
781
+ assert Xt.shape == (2, 2)
782
+ assert 9998 not in Xt
783
+
784
+ Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
785
+ assert Xt.shape == (2, 2)
786
+ assert 9998 not in Xt
787
+
788
+
789
+ def test_scorefunc_multilabel():
790
+ # Test whether k-best and percentiles work with multilabel targets and chi2.
791
+
792
+ X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
793
+ y = [[1, 1], [0, 1], [1, 0]]
794
+
795
+ Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
796
+ assert Xt.shape == (3, 2)
797
+ assert 0 not in Xt
798
+
799
+ Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
800
+ assert Xt.shape == (3, 2)
801
+ assert 0 not in Xt
802
+
803
+
804
+ def test_tied_scores():
805
+ # Test for stable sorting in k-best with tied scores.
806
+ X_train = np.array([[0, 0, 0], [1, 1, 1]])
807
+ y_train = [0, 1]
808
+
809
+ for n_features in [1, 2, 3]:
810
+ sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
811
+ X_test = sel.transform([[0, 1, 2]])
812
+ assert_array_equal(X_test[0], np.arange(3)[-n_features:])
813
+
814
+
815
+ def test_nans():
816
+ # Assert that SelectKBest and SelectPercentile can handle NaNs.
817
+ # First feature has zero variance to confuse f_classif (ANOVA) and
818
+ # make it return a NaN.
819
+ X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]]
820
+ y = [1, 0, 1]
821
+
822
+ for select in (
823
+ SelectKBest(f_classif, k=2),
824
+ SelectPercentile(f_classif, percentile=67),
825
+ ):
826
+ ignore_warnings(select.fit)(X, y)
827
+ assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
828
+
829
+
830
+ def test_invalid_k():
831
+ X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]]
832
+ y = [1, 0, 1]
833
+
834
+ msg = "k=4 is greater than n_features=3. All the features will be returned."
835
+ with pytest.warns(UserWarning, match=msg):
836
+ SelectKBest(k=4).fit(X, y)
837
+ with pytest.warns(UserWarning, match=msg):
838
+ GenericUnivariateSelect(mode="k_best", param=4).fit(X, y)
839
+
840
+
841
+ def test_f_classif_constant_feature():
842
+ # Test that f_classif warns if a feature is constant throughout.
843
+
844
+ X, y = make_classification(n_samples=10, n_features=5)
845
+ X[:, 0] = 2.0
846
+ with pytest.warns(UserWarning):
847
+ f_classif(X, y)
848
+
849
+
850
+ def test_no_feature_selected():
851
+ rng = np.random.RandomState(0)
852
+
853
+ # Generate random uncorrelated data: a strict univariate test should
854
+ # reject all the features
855
+ X = rng.rand(40, 10)
856
+ y = rng.randint(0, 4, size=40)
857
+ strict_selectors = [
858
+ SelectFwe(alpha=0.01).fit(X, y),
859
+ SelectFdr(alpha=0.01).fit(X, y),
860
+ SelectFpr(alpha=0.01).fit(X, y),
861
+ SelectPercentile(percentile=0).fit(X, y),
862
+ SelectKBest(k=0).fit(X, y),
863
+ ]
864
+ for selector in strict_selectors:
865
+ assert_array_equal(selector.get_support(), np.zeros(10))
866
+ with pytest.warns(UserWarning, match="No features were selected"):
867
+ X_selected = selector.transform(X)
868
+ assert X_selected.shape == (40, 0)
869
+
870
+
871
+ def test_mutual_info_classif():
872
+ X, y = make_classification(
873
+ n_samples=100,
874
+ n_features=5,
875
+ n_informative=1,
876
+ n_redundant=1,
877
+ n_repeated=0,
878
+ n_classes=2,
879
+ n_clusters_per_class=1,
880
+ flip_y=0.0,
881
+ class_sep=10,
882
+ shuffle=False,
883
+ random_state=0,
884
+ )
885
+
886
+ # Test in KBest mode.
887
+ univariate_filter = SelectKBest(mutual_info_classif, k=2)
888
+ X_r = univariate_filter.fit(X, y).transform(X)
889
+ X_r2 = (
890
+ GenericUnivariateSelect(mutual_info_classif, mode="k_best", param=2)
891
+ .fit(X, y)
892
+ .transform(X)
893
+ )
894
+ assert_array_equal(X_r, X_r2)
895
+ support = univariate_filter.get_support()
896
+ gtruth = np.zeros(5)
897
+ gtruth[:2] = 1
898
+ assert_array_equal(support, gtruth)
899
+
900
+ # Test in Percentile mode.
901
+ univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
902
+ X_r = univariate_filter.fit(X, y).transform(X)
903
+ X_r2 = (
904
+ GenericUnivariateSelect(mutual_info_classif, mode="percentile", param=40)
905
+ .fit(X, y)
906
+ .transform(X)
907
+ )
908
+ assert_array_equal(X_r, X_r2)
909
+ support = univariate_filter.get_support()
910
+ gtruth = np.zeros(5)
911
+ gtruth[:2] = 1
912
+ assert_array_equal(support, gtruth)
913
+
914
+
915
+ def test_mutual_info_regression():
916
+ X, y = make_regression(
917
+ n_samples=100,
918
+ n_features=10,
919
+ n_informative=2,
920
+ shuffle=False,
921
+ random_state=0,
922
+ noise=10,
923
+ )
924
+
925
+ # Test in KBest mode.
926
+ univariate_filter = SelectKBest(mutual_info_regression, k=2)
927
+ X_r = univariate_filter.fit(X, y).transform(X)
928
+ assert_best_scores_kept(univariate_filter)
929
+ X_r2 = (
930
+ GenericUnivariateSelect(mutual_info_regression, mode="k_best", param=2)
931
+ .fit(X, y)
932
+ .transform(X)
933
+ )
934
+ assert_array_equal(X_r, X_r2)
935
+ support = univariate_filter.get_support()
936
+ gtruth = np.zeros(10)
937
+ gtruth[:2] = 1
938
+ assert_array_equal(support, gtruth)
939
+
940
+ # Test in Percentile mode.
941
+ univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
942
+ X_r = univariate_filter.fit(X, y).transform(X)
943
+ X_r2 = (
944
+ GenericUnivariateSelect(mutual_info_regression, mode="percentile", param=20)
945
+ .fit(X, y)
946
+ .transform(X)
947
+ )
948
+ assert_array_equal(X_r, X_r2)
949
+ support = univariate_filter.get_support()
950
+ gtruth = np.zeros(10)
951
+ gtruth[:2] = 1
952
+ assert_array_equal(support, gtruth)
953
+
954
+
955
+ def test_dataframe_output_dtypes():
956
+ """Check that the output datafarme dtypes are the same as the input.
957
+
958
+ Non-regression test for gh-24860.
959
+ """
960
+ pd = pytest.importorskip("pandas")
961
+
962
+ X, y = load_iris(return_X_y=True, as_frame=True)
963
+ X = X.astype(
964
+ {
965
+ "petal length (cm)": np.float32,
966
+ "petal width (cm)": np.float64,
967
+ }
968
+ )
969
+ X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10)
970
+
971
+ column_order = X.columns
972
+
973
+ def selector(X, y):
974
+ ranking = {
975
+ "sepal length (cm)": 1,
976
+ "sepal width (cm)": 2,
977
+ "petal length (cm)": 3,
978
+ "petal width (cm)": 4,
979
+ "petal_width_binned": 5,
980
+ }
981
+ return np.asarray([ranking[name] for name in column_order])
982
+
983
+ univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas")
984
+ output = univariate_filter.fit_transform(X, y)
985
+
986
+ assert_array_equal(
987
+ output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"]
988
+ )
989
+ for name, dtype in output.dtypes.items():
990
+ assert dtype == X.dtypes[name]
991
+
992
+
993
+ @pytest.mark.parametrize(
994
+ "selector",
995
+ [
996
+ SelectKBest(k=4),
997
+ SelectPercentile(percentile=80),
998
+ GenericUnivariateSelect(mode="k_best", param=4),
999
+ GenericUnivariateSelect(mode="percentile", param=80),
1000
+ ],
1001
+ )
1002
+ def test_unsupervised_filter(selector):
1003
+ """Check support for unsupervised feature selection for the filter that could
1004
+ require only `X`.
1005
+ """
1006
+ rng = np.random.RandomState(0)
1007
+ X = rng.randn(10, 5)
1008
+
1009
+ def score_func(X, y=None):
1010
+ return np.array([1, 1, 1, 1, 0])
1011
+
1012
+ selector.set_params(score_func=score_func)
1013
+ selector.fit(X)
1014
+ X_trans = selector.transform(X)
1015
+ assert_allclose(X_trans, X[:, :4])
1016
+ X_trans = selector.fit_transform(X)
1017
+ assert_allclose(X_trans, X[:, :4])
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py ADDED
@@ -0,0 +1,684 @@
1
+ import re
2
+ import warnings
3
+ from unittest.mock import Mock
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from sklearn import datasets
9
+ from sklearn.base import BaseEstimator
10
+ from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression
11
+ from sklearn.datasets import make_friedman1
12
+ from sklearn.decomposition import PCA
13
+ from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier
14
+ from sklearn.exceptions import NotFittedError
15
+ from sklearn.feature_selection import SelectFromModel
16
+ from sklearn.linear_model import (
17
+ ElasticNet,
18
+ ElasticNetCV,
19
+ Lasso,
20
+ LassoCV,
21
+ LinearRegression,
22
+ LogisticRegression,
23
+ PassiveAggressiveClassifier,
24
+ SGDClassifier,
25
+ )
26
+ from sklearn.pipeline import make_pipeline
27
+ from sklearn.svm import LinearSVC
28
+ from sklearn.utils._testing import (
29
+ MinimalClassifier,
30
+ assert_allclose,
31
+ assert_array_almost_equal,
32
+ assert_array_equal,
33
+ skip_if_32bit,
34
+ )
35
+
36
+
37
+ class NaNTag(BaseEstimator):
38
+ def _more_tags(self):
39
+ return {"allow_nan": True}
40
+
41
+
42
+ class NoNaNTag(BaseEstimator):
43
+ def _more_tags(self):
44
+ return {"allow_nan": False}
45
+
46
+
47
+ class NaNTagRandomForest(RandomForestClassifier):
48
+ def _more_tags(self):
49
+ return {"allow_nan": True}
50
+
51
+
52
+ iris = datasets.load_iris()
53
+ data, y = iris.data, iris.target
54
+ rng = np.random.RandomState(0)
55
+
56
+
57
+ def test_invalid_input():
58
+ clf = SGDClassifier(
59
+ alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None
60
+ )
61
+ for threshold in ["gobbledigook", ".5 * gobbledigook"]:
62
+ model = SelectFromModel(clf, threshold=threshold)
63
+ model.fit(data, y)
64
+ with pytest.raises(ValueError):
65
+ model.transform(data)
66
+
67
+
68
+ def test_input_estimator_unchanged():
69
+ # Test that SelectFromModel fits on a clone of the estimator.
70
+ est = RandomForestClassifier()
71
+ transformer = SelectFromModel(estimator=est)
72
+ transformer.fit(data, y)
73
+ assert transformer.estimator is est
74
+
75
+
76
+ @pytest.mark.parametrize(
77
+ "max_features, err_type, err_msg",
78
+ [
79
+ (
80
+ data.shape[1] + 1,
81
+ ValueError,
82
+ "max_features ==",
83
+ ),
84
+ (
85
+ lambda X: 1.5,
86
+ TypeError,
87
+ "max_features must be an instance of int, not float.",
88
+ ),
89
+ (
90
+ lambda X: data.shape[1] + 1,
91
+ ValueError,
92
+ "max_features ==",
93
+ ),
94
+ (
95
+ lambda X: -1,
96
+ ValueError,
97
+ "max_features ==",
98
+ ),
99
+ ],
100
+ )
101
+ def test_max_features_error(max_features, err_type, err_msg):
102
+ err_msg = re.escape(err_msg)
103
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
104
+
105
+ transformer = SelectFromModel(
106
+ estimator=clf, max_features=max_features, threshold=-np.inf
107
+ )
108
+ with pytest.raises(err_type, match=err_msg):
109
+ transformer.fit(data, y)
110
+
111
+
112
+ @pytest.mark.parametrize("max_features", [0, 2, data.shape[1], None])
113
+ def test_inferred_max_features_integer(max_features):
114
+ """Check max_features_ and output shape for integer max_features."""
115
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
116
+ transformer = SelectFromModel(
117
+ estimator=clf, max_features=max_features, threshold=-np.inf
118
+ )
119
+ X_trans = transformer.fit_transform(data, y)
120
+ if max_features is not None:
121
+ assert transformer.max_features_ == max_features
122
+ assert X_trans.shape[1] == transformer.max_features_
123
+ else:
124
+ assert not hasattr(transformer, "max_features_")
125
+ assert X_trans.shape[1] == data.shape[1]
126
+
127
+
128
+ @pytest.mark.parametrize(
129
+ "max_features",
130
+ [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)],
131
+ )
132
+ def test_inferred_max_features_callable(max_features):
133
+ """Check max_features_ and output shape for callable max_features."""
134
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
135
+ transformer = SelectFromModel(
136
+ estimator=clf, max_features=max_features, threshold=-np.inf
137
+ )
138
+ X_trans = transformer.fit_transform(data, y)
139
+ assert transformer.max_features_ == max_features(data)
140
+ assert X_trans.shape[1] == transformer.max_features_
141
+
142
+
143
+ @pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2])
144
+ def test_max_features_array_like(max_features):
145
+ X = [
146
+ [0.87, -1.34, 0.31],
147
+ [-2.79, -0.02, -0.85],
148
+ [-1.34, -0.48, -2.55],
149
+ [1.92, 1.48, 0.65],
150
+ ]
151
+ y = [0, 1, 0, 1]
152
+
153
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
154
+ transformer = SelectFromModel(
155
+ estimator=clf, max_features=max_features, threshold=-np.inf
156
+ )
157
+ X_trans = transformer.fit_transform(X, y)
158
+ assert X_trans.shape[1] == transformer.max_features_
159
+
160
+
161
+ @pytest.mark.parametrize(
162
+ "max_features",
163
+ [lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1],
164
+ )
165
+ def test_max_features_callable_data(max_features):
166
+ """Tests that the callable passed to `fit` is called on X."""
167
+ clf = RandomForestClassifier(n_estimators=50, random_state=0)
168
+ m = Mock(side_effect=max_features)
169
+ transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf)
170
+ transformer.fit_transform(data, y)
171
+ m.assert_called_with(data)
172
+
173
+
174
+ class FixedImportanceEstimator(BaseEstimator):
175
+ def __init__(self, importances):
176
+ self.importances = importances
177
+
178
+ def fit(self, X, y=None):
179
+ self.feature_importances_ = np.array(self.importances)
180
+
181
+
182
+ def test_max_features():
183
+ # Test max_features parameter using various values
184
+ X, y = datasets.make_classification(
185
+ n_samples=1000,
186
+ n_features=10,
187
+ n_informative=3,
188
+ n_redundant=0,
189
+ n_repeated=0,
190
+ shuffle=False,
191
+ random_state=0,
192
+ )
193
+ max_features = X.shape[1]
194
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
195
+
196
+ transformer1 = SelectFromModel(estimator=est, threshold=-np.inf)
197
+ transformer2 = SelectFromModel(
198
+ estimator=est, max_features=max_features, threshold=-np.inf
199
+ )
200
+ X_new1 = transformer1.fit_transform(X, y)
201
+ X_new2 = transformer2.fit_transform(X, y)
202
+ assert_allclose(X_new1, X_new2)
203
+
204
+ # Test max_features against actual model.
205
+ transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42))
206
+ X_new1 = transformer1.fit_transform(X, y)
207
+ scores1 = np.abs(transformer1.estimator_.coef_)
208
+ candidate_indices1 = np.argsort(-scores1, kind="mergesort")
209
+
210
+ for n_features in range(1, X_new1.shape[1] + 1):
211
+ transformer2 = SelectFromModel(
212
+ estimator=Lasso(alpha=0.025, random_state=42),
213
+ max_features=n_features,
214
+ threshold=-np.inf,
215
+ )
216
+ X_new2 = transformer2.fit_transform(X, y)
217
+ scores2 = np.abs(transformer2.estimator_.coef_)
218
+ candidate_indices2 = np.argsort(-scores2, kind="mergesort")
219
+ assert_allclose(
220
+ X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]]
221
+ )
222
+ assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_)
223
+
224
+
225
+ def test_max_features_tiebreak():
226
+ # Test that max_features can break ties among feature importances
227
+ X, y = datasets.make_classification(
228
+ n_samples=1000,
229
+ n_features=10,
230
+ n_informative=3,
231
+ n_redundant=0,
232
+ n_repeated=0,
233
+ shuffle=False,
234
+ random_state=0,
235
+ )
236
+ max_features = X.shape[1]
237
+
238
+ feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1])
239
+ for n_features in range(1, max_features + 1):
240
+ transformer = SelectFromModel(
241
+ FixedImportanceEstimator(feature_importances),
242
+ max_features=n_features,
243
+ threshold=-np.inf,
244
+ )
245
+ X_new = transformer.fit_transform(X, y)
246
+ selected_feature_indices = np.where(transformer._get_support_mask())[0]
247
+ assert_array_equal(selected_feature_indices, np.arange(n_features))
248
+ assert X_new.shape[1] == n_features
249
+
250
+
251
+ def test_threshold_and_max_features():
252
+ X, y = datasets.make_classification(
253
+ n_samples=1000,
254
+ n_features=10,
255
+ n_informative=3,
256
+ n_redundant=0,
257
+ n_repeated=0,
258
+ shuffle=False,
259
+ random_state=0,
260
+ )
261
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
262
+
263
+ transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf)
264
+ X_new1 = transformer1.fit_transform(X, y)
265
+
266
+ transformer2 = SelectFromModel(estimator=est, threshold=0.04)
267
+ X_new2 = transformer2.fit_transform(X, y)
268
+
269
+ transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04)
270
+ X_new3 = transformer3.fit_transform(X, y)
271
+ assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1])
272
+ selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :])
273
+ assert_allclose(X_new3, X[:, selected_indices[0]])
274
+
275
+
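As the assertions above suggest, `threshold` and `max_features` act together: a feature has to clear the importance threshold and also rank within the top `max_features`, so the selected count is at most the smaller of the two individual selections. A rough sketch of that combined rule (an illustration under those assumptions, not the estimator's internal code):

import numpy as np

def combined_mask(importances, threshold, max_features):
    # Keep features whose importance clears the threshold...
    mask = importances >= threshold
    # ...and additionally cap the selection at the max_features highest scores.
    if max_features is not None:
        top = np.argsort(-importances, kind="mergesort")[:max_features]
        cap = np.zeros_like(mask)
        cap[top] = True
        mask &= cap
    return mask

importances = np.array([0.30, 0.25, 0.20, 0.15, 0.10])
combined_mask(importances, threshold=0.18, max_features=2)  # keeps only the first two features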
276
+ @skip_if_32bit
277
+ def test_feature_importances():
278
+ X, y = datasets.make_classification(
279
+ n_samples=1000,
280
+ n_features=10,
281
+ n_informative=3,
282
+ n_redundant=0,
283
+ n_repeated=0,
284
+ shuffle=False,
285
+ random_state=0,
286
+ )
287
+
288
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
289
+ for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
290
+ transformer = SelectFromModel(estimator=est, threshold=threshold)
291
+ transformer.fit(X, y)
292
+ assert hasattr(transformer.estimator_, "feature_importances_")
293
+
294
+ X_new = transformer.transform(X)
295
+ assert X_new.shape[1] < X.shape[1]
296
+ importances = transformer.estimator_.feature_importances_
297
+
298
+ feature_mask = np.abs(importances) > func(importances)
299
+ assert_array_almost_equal(X_new, X[:, feature_mask])
300
+
301
+
302
+ def test_sample_weight():
303
+ # Ensure sample weights are passed to underlying estimator
304
+ X, y = datasets.make_classification(
305
+ n_samples=100,
306
+ n_features=10,
307
+ n_informative=3,
308
+ n_redundant=0,
309
+ n_repeated=0,
310
+ shuffle=False,
311
+ random_state=0,
312
+ )
313
+
314
+ # Check with sample weights
315
+ sample_weight = np.ones(y.shape)
316
+ sample_weight[y == 1] *= 100
317
+
318
+ est = LogisticRegression(random_state=0, fit_intercept=False)
319
+ transformer = SelectFromModel(estimator=est)
320
+ transformer.fit(X, y, sample_weight=None)
321
+ mask = transformer._get_support_mask()
322
+ transformer.fit(X, y, sample_weight=sample_weight)
323
+ weighted_mask = transformer._get_support_mask()
324
+ assert not np.all(weighted_mask == mask)
325
+ transformer.fit(X, y, sample_weight=3 * sample_weight)
326
+ reweighted_mask = transformer._get_support_mask()
327
+ assert np.all(weighted_mask == reweighted_mask)
328
+
329
+
330
+ @pytest.mark.parametrize(
331
+ "estimator",
332
+ [
333
+ Lasso(alpha=0.1, random_state=42),
334
+ LassoCV(random_state=42),
335
+ ElasticNet(l1_ratio=1, random_state=42),
336
+ ElasticNetCV(l1_ratio=[1], random_state=42),
337
+ ],
338
+ )
339
+ def test_coef_default_threshold(estimator):
340
+ X, y = datasets.make_classification(
341
+ n_samples=100,
342
+ n_features=10,
343
+ n_informative=3,
344
+ n_redundant=0,
345
+ n_repeated=0,
346
+ shuffle=False,
347
+ random_state=0,
348
+ )
349
+
350
+ # For the Lasso and related models, the threshold defaults to 1e-5
351
+ transformer = SelectFromModel(estimator=estimator)
352
+ transformer.fit(X, y)
353
+ X_new = transformer.transform(X)
354
+ mask = np.abs(transformer.estimator_.coef_) > 1e-5
355
+ assert_array_almost_equal(X_new, X[:, mask])
356
+
357
+
358
+ @skip_if_32bit
359
+ def test_2d_coef():
360
+ X, y = datasets.make_classification(
361
+ n_samples=1000,
362
+ n_features=10,
363
+ n_informative=3,
364
+ n_redundant=0,
365
+ n_repeated=0,
366
+ shuffle=False,
367
+ random_state=0,
368
+ n_classes=4,
369
+ )
370
+
371
+ est = LogisticRegression()
372
+ for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
373
+ for order in [1, 2, np.inf]:
374
+ # Fit SelectFromModel on a multi-class problem
375
+ transformer = SelectFromModel(
376
+ estimator=LogisticRegression(), threshold=threshold, norm_order=order
377
+ )
378
+ transformer.fit(X, y)
379
+ assert hasattr(transformer.estimator_, "coef_")
380
+ X_new = transformer.transform(X)
381
+ assert X_new.shape[1] < X.shape[1]
382
+
383
+ # Manually check that the norm is correctly performed
384
+ est.fit(X, y)
385
+ importances = np.linalg.norm(est.coef_, axis=0, ord=order)
386
+ feature_mask = importances > func(importances)
387
+ assert_array_almost_equal(X_new, X[:, feature_mask])
388
+
389
+
390
+ def test_partial_fit():
391
+ est = PassiveAggressiveClassifier(
392
+ random_state=0, shuffle=False, max_iter=5, tol=None
393
+ )
394
+ transformer = SelectFromModel(estimator=est)
395
+ transformer.partial_fit(data, y, classes=np.unique(y))
396
+ old_model = transformer.estimator_
397
+ transformer.partial_fit(data, y, classes=np.unique(y))
398
+ new_model = transformer.estimator_
399
+ assert old_model is new_model
400
+
401
+ X_transform = transformer.transform(data)
402
+ transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
403
+ assert_array_almost_equal(X_transform, transformer.transform(data))
404
+
405
+ # check that if est doesn't have partial_fit, neither does SelectFromModel
406
+ transformer = SelectFromModel(estimator=RandomForestClassifier())
407
+ assert not hasattr(transformer, "partial_fit")
408
+
409
+
410
+ def test_calling_fit_reinitializes():
411
+ est = LinearSVC(dual="auto", random_state=0)
412
+ transformer = SelectFromModel(estimator=est)
413
+ transformer.fit(data, y)
414
+ transformer.set_params(estimator__C=100)
415
+ transformer.fit(data, y)
416
+ assert transformer.estimator_.C == 100
417
+
418
+
419
+ def test_prefit():
420
+ # Test all possible combinations of the prefit parameter.
421
+
422
+ # Passing a prefit parameter with the selected model
423
+ # and fitting an unfitted model with prefit=False should give the same results.
424
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
425
+ model = SelectFromModel(clf)
426
+ model.fit(data, y)
427
+ X_transform = model.transform(data)
428
+ clf.fit(data, y)
429
+ model = SelectFromModel(clf, prefit=True)
430
+ assert_array_almost_equal(model.transform(data), X_transform)
431
+ model.fit(data, y)
432
+ assert model.estimator_ is not clf
433
+
434
+ # Check that the model is rewritten if prefit=False and a fitted model is
435
+ # passed
436
+ model = SelectFromModel(clf, prefit=False)
437
+ model.fit(data, y)
438
+ assert_array_almost_equal(model.transform(data), X_transform)
439
+
440
+ # Check that passing an unfitted estimator with `prefit=True` raises a
441
+ # `NotFittedError`
442
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
443
+ model = SelectFromModel(clf, prefit=True)
444
+ err_msg = "When `prefit=True`, `estimator` is expected to be a fitted estimator."
445
+ with pytest.raises(NotFittedError, match=err_msg):
446
+ model.fit(data, y)
447
+ with pytest.raises(NotFittedError, match=err_msg):
448
+ model.partial_fit(data, y)
449
+ with pytest.raises(NotFittedError, match=err_msg):
450
+ model.transform(data)
451
+
452
+ # Check that the internal parameters of the prefitted model are not changed
453
+ # when calling `fit` or `partial_fit` with `prefit=True`
454
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, tol=None).fit(data, y)
455
+ model = SelectFromModel(clf, prefit=True)
456
+ model.fit(data, y)
457
+ assert_allclose(model.estimator_.coef_, clf.coef_)
458
+ model.partial_fit(data, y)
459
+ assert_allclose(model.estimator_.coef_, clf.coef_)
460
+
461
+
462
+ def test_prefit_max_features():
463
+ """Check the interaction between `prefit` and `max_features`."""
464
+ # case 1: an error should be raised at `transform` if `fit` was not called to
465
+ # validate the attributes
466
+ estimator = RandomForestClassifier(n_estimators=5, random_state=0)
467
+ estimator.fit(data, y)
468
+ model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1])
469
+
470
+ err_msg = (
471
+ "When `prefit=True` and `max_features` is a callable, call `fit` "
472
+ "before calling `transform`."
473
+ )
474
+ with pytest.raises(NotFittedError, match=err_msg):
475
+ model.transform(data)
476
+
477
+ # case 2: `max_features` is not validated and different from an integer
478
+ # FIXME: we cannot validate the upper bound of the attribute at transform
479
+ # and we should force calling `fit` if we intend to force the attribute
480
+ # to have such an upper bound.
481
+ max_features = 2.5
482
+ model.set_params(max_features=max_features)
483
+ with pytest.raises(ValueError, match="`max_features` must be an integer"):
484
+ model.transform(data)
485
+
486
+
487
+ def test_prefit_get_feature_names_out():
488
+ """Check the interaction between prefit and the feature names."""
489
+ clf = RandomForestClassifier(n_estimators=2, random_state=0)
490
+ clf.fit(data, y)
491
+ model = SelectFromModel(clf, prefit=True, max_features=1)
492
+
493
+ name = type(model).__name__
494
+ err_msg = (
495
+ f"This {name} instance is not fitted yet. Call 'fit' with "
496
+ "appropriate arguments before using this estimator."
497
+ )
498
+ with pytest.raises(NotFittedError, match=err_msg):
499
+ model.get_feature_names_out()
500
+
501
+ model.fit(data, y)
502
+ feature_names = model.get_feature_names_out()
503
+ assert feature_names == ["x3"]
504
+
505
+
506
+ def test_threshold_string():
507
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
508
+ model = SelectFromModel(est, threshold="0.5*mean")
509
+ model.fit(data, y)
510
+ X_transform = model.transform(data)
511
+
512
+ # Calculate the threshold from the estimator directly.
513
+ est.fit(data, y)
514
+ threshold = 0.5 * np.mean(est.feature_importances_)
515
+ mask = est.feature_importances_ > threshold
516
+ assert_array_almost_equal(X_transform, data[:, mask])
517
+
518
+
519
+ def test_threshold_without_refitting():
520
+ # Test that the threshold can be set without refitting the model.
521
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
522
+ model = SelectFromModel(clf, threshold="0.1 * mean")
523
+ model.fit(data, y)
524
+ X_transform = model.transform(data)
525
+
526
+ # Set a higher threshold to filter out more features.
527
+ model.threshold = "1.0 * mean"
528
+ assert X_transform.shape[1] > model.transform(data).shape[1]
529
+
530
+
531
+ def test_fit_accepts_nan_inf():
532
+ # Test that fit doesn't check for np.inf and np.nan values.
533
+ clf = HistGradientBoostingClassifier(random_state=0)
534
+
535
+ model = SelectFromModel(estimator=clf)
536
+
537
+ nan_data = data.copy()
538
+ nan_data[0] = np.nan
539
+ nan_data[1] = np.inf
540
+
541
+ model.fit(data, y)
542
+
543
+
544
+ def test_transform_accepts_nan_inf():
545
+ # Test that transform doesn't check for np.inf and np.nan values.
546
+ clf = NaNTagRandomForest(n_estimators=100, random_state=0)
547
+ nan_data = data.copy()
548
+
549
+ model = SelectFromModel(estimator=clf)
550
+ model.fit(nan_data, y)
551
+
552
+ nan_data[0] = np.nan
553
+ nan_data[1] = np.inf
554
+
555
+ model.transform(nan_data)
556
+
557
+
558
+ def test_allow_nan_tag_comes_from_estimator():
559
+ allow_nan_est = NaNTag()
560
+ model = SelectFromModel(estimator=allow_nan_est)
561
+ assert model._get_tags()["allow_nan"] is True
562
+
563
+ no_nan_est = NoNaNTag()
564
+ model = SelectFromModel(estimator=no_nan_est)
565
+ assert model._get_tags()["allow_nan"] is False
566
+
567
+
568
+ def _pca_importances(pca_estimator):
569
+ return np.abs(pca_estimator.explained_variance_)
570
+
571
+
572
+ @pytest.mark.parametrize(
573
+ "estimator, importance_getter",
574
+ [
575
+ (
576
+ make_pipeline(PCA(random_state=0), LogisticRegression()),
577
+ "named_steps.logisticregression.coef_",
578
+ ),
579
+ (PCA(random_state=0), _pca_importances),
580
+ ],
581
+ )
582
+ def test_importance_getter(estimator, importance_getter):
583
+ selector = SelectFromModel(
584
+ estimator, threshold="mean", importance_getter=importance_getter
585
+ )
586
+ selector.fit(data, y)
587
+ assert selector.transform(data).shape[1] == 1
588
+
589
+
590
+ @pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
591
+ def test_select_from_model_pls(PLSEstimator):
592
+ """Check the behaviour of SelectFromModel with PLS estimators.
593
+
594
+ Non-regression test for:
595
+ https://github.com/scikit-learn/scikit-learn/issues/12410
596
+ """
597
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
598
+ estimator = PLSEstimator(n_components=1)
599
+ model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y)
600
+ assert model.score(X, y) > 0.5
601
+
602
+
603
+ def test_estimator_does_not_support_feature_names():
604
+ """SelectFromModel works with estimators that do not support feature_names_in_.
605
+
606
+ Non-regression test for #21949.
607
+ """
608
+ pytest.importorskip("pandas")
609
+ X, y = datasets.load_iris(as_frame=True, return_X_y=True)
610
+ all_feature_names = set(X.columns)
611
+
612
+ def importance_getter(estimator):
613
+ return np.arange(X.shape[1])
614
+
615
+ selector = SelectFromModel(
616
+ MinimalClassifier(), importance_getter=importance_getter
617
+ ).fit(X, y)
618
+
619
+ # selector learns the feature names itself
620
+ assert_array_equal(selector.feature_names_in_, X.columns)
621
+
622
+ feature_names_out = set(selector.get_feature_names_out())
623
+ assert feature_names_out < all_feature_names
624
+
625
+ with warnings.catch_warnings():
626
+ warnings.simplefilter("error", UserWarning)
627
+
628
+ selector.transform(X.iloc[1:3])
629
+
630
+
631
+ @pytest.mark.parametrize(
632
+ "error, err_msg, max_features",
633
+ (
634
+ [ValueError, "max_features == 10, must be <= 4", 10],
635
+ [ValueError, "max_features == 5, must be <= 4", lambda x: x.shape[1] + 1],
636
+ ),
637
+ )
638
+ def test_partial_fit_validate_max_features(error, err_msg, max_features):
639
+ """Test that partial_fit from SelectFromModel validates `max_features`."""
640
+ X, y = datasets.make_classification(
641
+ n_samples=100,
642
+ n_features=4,
643
+ random_state=0,
644
+ )
645
+
646
+ with pytest.raises(error, match=err_msg):
647
+ SelectFromModel(
648
+ estimator=SGDClassifier(), max_features=max_features
649
+ ).partial_fit(X, y, classes=[0, 1])
650
+
651
+
652
+ @pytest.mark.parametrize("as_frame", [True, False])
653
+ def test_partial_fit_validate_feature_names(as_frame):
654
+ """Test that partial_fit from SelectFromModel validates `feature_names_in_`."""
655
+ pytest.importorskip("pandas")
656
+ X, y = datasets.load_iris(as_frame=as_frame, return_X_y=True)
657
+
658
+ selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit(
659
+ X, y, classes=[0, 1, 2]
660
+ )
661
+ if as_frame:
662
+ assert_array_equal(selector.feature_names_in_, X.columns)
663
+ else:
664
+ assert not hasattr(selector, "feature_names_in_")
665
+
666
+
667
+ def test_from_model_estimator_attribute_error():
668
+ """Check that we raise the proper AttributeError when the estimator
669
+ does not implement the `partial_fit` method, which is decorated with
670
+ `available_if`.
671
+
672
+ Non-regression test for:
673
+ https://github.com/scikit-learn/scikit-learn/issues/28108
674
+ """
675
+ # `LinearRegression` does not implement 'partial_fit' and should raise an
676
+ # AttributeError
677
+ from_model = SelectFromModel(estimator=LinearRegression())
678
+
679
+ outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'"
680
+ inner_msg = "'LinearRegression' object has no attribute 'partial_fit'"
681
+ with pytest.raises(AttributeError, match=outer_msg) as exec_info:
682
+ from_model.fit(data, y).partial_fit(data)
683
+ assert isinstance(exec_info.value.__cause__, AttributeError)
684
+ assert inner_msg in str(exec_info.value.__cause__)
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py ADDED
@@ -0,0 +1,254 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
5
+ from sklearn.feature_selection._mutual_info import _compute_mi
6
+ from sklearn.utils import check_random_state
7
+ from sklearn.utils._testing import (
8
+ assert_allclose,
9
+ assert_array_equal,
10
+ )
11
+ from sklearn.utils.fixes import CSR_CONTAINERS
12
+
13
+
14
+ def test_compute_mi_dd():
15
+ # In discrete case computations are straightforward and can be done
16
+ # by hand on given vectors.
17
+ x = np.array([0, 1, 1, 0, 0])
18
+ y = np.array([1, 0, 0, 0, 1])
19
+
20
+ H_x = H_y = -(3 / 5) * np.log(3 / 5) - (2 / 5) * np.log(2 / 5)
21
+ H_xy = -1 / 5 * np.log(1 / 5) - 2 / 5 * np.log(2 / 5) - 2 / 5 * np.log(2 / 5)
22
+ I_xy = H_x + H_y - H_xy
23
+
24
+ assert_allclose(_compute_mi(x, y, x_discrete=True, y_discrete=True), I_xy)
25
+
26
+
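The hand computation above is the plug-in estimate of I(X; Y) = H(X) + H(Y) - H(X, Y). A small standalone sketch that rebuilds the same quantity from empirical counts (illustrative only; the string pairing is a quick trick that works for small discrete labels):

import numpy as np

def plugin_mi(x, y):
    # Plug-in mutual information: entropies estimated from empirical
    # frequencies of the observed discrete values.
    def entropy(labels):
        _, counts = np.unique(labels, return_counts=True)
        p = counts / counts.sum()
        return -np.sum(p * np.log(p))

    joint = np.char.add(x.astype(str), y.astype(str))  # pair each (x_i, y_i)
    return entropy(x) + entropy(y) - entropy(joint)

x = np.array([0, 1, 1, 0, 0])
y = np.array([1, 0, 0, 0, 1])
plugin_mi(x, y)  # equals the hand-computed I_xy above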
27
+ def test_compute_mi_cc(global_dtype):
28
+ # For two continuous variables a good approach is to test on a bivariate
29
+ # normal distribution, where mutual information is known.
30
+
31
+ # Mean of the distribution, irrelevant for mutual information.
32
+ mean = np.zeros(2)
33
+
34
+ # Setup covariance matrix with correlation coeff. equal 0.5.
35
+ sigma_1 = 1
36
+ sigma_2 = 10
37
+ corr = 0.5
38
+ cov = np.array(
39
+ [
40
+ [sigma_1**2, corr * sigma_1 * sigma_2],
41
+ [corr * sigma_1 * sigma_2, sigma_2**2],
42
+ ]
43
+ )
44
+
45
+ # True theoretical mutual information.
46
+ I_theory = np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov))
47
+
48
+ rng = check_random_state(0)
49
+ Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False)
50
+
51
+ x, y = Z[:, 0], Z[:, 1]
52
+
53
+ # The theoretical and computed values won't be very close,
54
+ # so we check with a large relative tolerance.
55
+ for n_neighbors in [3, 5, 7]:
56
+ I_computed = _compute_mi(
57
+ x, y, x_discrete=False, y_discrete=False, n_neighbors=n_neighbors
58
+ )
59
+ assert_allclose(I_computed, I_theory, rtol=1e-1)
60
+
61
+
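The theoretical value used above can also be written purely in terms of the correlation coefficient: for a bivariate Gaussian, I(X; Y) = -0.5 * ln(1 - corr**2), which is the same quantity as log(sigma_1) + log(sigma_2) - 0.5 * log(det(cov)) because det(cov) = sigma_1**2 * sigma_2**2 * (1 - corr**2). A quick standalone check of that identity (not part of the test):

import numpy as np

sigma_1, sigma_2, corr = 1.0, 10.0, 0.5
cov = np.array(
    [
        [sigma_1**2, corr * sigma_1 * sigma_2],
        [corr * sigma_1 * sigma_2, sigma_2**2],
    ]
)

# The two closed forms of the mutual information agree.
i_from_det = np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov))
i_from_corr = -0.5 * np.log(1 - corr**2)
assert np.isclose(i_from_det, i_from_corr)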
62
+ def test_compute_mi_cd(global_dtype):
63
+ # To test, define a joint distribution as follows:
64
+ # p(x, y) = p(x) p(y | x)
65
+ # X ~ Bernoulli(p)
66
+ # (Y | x = 0) ~ Uniform(-1, 1)
67
+ # (Y | x = 1) ~ Uniform(0, 2)
68
+
69
+ # Use the following formula for mutual information:
70
+ # I(X; Y) = H(Y) - H(Y | X)
71
+ # Two entropies can be computed by hand:
72
+ # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2)
73
+ # H(Y | X) = ln(2)
74
+
75
+ # Now we need to implement sampling from our distribution, which is
76
+ # done easily using conditional distribution logic.
77
+
78
+ n_samples = 1000
79
+ rng = check_random_state(0)
80
+
81
+ for p in [0.3, 0.5, 0.7]:
82
+ x = rng.uniform(size=n_samples) > p
83
+
84
+ y = np.empty(n_samples, global_dtype)
85
+ mask = x == 0
86
+ y[mask] = rng.uniform(-1, 1, size=np.sum(mask))
87
+ y[~mask] = rng.uniform(0, 2, size=np.sum(~mask))
88
+
89
+ I_theory = -0.5 * (
90
+ (1 - p) * np.log(0.5 * (1 - p)) + p * np.log(0.5 * p) + np.log(0.5)
91
+ ) - np.log(2)
92
+
93
+ # Check with the same large relative tolerance as above.
94
+ for n_neighbors in [3, 5, 7]:
95
+ I_computed = _compute_mi(
96
+ x, y, x_discrete=True, y_discrete=False, n_neighbors=n_neighbors
97
+ )
98
+ assert_allclose(I_computed, I_theory, rtol=1e-1)
99
+
100
+
101
+ def test_compute_mi_cd_unique_label(global_dtype):
102
+ # Test that adding a unique label doesn't change MI.
103
+ n_samples = 100
104
+ x = np.random.uniform(size=n_samples) > 0.5
105
+
106
+ y = np.empty(n_samples, global_dtype)
107
+ mask = x == 0
108
+ y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
109
+ y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
110
+
111
+ mi_1 = _compute_mi(x, y, x_discrete=True, y_discrete=False)
112
+
113
+ x = np.hstack((x, 2))
114
+ y = np.hstack((y, 10))
115
+ mi_2 = _compute_mi(x, y, x_discrete=True, y_discrete=False)
116
+
117
+ assert_allclose(mi_1, mi_2)
118
+
119
+
120
+ # We are going to test that feature ordering by MI matches our expectations.
121
+ def test_mutual_info_classif_discrete(global_dtype):
122
+ X = np.array(
123
+ [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype
124
+ )
125
+ y = np.array([0, 1, 2, 2, 1])
126
+
127
+ # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
128
+ # informative.
129
+ mi = mutual_info_classif(X, y, discrete_features=True)
130
+ assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))
131
+
132
+
133
+ def test_mutual_info_regression(global_dtype):
134
+ # We generate a sample from a multivariate normal distribution, using a
135
+ # transformation of initially uncorrelated variables. The zeroth variable
136
+ # after transformation is selected as the target vector; it has the
137
+ # strongest correlation with variable 2 and the weakest correlation
138
+ # with variable 1.
139
+ T = np.array([[1, 0.5, 2, 1], [0, 1, 0.1, 0.0], [0, 0.1, 1, 0.1], [0, 0.1, 0.1, 1]])
140
+ cov = T.dot(T.T)
141
+ mean = np.zeros(4)
142
+
143
+ rng = check_random_state(0)
144
+ Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False)
145
+ X = Z[:, 1:]
146
+ y = Z[:, 0]
147
+
148
+ mi = mutual_info_regression(X, y, random_state=0)
149
+ assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
150
+ # XXX: should mutual_info_regression be fixed to avoid
151
+ # up-casting float32 inputs to float64?
152
+ assert mi.dtype == np.float64
153
+
154
+
155
+ def test_mutual_info_classif_mixed(global_dtype):
156
+ # Here the target is discrete and there are two continuous and one
157
+ # discrete feature. The idea of this test is clear from the code.
158
+ rng = check_random_state(0)
159
+ X = rng.rand(1000, 3).astype(global_dtype, copy=False)
160
+ X[:, 1] += X[:, 0]
161
+ y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
162
+ X[:, 2] = X[:, 2] > 0.5
163
+
164
+ mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3, random_state=0)
165
+ assert_array_equal(np.argsort(-mi), [2, 0, 1])
166
+ for n_neighbors in [5, 7, 9]:
167
+ mi_nn = mutual_info_classif(
168
+ X, y, discrete_features=[2], n_neighbors=n_neighbors, random_state=0
169
+ )
170
+ # Check that the continuous features have a higher MI with greater
171
+ # n_neighbors
172
+ assert mi_nn[0] > mi[0]
173
+ assert mi_nn[1] > mi[1]
174
+ # The n_neighbors should not have any effect on the discrete value
175
+ # The MI should be the same
176
+ assert mi_nn[2] == mi[2]
177
+
178
+
179
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
180
+ def test_mutual_info_options(global_dtype, csr_container):
181
+ X = np.array(
182
+ [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype
183
+ )
184
+ y = np.array([0, 1, 2, 2, 1], dtype=global_dtype)
185
+ X_csr = csr_container(X)
186
+
187
+ for mutual_info in (mutual_info_regression, mutual_info_classif):
188
+ with pytest.raises(ValueError):
189
+ mutual_info(X_csr, y, discrete_features=False)
190
+ with pytest.raises(ValueError):
191
+ mutual_info(X, y, discrete_features="manual")
192
+ with pytest.raises(ValueError):
193
+ mutual_info(X_csr, y, discrete_features=[True, False, True])
194
+ with pytest.raises(IndexError):
195
+ mutual_info(X, y, discrete_features=[True, False, True, False])
196
+ with pytest.raises(IndexError):
197
+ mutual_info(X, y, discrete_features=[1, 4])
198
+
199
+ mi_1 = mutual_info(X, y, discrete_features="auto", random_state=0)
200
+ mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)
201
+ mi_3 = mutual_info(X_csr, y, discrete_features="auto", random_state=0)
202
+ mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0)
203
+ mi_5 = mutual_info(X, y, discrete_features=[True, False, True], random_state=0)
204
+ mi_6 = mutual_info(X, y, discrete_features=[0, 2], random_state=0)
205
+
206
+ assert_allclose(mi_1, mi_2)
207
+ assert_allclose(mi_3, mi_4)
208
+ assert_allclose(mi_5, mi_6)
209
+
210
+ assert not np.allclose(mi_1, mi_3)
211
+
212
+
213
+ @pytest.mark.parametrize("correlated", [True, False])
214
+ def test_mutual_information_symmetry_classif_regression(correlated, global_random_seed):
215
+ """Check that `mutual_info_classif` and `mutual_info_regression` are
216
+ symmetric by switching the target `y` as `feature` in `X` and vice
217
+ versa.
218
+
219
+ Non-regression test for:
220
+ https://github.com/scikit-learn/scikit-learn/issues/23720
221
+ """
222
+ rng = np.random.RandomState(global_random_seed)
223
+ n = 100
224
+ d = rng.randint(10, size=n)
225
+
226
+ if correlated:
227
+ c = d.astype(np.float64)
228
+ else:
229
+ c = rng.normal(0, 1, size=n)
230
+
231
+ mi_classif = mutual_info_classif(
232
+ c[:, None], d, discrete_features=[False], random_state=global_random_seed
233
+ )
234
+
235
+ mi_regression = mutual_info_regression(
236
+ d[:, None], c, discrete_features=[True], random_state=global_random_seed
237
+ )
238
+
239
+ assert mi_classif == pytest.approx(mi_regression)
240
+
241
+
242
+ def test_mutual_info_regression_X_int_dtype(global_random_seed):
243
+ """Check that results agree when X is integer dtype and float dtype.
244
+
245
+ Non-regression test for Issue #26696.
246
+ """
247
+ rng = np.random.RandomState(global_random_seed)
248
+ X = rng.randint(100, size=(100, 10))
249
+ X_float = X.astype(np.float64, copy=True)
250
+ y = rng.randint(100, size=100)
251
+
252
+ expected = mutual_info_regression(X_float, y, random_state=global_random_seed)
253
+ result = mutual_info_regression(X, y, random_state=global_random_seed)
254
+ assert_allclose(result, expected)
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py ADDED
@@ -0,0 +1,615 @@
1
+ """
2
+ Testing Recursive feature elimination
3
+ """
4
+
5
+ from operator import attrgetter
6
+
7
+ import numpy as np
8
+ import pytest
9
+ from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
10
+
11
+ from sklearn.base import BaseEstimator, ClassifierMixin
12
+ from sklearn.compose import TransformedTargetRegressor
13
+ from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression
14
+ from sklearn.datasets import load_iris, make_friedman1
15
+ from sklearn.ensemble import RandomForestClassifier
16
+ from sklearn.feature_selection import RFE, RFECV
17
+ from sklearn.impute import SimpleImputer
18
+ from sklearn.linear_model import LinearRegression, LogisticRegression
19
+ from sklearn.metrics import get_scorer, make_scorer, zero_one_loss
20
+ from sklearn.model_selection import GroupKFold, cross_val_score
21
+ from sklearn.pipeline import make_pipeline
22
+ from sklearn.preprocessing import StandardScaler
23
+ from sklearn.svm import SVC, SVR, LinearSVR
24
+ from sklearn.utils import check_random_state
25
+ from sklearn.utils._testing import ignore_warnings
26
+ from sklearn.utils.fixes import CSR_CONTAINERS
27
+
28
+
29
+ class MockClassifier:
30
+ """
31
+ Dummy classifier to test recursive feature elimination
32
+ """
33
+
34
+ def __init__(self, foo_param=0):
35
+ self.foo_param = foo_param
36
+
37
+ def fit(self, X, y):
38
+ assert len(X) == len(y)
39
+ self.coef_ = np.ones(X.shape[1], dtype=np.float64)
40
+ return self
41
+
42
+ def predict(self, T):
43
+ return T.shape[0]
44
+
45
+ predict_proba = predict
46
+ decision_function = predict
47
+ transform = predict
48
+
49
+ def score(self, X=None, y=None):
50
+ return 0.0
51
+
52
+ def get_params(self, deep=True):
53
+ return {"foo_param": self.foo_param}
54
+
55
+ def set_params(self, **params):
56
+ return self
57
+
58
+ def _more_tags(self):
59
+ return {"allow_nan": True}
60
+
61
+
62
+ def test_rfe_features_importance():
63
+ generator = check_random_state(0)
64
+ iris = load_iris()
65
+ # Add some irrelevant features. Random seed is set to make sure that
66
+ # irrelevant features are always irrelevant.
67
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
68
+ y = iris.target
69
+
70
+ clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2)
71
+ rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
72
+ rfe.fit(X, y)
73
+ assert len(rfe.ranking_) == X.shape[1]
74
+
75
+ clf_svc = SVC(kernel="linear")
76
+ rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
77
+ rfe_svc.fit(X, y)
78
+
79
+ # Check if the supports are equal
80
+ assert_array_equal(rfe.get_support(), rfe_svc.get_support())
81
+
82
+
83
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
84
+ def test_rfe(csr_container):
85
+ generator = check_random_state(0)
86
+ iris = load_iris()
87
+ # Add some irrelevant features. Random seed is set to make sure that
88
+ # irrelevant features are always irrelevant.
89
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
90
+ X_sparse = csr_container(X)
91
+ y = iris.target
92
+
93
+ # dense model
94
+ clf = SVC(kernel="linear")
95
+ rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
96
+ rfe.fit(X, y)
97
+ X_r = rfe.transform(X)
98
+ clf.fit(X_r, y)
99
+ assert len(rfe.ranking_) == X.shape[1]
100
+
101
+ # sparse model
102
+ clf_sparse = SVC(kernel="linear")
103
+ rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
104
+ rfe_sparse.fit(X_sparse, y)
105
+ X_r_sparse = rfe_sparse.transform(X_sparse)
106
+
107
+ assert X_r.shape == iris.data.shape
108
+ assert_array_almost_equal(X_r[:10], iris.data[:10])
109
+
110
+ assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
111
+ assert rfe.score(X, y) == clf.score(iris.data, iris.target)
112
+ assert_array_almost_equal(X_r, X_r_sparse.toarray())
113
+
114
+
115
+ def test_RFE_fit_score_params():
116
+ # Make sure RFE passes the metadata down to fit and score methods of the
117
+ # underlying estimator
118
+ class TestEstimator(BaseEstimator, ClassifierMixin):
119
+ def fit(self, X, y, prop=None):
120
+ if prop is None:
121
+ raise ValueError("fit: prop cannot be None")
122
+ self.svc_ = SVC(kernel="linear").fit(X, y)
123
+ self.coef_ = self.svc_.coef_
124
+ return self
125
+
126
+ def score(self, X, y, prop=None):
127
+ if prop is None:
128
+ raise ValueError("score: prop cannot be None")
129
+ return self.svc_.score(X, y)
130
+
131
+ X, y = load_iris(return_X_y=True)
132
+ with pytest.raises(ValueError, match="fit: prop cannot be None"):
133
+ RFE(estimator=TestEstimator()).fit(X, y)
134
+ with pytest.raises(ValueError, match="score: prop cannot be None"):
135
+ RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y)
136
+
137
+ RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y, prop="foo")
138
+
139
+
140
+ def test_rfe_percent_n_features():
141
+ # test that the results are the same
142
+ generator = check_random_state(0)
143
+ iris = load_iris()
144
+ # Add some irrelevant features. Random seed is set to make sure that
145
+ # irrelevant features are always irrelevant.
146
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
147
+ y = iris.target
148
+ # there are 10 features in the data. We select 40%.
149
+ clf = SVC(kernel="linear")
150
+ rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1)
151
+ rfe_num.fit(X, y)
152
+
153
+ rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1)
154
+ rfe_perc.fit(X, y)
155
+
156
+ assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_)
157
+ assert_array_equal(rfe_perc.support_, rfe_num.support_)
158
+
159
+
160
+ def test_rfe_mockclassifier():
161
+ generator = check_random_state(0)
162
+ iris = load_iris()
163
+ # Add some irrelevant features. Random seed is set to make sure that
164
+ # irrelevant features are always irrelevant.
165
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
166
+ y = iris.target
167
+
168
+ # dense model
169
+ clf = MockClassifier()
170
+ rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
171
+ rfe.fit(X, y)
172
+ X_r = rfe.transform(X)
173
+ clf.fit(X_r, y)
174
+ assert len(rfe.ranking_) == X.shape[1]
175
+ assert X_r.shape == iris.data.shape
176
+
177
+
178
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
179
+ def test_rfecv(csr_container):
180
+ generator = check_random_state(0)
181
+ iris = load_iris()
182
+ # Add some irrelevant features. Random seed is set to make sure that
183
+ # irrelevant features are always irrelevant.
184
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
185
+ y = list(iris.target) # regression test: list should be supported
186
+
187
+ # Test using the score function
188
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1)
189
+ rfecv.fit(X, y)
190
+ # non-regression test for missing worst feature:
191
+
192
+ for key in rfecv.cv_results_.keys():
193
+ assert len(rfecv.cv_results_[key]) == X.shape[1]
194
+
195
+ assert len(rfecv.ranking_) == X.shape[1]
196
+ X_r = rfecv.transform(X)
197
+
198
+ # All the noisy variables were filtered out
199
+ assert_array_equal(X_r, iris.data)
200
+
201
+ # same in sparse
202
+ rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1)
203
+ X_sparse = csr_container(X)
204
+ rfecv_sparse.fit(X_sparse, y)
205
+ X_r_sparse = rfecv_sparse.transform(X_sparse)
206
+ assert_array_equal(X_r_sparse.toarray(), iris.data)
207
+
208
+ # Test using a customized loss function
209
+ scoring = make_scorer(zero_one_loss, greater_is_better=False)
210
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring)
211
+ ignore_warnings(rfecv.fit)(X, y)
212
+ X_r = rfecv.transform(X)
213
+ assert_array_equal(X_r, iris.data)
214
+
215
+ # Test using a scorer
216
+ scorer = get_scorer("accuracy")
217
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer)
218
+ rfecv.fit(X, y)
219
+ X_r = rfecv.transform(X)
220
+ assert_array_equal(X_r, iris.data)
221
+
222
+ # Test fix on cv_results_
223
+ def test_scorer(estimator, X, y):
224
+ return 1.0
225
+
226
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer)
227
+ rfecv.fit(X, y)
228
+
229
+ # In the event of cross validation score ties, the expected behavior of
230
+ # RFECV is to return the FEWEST features that maximize the CV score.
231
+ # Because test_scorer always returns 1.0 in this example, RFECV should
232
+ # reduce the dimensionality to a single feature (i.e. n_features_ = 1)
233
+ assert rfecv.n_features_ == 1
234
+
235
+ # Same as the first two tests, but with step=2
236
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=2)
237
+ rfecv.fit(X, y)
238
+
239
+ for key in rfecv.cv_results_.keys():
240
+ assert len(rfecv.cv_results_[key]) == 6
241
+
242
+ assert len(rfecv.ranking_) == X.shape[1]
243
+ X_r = rfecv.transform(X)
244
+ assert_array_equal(X_r, iris.data)
245
+
246
+ rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2)
247
+ X_sparse = csr_container(X)
248
+ rfecv_sparse.fit(X_sparse, y)
249
+ X_r_sparse = rfecv_sparse.transform(X_sparse)
250
+ assert_array_equal(X_r_sparse.toarray(), iris.data)
251
+
252
+ # Verifying that steps < 1 don't blow up.
253
+ rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=0.2)
254
+ X_sparse = csr_container(X)
255
+ rfecv_sparse.fit(X_sparse, y)
256
+ X_r_sparse = rfecv_sparse.transform(X_sparse)
257
+ assert_array_equal(X_r_sparse.toarray(), iris.data)
258
+
259
+
260
+ def test_rfecv_mockclassifier():
261
+ generator = check_random_state(0)
262
+ iris = load_iris()
263
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
264
+ y = list(iris.target) # regression test: list should be supported
265
+
266
+ # Test using the score function
267
+ rfecv = RFECV(estimator=MockClassifier(), step=1)
268
+ rfecv.fit(X, y)
269
+ # non-regression test for missing worst feature:
270
+
271
+ for key in rfecv.cv_results_.keys():
272
+ assert len(rfecv.cv_results_[key]) == X.shape[1]
273
+
274
+ assert len(rfecv.ranking_) == X.shape[1]
275
+
276
+
277
+ def test_rfecv_verbose_output():
278
+ # Check verbose=1 is producing an output.
279
+ import sys
280
+ from io import StringIO
281
+
282
+ sys.stdout = StringIO()
283
+
284
+ generator = check_random_state(0)
285
+ iris = load_iris()
286
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
287
+ y = list(iris.target)
288
+
289
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1)
290
+ rfecv.fit(X, y)
291
+
292
+ verbose_output = sys.stdout
293
+ verbose_output.seek(0)
294
+ assert len(verbose_output.readline()) > 0
295
+
296
+
297
+ def test_rfecv_cv_results_size(global_random_seed):
298
+ generator = check_random_state(global_random_seed)
299
+ iris = load_iris()
300
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
301
+ y = list(iris.target) # regression test: list should be supported
302
+
303
+ # Non-regression test for varying combinations of step and
304
+ # min_features_to_select.
305
+ for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]:
306
+ rfecv = RFECV(
307
+ estimator=MockClassifier(),
308
+ step=step,
309
+ min_features_to_select=min_features_to_select,
310
+ )
311
+ rfecv.fit(X, y)
312
+
313
+ score_len = np.ceil((X.shape[1] - min_features_to_select) / step) + 1
314
+
315
+ for key in rfecv.cv_results_.keys():
316
+ assert len(rfecv.cv_results_[key]) == score_len
317
+
318
+ assert len(rfecv.ranking_) == X.shape[1]
319
+ assert rfecv.n_features_ >= min_features_to_select
320
+
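For the ten-feature X built above (four iris columns plus six noise columns), the expected cv_results_ lengths work out as follows; this only restates the score_len arithmetic used in the test:

    import numpy as np

    n_features = 10
    for step, min_features_to_select, expected in [(2, 1, 6), (2, 2, 5), (3, 3, 4)]:
        score_len = np.ceil((n_features - min_features_to_select) / step) + 1
        assert score_len == expected
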
321
+
322
+ def test_rfe_estimator_tags():
323
+ rfe = RFE(SVC(kernel="linear"))
324
+ assert rfe._estimator_type == "classifier"
325
+ # make sure that cross-validation is stratified
326
+ iris = load_iris()
327
+ score = cross_val_score(rfe, iris.data, iris.target)
328
+ assert score.min() > 0.7
329
+
330
+
331
+ def test_rfe_min_step(global_random_seed):
332
+ n_features = 10
333
+ X, y = make_friedman1(
334
+ n_samples=50, n_features=n_features, random_state=global_random_seed
335
+ )
336
+ n_samples, n_features = X.shape
337
+ estimator = SVR(kernel="linear")
338
+
339
+ # Test when floor(step * n_features) <= 0
340
+ selector = RFE(estimator, step=0.01)
341
+ sel = selector.fit(X, y)
342
+ assert sel.support_.sum() == n_features // 2
343
+
344
+ # Test when step is between (0,1) and floor(step * n_features) > 0
345
+ selector = RFE(estimator, step=0.20)
346
+ sel = selector.fit(X, y)
347
+ assert sel.support_.sum() == n_features // 2
348
+
349
+ # Test when step is an integer
350
+ selector = RFE(estimator, step=5)
351
+ sel = selector.fit(X, y)
352
+ assert sel.support_.sum() == n_features // 2
353
+
354
+
355
+ def test_number_of_subsets_of_features(global_random_seed):
356
+ # In RFE, 'number_of_subsets_of_features'
357
+ # = the number of iterations in '_fit'
358
+ # = max(ranking_)
359
+ # = 1 + (n_features + step - n_features_to_select - 1) // step
360
+ # After optimization #4534, this number
361
+ # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
362
+ # This test case is to test their equivalence, refer to #4534 and #3824
363
+
364
+ def formula1(n_features, n_features_to_select, step):
365
+ return 1 + ((n_features + step - n_features_to_select - 1) // step)
366
+
367
+ def formula2(n_features, n_features_to_select, step):
368
+ return 1 + np.ceil((n_features - n_features_to_select) / float(step))
369
+
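A quick worked check of the equivalence for the two parameter combinations used below (n_features=11, n_features_to_select=3, step in {2, 3}); this only restates the two formulas:

    import numpy as np

    # step=2: 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5  and  1 + ceil((11 - 3) / 2) = 5
    # step=3: 1 + (11 + 3 - 3 - 1) // 3 = 1 + 3 = 4  and  1 + ceil((11 - 3) / 3) = 4
    for step, expected in [(2, 5), (3, 4)]:
        assert 1 + ((11 + step - 3 - 1) // step) == expected
        assert 1 + int(np.ceil((11 - 3) / step)) == expected
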
370
+ # RFE
371
+ # Case 1, n_features - n_features_to_select is divisible by step
372
+ # Case 2, n_features - n_features_to_select is not divisible by step
373
+ n_features_list = [11, 11]
374
+ n_features_to_select_list = [3, 3]
375
+ step_list = [2, 3]
376
+ for n_features, n_features_to_select, step in zip(
377
+ n_features_list, n_features_to_select_list, step_list
378
+ ):
379
+ generator = check_random_state(global_random_seed)
380
+ X = generator.normal(size=(100, n_features))
381
+ y = generator.rand(100).round()
382
+ rfe = RFE(
383
+ estimator=SVC(kernel="linear"),
384
+ n_features_to_select=n_features_to_select,
385
+ step=step,
386
+ )
387
+ rfe.fit(X, y)
388
+ # this number also equals to the maximum of ranking_
389
+ assert np.max(rfe.ranking_) == formula1(n_features, n_features_to_select, step)
390
+ assert np.max(rfe.ranking_) == formula2(n_features, n_features_to_select, step)
391
+
392
+ # In RFECV, 'fit' calls 'RFE._fit'
393
+ # 'number_of_subsets_of_features' of RFE
394
+ # = the size of each score in 'cv_results_' of RFECV
395
+ # = the number of iterations of the for loop before optimization #4534
396
+
397
+ # RFECV, n_features_to_select = 1
398
+ # Case 1, n_features - 1 is divisible by step
399
+ # Case 2, n_features - 1 is not divisible by step
400
+
401
+ n_features_to_select = 1
402
+ n_features_list = [11, 10]
403
+ step_list = [2, 2]
404
+ for n_features, step in zip(n_features_list, step_list):
405
+ generator = check_random_state(global_random_seed)
406
+ X = generator.normal(size=(100, n_features))
407
+ y = generator.rand(100).round()
408
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=step)
409
+ rfecv.fit(X, y)
410
+
411
+ for key in rfecv.cv_results_.keys():
412
+ assert len(rfecv.cv_results_[key]) == formula1(
413
+ n_features, n_features_to_select, step
414
+ )
415
+ assert len(rfecv.cv_results_[key]) == formula2(
416
+ n_features, n_features_to_select, step
417
+ )
418
+
419
+
420
+ def test_rfe_cv_n_jobs(global_random_seed):
421
+ generator = check_random_state(global_random_seed)
422
+ iris = load_iris()
423
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
424
+ y = iris.target
425
+
426
+ rfecv = RFECV(estimator=SVC(kernel="linear"))
427
+ rfecv.fit(X, y)
428
+ rfecv_ranking = rfecv.ranking_
429
+
430
+ rfecv_cv_results_ = rfecv.cv_results_
431
+
432
+ rfecv.set_params(n_jobs=2)
433
+ rfecv.fit(X, y)
434
+ assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
435
+
436
+ assert rfecv_cv_results_.keys() == rfecv.cv_results_.keys()
437
+ for key in rfecv_cv_results_.keys():
438
+ assert rfecv_cv_results_[key] == pytest.approx(rfecv.cv_results_[key])
439
+
440
+
441
+ def test_rfe_cv_groups():
442
+ generator = check_random_state(0)
443
+ iris = load_iris()
444
+ number_groups = 4
445
+ groups = np.floor(np.linspace(0, number_groups, len(iris.target)))
446
+ X = iris.data
447
+ y = (iris.target > 0).astype(int)
448
+
449
+ est_groups = RFECV(
450
+ estimator=RandomForestClassifier(random_state=generator),
451
+ step=1,
452
+ scoring="accuracy",
453
+ cv=GroupKFold(n_splits=2),
454
+ )
455
+ est_groups.fit(X, y, groups=groups)
456
+ assert est_groups.n_features_ > 0
457
+
458
+
459
+ @pytest.mark.parametrize(
460
+ "importance_getter", [attrgetter("regressor_.coef_"), "regressor_.coef_"]
461
+ )
462
+ @pytest.mark.parametrize("selector, expected_n_features", [(RFE, 5), (RFECV, 4)])
463
+ def test_rfe_wrapped_estimator(importance_getter, selector, expected_n_features):
464
+ # Non-regression test for
465
+ # https://github.com/scikit-learn/scikit-learn/issues/15312
466
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
467
+ estimator = LinearSVR(dual="auto", random_state=0)
468
+
469
+ log_estimator = TransformedTargetRegressor(
470
+ regressor=estimator, func=np.log, inverse_func=np.exp
471
+ )
472
+
473
+ selector = selector(log_estimator, importance_getter=importance_getter)
474
+ sel = selector.fit(X, y)
475
+ assert sel.support_.sum() == expected_n_features
476
+
477
+
478
+ @pytest.mark.parametrize(
479
+ "importance_getter, err_type",
480
+ [
481
+ ("auto", ValueError),
482
+ ("random", AttributeError),
483
+ (lambda x: x.importance, AttributeError),
484
+ ],
485
+ )
486
+ @pytest.mark.parametrize("Selector", [RFE, RFECV])
487
+ def test_rfe_importance_getter_validation(importance_getter, err_type, Selector):
488
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=42)
489
+ estimator = LinearSVR(dual="auto")
490
+ log_estimator = TransformedTargetRegressor(
491
+ regressor=estimator, func=np.log, inverse_func=np.exp
492
+ )
493
+
494
+ with pytest.raises(err_type):
495
+ model = Selector(log_estimator, importance_getter=importance_getter)
496
+ model.fit(X, y)
497
+
498
+
499
+ @pytest.mark.parametrize("cv", [None, 5])
500
+ def test_rfe_allow_nan_inf_in_x(cv):
501
+ iris = load_iris()
502
+ X = iris.data
503
+ y = iris.target
504
+
505
+ # add nan and inf value to X
506
+ X[0][0] = np.nan
507
+ X[0][1] = np.inf
508
+
509
+ clf = MockClassifier()
510
+ if cv is not None:
511
+ rfe = RFECV(estimator=clf, cv=cv)
512
+ else:
513
+ rfe = RFE(estimator=clf)
514
+ rfe.fit(X, y)
515
+ rfe.transform(X)
516
+
517
+
518
+ def test_w_pipeline_2d_coef_():
519
+ pipeline = make_pipeline(StandardScaler(), LogisticRegression())
520
+
521
+ data, y = load_iris(return_X_y=True)
522
+ sfm = RFE(
523
+ pipeline,
524
+ n_features_to_select=2,
525
+ importance_getter="named_steps.logisticregression.coef_",
526
+ )
527
+
528
+ sfm.fit(data, y)
529
+ assert sfm.transform(data).shape[1] == 2
530
+
531
+
532
+ def test_rfecv_std_and_mean(global_random_seed):
533
+ generator = check_random_state(global_random_seed)
534
+ iris = load_iris()
535
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
536
+ y = iris.target
537
+
538
+ rfecv = RFECV(estimator=SVC(kernel="linear"))
539
+ rfecv.fit(X, y)
540
+ n_split_keys = len(rfecv.cv_results_) - 2
541
+ split_keys = [f"split{i}_test_score" for i in range(n_split_keys)]
542
+
543
+ cv_scores = np.asarray([rfecv.cv_results_[key] for key in split_keys])
544
+ expected_mean = np.mean(cv_scores, axis=0)
545
+ expected_std = np.std(cv_scores, axis=0)
546
+
547
+ assert_allclose(rfecv.cv_results_["mean_test_score"], expected_mean)
548
+ assert_allclose(rfecv.cv_results_["std_test_score"], expected_std)
549
+
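The assertions above assume cv_results_ holds one split{i}_test_score entry per CV split plus the two aggregates. A small inspection sketch, assuming the default 5-fold CV and the key layout these tests target (other scikit-learn versions may expose additional keys):

    from sklearn.datasets import load_iris
    from sklearn.feature_selection import RFECV
    from sklearn.svm import SVC

    X_demo, y_demo = load_iris(return_X_y=True)
    rfecv_demo = RFECV(estimator=SVC(kernel="linear")).fit(X_demo, y_demo)
    print(sorted(rfecv_demo.cv_results_))
    # expected here: ['mean_test_score', 'split0_test_score', ...,
    #                 'split4_test_score', 'std_test_score']
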
550
+
551
+ @pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
552
+ def test_multioutput(ClsRFE):
553
+ X = np.random.normal(size=(10, 3))
554
+ y = np.random.randint(2, size=(10, 2))
555
+ clf = RandomForestClassifier(n_estimators=5)
556
+ rfe_test = ClsRFE(clf)
557
+ rfe_test.fit(X, y)
558
+
559
+
560
+ @pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
561
+ def test_pipeline_with_nans(ClsRFE):
562
+ """Check that RFE works with pipeline that accept nans.
563
+
564
+ Non-regression test for gh-21743.
565
+ """
566
+ X, y = load_iris(return_X_y=True)
567
+ X[0, 0] = np.nan
568
+
569
+ pipe = make_pipeline(
570
+ SimpleImputer(),
571
+ StandardScaler(),
572
+ LogisticRegression(),
573
+ )
574
+
575
+ fs = ClsRFE(
576
+ estimator=pipe,
577
+ importance_getter="named_steps.logisticregression.coef_",
578
+ )
579
+ fs.fit(X, y)
580
+
581
+
582
+ @pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
583
+ @pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
584
+ def test_rfe_pls(ClsRFE, PLSEstimator):
585
+ """Check the behaviour of RFE with PLS estimators.
586
+
587
+ Non-regression test for:
588
+ https://github.com/scikit-learn/scikit-learn/issues/12410
589
+ """
590
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
591
+ estimator = PLSEstimator(n_components=1)
592
+ selector = ClsRFE(estimator, step=1).fit(X, y)
593
+ assert selector.score(X, y) > 0.5
594
+
595
+
596
+ def test_rfe_estimator_attribute_error():
597
+ """Check that we raise the proper AttributeError when the estimator
598
+ does not implement the `decision_function` method, which is decorated with
599
+ `available_if`.
600
+
601
+ Non-regression test for:
602
+ https://github.com/scikit-learn/scikit-learn/issues/28108
603
+ """
604
+ iris = load_iris()
605
+
606
+ # `LinearRegression` does not implement 'decision_function' and should raise an
607
+ # AttributeError
608
+ rfe = RFE(estimator=LinearRegression())
609
+
610
+ outer_msg = "This 'RFE' has no attribute 'decision_function'"
611
+ inner_msg = "'LinearRegression' object has no attribute 'decision_function'"
612
+ with pytest.raises(AttributeError, match=outer_msg) as exec_info:
613
+ rfe.fit(iris.data, iris.target).decision_function(iris.data)
614
+ assert isinstance(exec_info.value.__cause__, AttributeError)
615
+ assert inner_msg in str(exec_info.value.__cause__)
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py ADDED
@@ -0,0 +1,323 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_array_equal
4
+
5
+ from sklearn.cluster import KMeans
6
+ from sklearn.datasets import make_blobs, make_classification, make_regression
7
+ from sklearn.ensemble import HistGradientBoostingRegressor
8
+ from sklearn.feature_selection import SequentialFeatureSelector
9
+ from sklearn.linear_model import LinearRegression
10
+ from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
11
+ from sklearn.neighbors import KNeighborsClassifier
12
+ from sklearn.pipeline import make_pipeline
13
+ from sklearn.preprocessing import StandardScaler
14
+ from sklearn.utils.fixes import CSR_CONTAINERS
15
+
16
+
17
+ def test_bad_n_features_to_select():
18
+ n_features = 5
19
+ X, y = make_regression(n_features=n_features)
20
+ sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=n_features)
21
+ with pytest.raises(ValueError, match="n_features_to_select must be < n_features"):
22
+ sfs.fit(X, y)
23
+
24
+
25
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
26
+ @pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto"))
27
+ def test_n_features_to_select(direction, n_features_to_select):
28
+ # Make sure n_features_to_select is respected
29
+
30
+ n_features = 10
31
+ X, y = make_regression(n_features=n_features, random_state=0)
32
+ sfs = SequentialFeatureSelector(
33
+ LinearRegression(),
34
+ n_features_to_select=n_features_to_select,
35
+ direction=direction,
36
+ cv=2,
37
+ )
38
+ sfs.fit(X, y)
39
+
40
+ if n_features_to_select == "auto":
41
+ n_features_to_select = n_features // 2
42
+
43
+ assert sfs.get_support(indices=True).shape[0] == n_features_to_select
44
+ assert sfs.n_features_to_select_ == n_features_to_select
45
+ assert sfs.transform(X).shape[1] == n_features_to_select
46
+
47
+
48
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
49
+ def test_n_features_to_select_auto(direction):
50
+ """Check the behaviour of `n_features_to_select="auto"` with different
51
+ values for the parameter `tol`.
52
+ """
53
+
54
+ n_features = 10
55
+ tol = 1e-3
56
+ X, y = make_regression(n_features=n_features, random_state=0)
57
+ sfs = SequentialFeatureSelector(
58
+ LinearRegression(),
59
+ n_features_to_select="auto",
60
+ tol=tol,
61
+ direction=direction,
62
+ cv=2,
63
+ )
64
+ sfs.fit(X, y)
65
+
66
+ max_features_to_select = n_features - 1
67
+
68
+ assert sfs.get_support(indices=True).shape[0] <= max_features_to_select
69
+ assert sfs.n_features_to_select_ <= max_features_to_select
70
+ assert sfs.transform(X).shape[1] <= max_features_to_select
71
+ assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_
72
+
73
+
74
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
75
+ def test_n_features_to_select_stopping_criterion(direction):
76
+ """Check the behaviour of the stopping criterion for feature selection
77
+ depending on the values of `n_features_to_select` and `tol`.
78
+
79
+ When `direction` is `'forward'`, select a new feature at random
80
+ among those not currently selected in selector.support_,
81
+ build a new version of the data that includes all the features
82
+ in selector.support_ + this newly selected feature.
83
+ And check that the cross-validation score of the model trained on
84
+ this new dataset variant is lower than that of the model with
85
+ the forward-selected features, or at least does not improve
86
+ by more than the tol margin.
87
+
88
+ When `direction` is `'backward'`, instead of adding a new feature
89
+ to selector.support_, try to remove one of the selected features at random,
90
+ and check that the cross-validation score is either decreasing or
91
+ not improving by more than the tol margin.
92
+ """
93
+
94
+ X, y = make_regression(n_features=50, n_informative=10, random_state=0)
95
+
96
+ tol = 1e-3
97
+
98
+ sfs = SequentialFeatureSelector(
99
+ LinearRegression(),
100
+ n_features_to_select="auto",
101
+ tol=tol,
102
+ direction=direction,
103
+ cv=2,
104
+ )
105
+ sfs.fit(X, y)
106
+ selected_X = sfs.transform(X)
107
+
108
+ rng = np.random.RandomState(0)
109
+
110
+ added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True)))
111
+ added_X = np.hstack(
112
+ [
113
+ selected_X,
114
+ (X[:, rng.choice(added_candidates)])[:, np.newaxis],
115
+ ]
116
+ )
117
+
118
+ removed_candidate = rng.choice(list(range(sfs.n_features_to_select_)))
119
+ removed_X = np.delete(selected_X, removed_candidate, axis=1)
120
+
121
+ plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean()
122
+ sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean()
123
+ added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean()
124
+ removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean()
125
+
126
+ assert sfs_cv_score >= plain_cv_score
127
+
128
+ if direction == "forward":
129
+ assert (sfs_cv_score - added_cv_score) <= tol
130
+ assert (sfs_cv_score - removed_cv_score) >= tol
131
+ else:
132
+ assert (added_cv_score - sfs_cv_score) <= tol
133
+ assert (removed_cv_score - sfs_cv_score) <= tol
134
+
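A small companion sketch of how tol drives the stopping rule described in the docstring above; the values are illustrative, but a larger tol demands a bigger per-step improvement and so tends to stop with fewer features:

    from sklearn.datasets import make_regression
    from sklearn.feature_selection import SequentialFeatureSelector
    from sklearn.linear_model import LinearRegression

    X_demo, y_demo = make_regression(n_features=50, n_informative=10, random_state=0)
    for tol_demo in (1e-3, 1e-1):
        sfs_demo = SequentialFeatureSelector(
            LinearRegression(),
            n_features_to_select="auto",
            tol=tol_demo,
            direction="forward",
            cv=2,
        ).fit(X_demo, y_demo)
        print(tol_demo, sfs_demo.n_features_to_select_)
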
135
+
136
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
137
+ @pytest.mark.parametrize(
138
+ "n_features_to_select, expected",
139
+ (
140
+ (0.1, 1),
141
+ (1.0, 10),
142
+ (0.5, 5),
143
+ ),
144
+ )
145
+ def test_n_features_to_select_float(direction, n_features_to_select, expected):
146
+ # Test passing a float as n_features_to_select
147
+ X, y = make_regression(n_features=10)
148
+ sfs = SequentialFeatureSelector(
149
+ LinearRegression(),
150
+ n_features_to_select=n_features_to_select,
151
+ direction=direction,
152
+ cv=2,
153
+ )
154
+ sfs.fit(X, y)
155
+ assert sfs.n_features_to_select_ == expected
156
+
157
+
158
+ @pytest.mark.parametrize("seed", range(10))
159
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
160
+ @pytest.mark.parametrize(
161
+ "n_features_to_select, expected_selected_features",
162
+ [
163
+ (2, [0, 2]), # f1 is dropped since it has no predictive power
164
+ (1, [2]), # f2 is more predictive than f0 so it's kept
165
+ ],
166
+ )
167
+ def test_sanity(seed, direction, n_features_to_select, expected_selected_features):
168
+ # Basic sanity check: 3 features, only f0 and f2 are correlated with the
169
+ # target, f2 having a stronger correlation than f0. We expect f1 to be
170
+ # dropped, and f2 to always be selected.
171
+
172
+ rng = np.random.RandomState(seed)
173
+ n_samples = 100
174
+ X = rng.randn(n_samples, 3)
175
+ y = 3 * X[:, 0] - 10 * X[:, 2]
176
+
177
+ sfs = SequentialFeatureSelector(
178
+ LinearRegression(),
179
+ n_features_to_select=n_features_to_select,
180
+ direction=direction,
181
+ cv=2,
182
+ )
183
+ sfs.fit(X, y)
184
+ assert_array_equal(sfs.get_support(indices=True), expected_selected_features)
185
+
186
+
187
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
188
+ def test_sparse_support(csr_container):
189
+ # Make sure sparse data is supported
190
+
191
+ X, y = make_regression(n_features=10)
192
+ X = csr_container(X)
193
+ sfs = SequentialFeatureSelector(
194
+ LinearRegression(), n_features_to_select="auto", cv=2
195
+ )
196
+ sfs.fit(X, y)
197
+ sfs.transform(X)
198
+
199
+
200
+ def test_nan_support():
201
+ # Make sure nans are OK if the underlying estimator supports nans
202
+
203
+ rng = np.random.RandomState(0)
204
+ n_samples, n_features = 40, 4
205
+ X, y = make_regression(n_samples, n_features, random_state=0)
206
+ nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool)
207
+ X[nan_mask] = np.nan
208
+ sfs = SequentialFeatureSelector(
209
+ HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2
210
+ )
211
+ sfs.fit(X, y)
212
+ sfs.transform(X)
213
+
214
+ with pytest.raises(ValueError, match="Input X contains NaN"):
215
+ # LinearRegression does not support nans
216
+ SequentialFeatureSelector(
217
+ LinearRegression(), n_features_to_select="auto", cv=2
218
+ ).fit(X, y)
219
+
220
+
221
+ def test_pipeline_support():
222
+ # Make sure that pipelines can be passed into SFS and that SFS can be
223
+ # passed into a pipeline
224
+
225
+ n_samples, n_features = 50, 3
226
+ X, y = make_regression(n_samples, n_features, random_state=0)
227
+
228
+ # pipeline in SFS
229
+ pipe = make_pipeline(StandardScaler(), LinearRegression())
230
+ sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2)
231
+ sfs.fit(X, y)
232
+ sfs.transform(X)
233
+
234
+ # SFS in pipeline
235
+ sfs = SequentialFeatureSelector(
236
+ LinearRegression(), n_features_to_select="auto", cv=2
237
+ )
238
+ pipe = make_pipeline(StandardScaler(), sfs)
239
+ pipe.fit(X, y)
240
+ pipe.transform(X)
241
+
242
+
243
+ @pytest.mark.parametrize("n_features_to_select", (2, 3))
244
+ def test_unsupervised_model_fit(n_features_to_select):
245
+ # Make sure that models without classification labels are not being
246
+ # validated
247
+
248
+ X, y = make_blobs(n_features=4)
249
+ sfs = SequentialFeatureSelector(
250
+ KMeans(n_init=1),
251
+ n_features_to_select=n_features_to_select,
252
+ )
253
+ sfs.fit(X)
254
+ assert sfs.transform(X).shape[1] == n_features_to_select
255
+
256
+
257
+ @pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3))
258
+ def test_no_y_validation_model_fit(y):
259
+ # Make sure that other non-conventional y labels are not accepted
260
+
261
+ X, clusters = make_blobs(n_features=6)
262
+ sfs = SequentialFeatureSelector(
263
+ KMeans(),
264
+ n_features_to_select=3,
265
+ )
266
+
267
+ with pytest.raises((TypeError, ValueError)):
268
+ sfs.fit(X, y)
269
+
270
+
271
+ def test_forward_neg_tol_error():
272
+ """Check that we raise an error when tol<0 and direction='forward'"""
273
+ X, y = make_regression(n_features=10, random_state=0)
274
+ sfs = SequentialFeatureSelector(
275
+ LinearRegression(),
276
+ n_features_to_select="auto",
277
+ direction="forward",
278
+ tol=-1e-3,
279
+ )
280
+
281
+ with pytest.raises(ValueError, match="tol must be positive"):
282
+ sfs.fit(X, y)
283
+
284
+
285
+ def test_backward_neg_tol():
286
+ """Check that SequentialFeatureSelector works with negative tol
287
+
288
+ non-regression test for #25525
289
+ """
290
+ X, y = make_regression(n_features=10, random_state=0)
291
+ lr = LinearRegression()
292
+ initial_score = lr.fit(X, y).score(X, y)
293
+
294
+ sfs = SequentialFeatureSelector(
295
+ lr,
296
+ n_features_to_select="auto",
297
+ direction="backward",
298
+ tol=-1e-3,
299
+ )
300
+ Xr = sfs.fit_transform(X, y)
301
+ new_score = lr.fit(Xr, y).score(Xr, y)
302
+
303
+ assert 0 < sfs.get_support().sum() < X.shape[1]
304
+ assert new_score < initial_score
305
+
306
+
307
+ def test_cv_generator_support():
308
+ """Check that no exception is raised when cv is a generator
309
+
310
+ non-regression test for #25957
311
+ """
312
+ X, y = make_classification(random_state=0)
313
+
314
+ groups = np.zeros_like(y, dtype=int)
315
+ groups[y.size // 2 :] = 1
316
+
317
+ cv = LeaveOneGroupOut()
318
+ splits = cv.split(X, y, groups=groups)
319
+
320
+ knc = KNeighborsClassifier(n_neighbors=5)
321
+
322
+ sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits)
323
+ sfs.fit(X, y)
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py ADDED
@@ -0,0 +1,72 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.feature_selection import VarianceThreshold
5
+ from sklearn.utils._testing import assert_array_equal
6
+ from sklearn.utils.fixes import BSR_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
7
+
8
+ data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]]
9
+
10
+ data2 = [[-0.13725701]] * 10
11
+
12
+
13
+ @pytest.mark.parametrize(
14
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
15
+ )
16
+ def test_zero_variance(sparse_container):
17
+ # Test VarianceThreshold with default setting, zero variance.
18
+ X = data if sparse_container is None else sparse_container(data)
19
+ sel = VarianceThreshold().fit(X)
20
+ assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True))
21
+
22
+
23
+ def test_zero_variance_value_error():
24
+ # Test VarianceThreshold with default setting, zero variance, error cases.
25
+ with pytest.raises(ValueError):
26
+ VarianceThreshold().fit([[0, 1, 2, 3]])
27
+ with pytest.raises(ValueError):
28
+ VarianceThreshold().fit([[0, 1], [0, 1]])
29
+
30
+
31
+ @pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
32
+ def test_variance_threshold(sparse_container):
33
+ # Test VarianceThreshold with custom variance.
34
+ X = data if sparse_container is None else sparse_container(data)
35
+ X = VarianceThreshold(threshold=0.4).fit_transform(X)
36
+ assert (len(data), 1) == X.shape
37
+
38
+
39
+ @pytest.mark.skipif(
40
+ np.var(data2) == 0,
41
+ reason=(
42
+ "This test is not valid for this platform, "
43
+ "as it relies on numerical instabilities."
44
+ ),
45
+ )
46
+ @pytest.mark.parametrize(
47
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
48
+ )
49
+ def test_zero_variance_floating_point_error(sparse_container):
50
+ # Test that VarianceThreshold(0.0).fit eliminates features that have
51
+ # the same value in every sample, even when floating point errors
52
+ # cause np.var not to be 0 for the feature.
53
+ # See #13691
54
+ X = data2 if sparse_container is None else sparse_container(data2)
55
+ msg = "No feature in X meets the variance threshold 0.00000"
56
+ with pytest.raises(ValueError, match=msg):
57
+ VarianceThreshold().fit(X)
58
+
59
+
60
+ @pytest.mark.parametrize(
61
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
62
+ )
63
+ def test_variance_nan(sparse_container):
64
+ arr = np.array(data, dtype=np.float64)
65
+ # add single NaN and feature should still be included
66
+ arr[0, 0] = np.nan
67
+ # make all values in feature NaN and feature should be rejected
68
+ arr[:, 1] = np.nan
69
+
70
+ X = arr if sparse_container is None else sparse_container(arr)
71
+ sel = VarianceThreshold().fit(X)
72
+ assert_array_equal([0, 3, 4], sel.get_support(indices=True))
env-llmeval/lib/python3.10/site-packages/sklearn/impute/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ """Transformers for missing value imputation"""
2
+ import typing
3
+
4
+ from ._base import MissingIndicator, SimpleImputer
5
+ from ._knn import KNNImputer
6
+
7
+ if typing.TYPE_CHECKING:
8
+ # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
9
+ # TODO: remove this check once the estimator is no longer experimental.
10
+ from ._iterative import IterativeImputer # noqa
11
+
12
+ __all__ = ["MissingIndicator", "SimpleImputer", "KNNImputer"]
13
+
14
+
15
+ # TODO: remove this check once the estimator is no longer experimental.
16
+ def __getattr__(name):
17
+ if name == "IterativeImputer":
18
+ raise ImportError(
19
+ f"{name} is experimental and the API might change without any "
20
+ "deprecation cycle. To use it, you need to explicitly import "
21
+ "enable_iterative_imputer:\n"
22
+ "from sklearn.experimental import enable_iterative_imputer"
23
+ )
24
+ raise AttributeError(f"module {__name__} has no attribute {name}")
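The opt-in that the error message points to looks like this in practice (a brief usage sketch):

    from sklearn.experimental import enable_iterative_imputer  # noqa: F401
    from sklearn.impute import IterativeImputer

    imputer = IterativeImputer(random_state=0)
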
env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (903 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc ADDED
Binary file (29.4 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc ADDED
Binary file (28.4 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc ADDED
Binary file (11.5 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/_base.py ADDED
@@ -0,0 +1,1075 @@
1
+ # Authors: Nicolas Tresegnie <[email protected]>
2
+ # Sergey Feldman <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import numbers
6
+ import warnings
7
+ from collections import Counter
8
+ from functools import partial
9
+
10
+ import numpy as np
11
+ import numpy.ma as ma
12
+ from scipy import sparse as sp
13
+
14
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
15
+ from ..utils import _is_pandas_na, is_scalar_nan
16
+ from ..utils._mask import _get_mask
17
+ from ..utils._param_validation import MissingValues, StrOptions
18
+ from ..utils.fixes import _mode
19
+ from ..utils.sparsefuncs import _get_median
20
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
21
+
22
+
23
+ def _check_inputs_dtype(X, missing_values):
24
+ if _is_pandas_na(missing_values):
25
+ # Allow using `pd.NA` as missing values to impute numerical arrays.
26
+ return
27
+ if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
28
+ raise ValueError(
29
+ "'X' and 'missing_values' types are expected to be"
30
+ " both numerical. Got X.dtype={} and "
31
+ " type(missing_values)={}.".format(X.dtype, type(missing_values))
32
+ )
33
+
34
+
35
+ def _most_frequent(array, extra_value, n_repeat):
36
+ """Compute the most frequent value in a 1d array extended with
37
+ [extra_value] * n_repeat, where extra_value is assumed to be not part
38
+ of the array."""
39
+ # Compute the most frequent value in array only
40
+ if array.size > 0:
41
+ if array.dtype == object:
42
+ # scipy.stats.mode is slow with object dtype array.
43
+ # Python Counter is more efficient
44
+ counter = Counter(array)
45
+ most_frequent_count = counter.most_common(1)[0][1]
46
+ # tie breaking similarly to scipy.stats.mode
47
+ most_frequent_value = min(
48
+ value
49
+ for value, count in counter.items()
50
+ if count == most_frequent_count
51
+ )
52
+ else:
53
+ mode = _mode(array)
54
+ most_frequent_value = mode[0][0]
55
+ most_frequent_count = mode[1][0]
56
+ else:
57
+ most_frequent_value = 0
58
+ most_frequent_count = 0
59
+
60
+ # Compare to array + [extra_value] * n_repeat
61
+ if most_frequent_count == 0 and n_repeat == 0:
62
+ return np.nan
63
+ elif most_frequent_count < n_repeat:
64
+ return extra_value
65
+ elif most_frequent_count > n_repeat:
66
+ return most_frequent_value
67
+ elif most_frequent_count == n_repeat:
68
+ # tie breaking similarly to scipy.stats.mode
69
+ return min(most_frequent_value, extra_value)
70
+
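An illustrative re-spelling of the padded mode this helper computes; it ignores the empty-array corner cases handled above and is not the implementation used here:

    from collections import Counter

    import numpy as np

    def mode_with_padding(array, extra_value, n_repeat):
        # Mode of array extended with [extra_value] * n_repeat; smaller value wins ties.
        counts = Counter(list(array) + [extra_value] * n_repeat)
        top = max(counts.values())
        return min(value for value, count in counts.items() if count == top)

    assert mode_with_padding(np.array([1, 1, 2]), extra_value=0, n_repeat=2) == 0
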
71
+
72
+ class _BaseImputer(TransformerMixin, BaseEstimator):
73
+ """Base class for all imputers.
74
+
75
+ It automatically adds support for `add_indicator`.
76
+ """
77
+
78
+ _parameter_constraints: dict = {
79
+ "missing_values": [MissingValues()],
80
+ "add_indicator": ["boolean"],
81
+ "keep_empty_features": ["boolean"],
82
+ }
83
+
84
+ def __init__(
85
+ self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False
86
+ ):
87
+ self.missing_values = missing_values
88
+ self.add_indicator = add_indicator
89
+ self.keep_empty_features = keep_empty_features
90
+
91
+ def _fit_indicator(self, X):
92
+ """Fit a MissingIndicator."""
93
+ if self.add_indicator:
94
+ self.indicator_ = MissingIndicator(
95
+ missing_values=self.missing_values, error_on_new=False
96
+ )
97
+ self.indicator_._fit(X, precomputed=True)
98
+ else:
99
+ self.indicator_ = None
100
+
101
+ def _transform_indicator(self, X):
102
+ """Compute the indicator mask.
103
+
104
+ Note that X must be the original data as passed to the imputer before
105
+ any imputation, since imputation may be done inplace in some cases.
106
+ """
107
+ if self.add_indicator:
108
+ if not hasattr(self, "indicator_"):
109
+ raise ValueError(
110
+ "Make sure to call _fit_indicator before _transform_indicator"
111
+ )
112
+ return self.indicator_.transform(X)
113
+
114
+ def _concatenate_indicator(self, X_imputed, X_indicator):
115
+ """Concatenate indicator mask with the imputed data."""
116
+ if not self.add_indicator:
117
+ return X_imputed
118
+
119
+ if sp.issparse(X_imputed):
120
+ # sp.hstack may result in different formats between sparse arrays and
121
+ # matrices; specify the format to keep consistent behavior
122
+ hstack = partial(sp.hstack, format=X_imputed.format)
123
+ else:
124
+ hstack = np.hstack
125
+
126
+ if X_indicator is None:
127
+ raise ValueError(
128
+ "Data from the missing indicator are not provided. Call "
129
+ "_fit_indicator and _transform_indicator in the imputer "
130
+ "implementation."
131
+ )
132
+
133
+ return hstack((X_imputed, X_indicator))
134
+
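A compact sketch of the indicator concatenation as seen from the public API: imputed features come first, followed by one indicator column per feature that had missing values at fit time:

    import numpy as np
    from sklearn.impute import SimpleImputer

    X_demo = np.array([[1.0, np.nan], [2.0, 3.0]])
    imp = SimpleImputer(add_indicator=True).fit(X_demo)
    print(imp.transform(X_demo))
    # [[1. 3. 1.]
    #  [2. 3. 0.]]  -> two imputed columns followed by the missing-value indicator
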
135
+ def _concatenate_indicator_feature_names_out(self, names, input_features):
136
+ if not self.add_indicator:
137
+ return names
138
+
139
+ indicator_names = self.indicator_.get_feature_names_out(input_features)
140
+ return np.concatenate([names, indicator_names])
141
+
142
+ def _more_tags(self):
143
+ return {"allow_nan": is_scalar_nan(self.missing_values)}
144
+
145
+
146
+ class SimpleImputer(_BaseImputer):
147
+ """Univariate imputer for completing missing values with simple strategies.
148
+
149
+ Replace missing values using a descriptive statistic (e.g. mean, median, or
150
+ most frequent) along each column, or using a constant value.
151
+
152
+ Read more in the :ref:`User Guide <impute>`.
153
+
154
+ .. versionadded:: 0.20
155
+ `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
156
+ estimator which is now removed.
157
+
158
+ Parameters
159
+ ----------
160
+ missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan
161
+ The placeholder for the missing values. All occurrences of
162
+ `missing_values` will be imputed. For pandas' dataframes with
163
+ nullable integer dtypes with missing values, `missing_values`
164
+ can be set to either `np.nan` or `pd.NA`.
165
+
166
+ strategy : str, default='mean'
167
+ The imputation strategy.
168
+
169
+ - If "mean", then replace missing values using the mean along
170
+ each column. Can only be used with numeric data.
171
+ - If "median", then replace missing values using the median along
172
+ each column. Can only be used with numeric data.
173
+ - If "most_frequent", then replace missing using the most frequent
174
+ value along each column. Can be used with strings or numeric data.
175
+ If there is more than one such value, only the smallest is returned.
176
+ - If "constant", then replace missing values with fill_value. Can be
177
+ used with strings or numeric data.
178
+
179
+ .. versionadded:: 0.20
180
+ strategy="constant" for fixed value imputation.
181
+
182
+ fill_value : str or numerical value, default=None
183
+ When strategy == "constant", `fill_value` is used to replace all
184
+ occurrences of missing_values. For string or object data types,
185
+ `fill_value` must be a string.
186
+ If `None`, `fill_value` will be 0 when imputing numerical
187
+ data and "missing_value" for strings or object data types.
188
+
189
+ copy : bool, default=True
190
+ If True, a copy of X will be created. If False, imputation will
191
+ be done in-place whenever possible. Note that, in the following cases,
192
+ a new copy will always be made, even if `copy=False`:
193
+
194
+ - If `X` is not an array of floating values;
195
+ - If `X` is encoded as a CSR matrix;
196
+ - If `add_indicator=True`.
197
+
198
+ add_indicator : bool, default=False
199
+ If True, a :class:`MissingIndicator` transform will stack onto output
200
+ of the imputer's transform. This allows a predictive estimator
201
+ to account for missingness despite imputation. If a feature has no
202
+ missing values at fit/train time, the feature won't appear on
203
+ the missing indicator even if there are missing values at
204
+ transform/test time.
205
+
206
+ keep_empty_features : bool, default=False
207
+ If True, features that consist exclusively of missing values when
208
+ `fit` is called are returned in results when `transform` is called.
209
+ The imputed value is always `0` except when `strategy="constant"`
210
+ in which case `fill_value` will be used instead.
211
+
212
+ .. versionadded:: 1.2
213
+
214
+ Attributes
215
+ ----------
216
+ statistics_ : array of shape (n_features,)
217
+ The imputation fill value for each feature.
218
+ Computing statistics can result in `np.nan` values.
219
+ During :meth:`transform`, features corresponding to `np.nan`
220
+ statistics will be discarded.
221
+
222
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
223
+ Indicator used to add binary indicators for missing values.
224
+ `None` if `add_indicator=False`.
225
+
226
+ n_features_in_ : int
227
+ Number of features seen during :term:`fit`.
228
+
229
+ .. versionadded:: 0.24
230
+
231
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
232
+ Names of features seen during :term:`fit`. Defined only when `X`
233
+ has feature names that are all strings.
234
+
235
+ .. versionadded:: 1.0
236
+
237
+ See Also
238
+ --------
239
+ IterativeImputer : Multivariate imputer that estimates values to impute for
240
+ each feature with missing values from all the others.
241
+ KNNImputer : Multivariate imputer that estimates missing features using
242
+ nearest samples.
243
+
244
+ Notes
245
+ -----
246
+ Columns which only contained missing values at :meth:`fit` are discarded
247
+ upon :meth:`transform` if strategy is not `"constant"`.
248
+
249
+ In a prediction context, simple imputation usually performs poorly when
250
+ associated with a weak learner. However, with a powerful learner, it can
251
+ lead to as good or better performance than complex imputation such as
252
+ :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`.
253
+
254
+ Examples
255
+ --------
256
+ >>> import numpy as np
257
+ >>> from sklearn.impute import SimpleImputer
258
+ >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
259
+ >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
260
+ SimpleImputer()
261
+ >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
262
+ >>> print(imp_mean.transform(X))
263
+ [[ 7. 2. 3. ]
264
+ [ 4. 3.5 6. ]
265
+ [10. 3.5 9. ]]
266
+
267
+ For a more detailed example see
268
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`.
269
+ """
270
+
271
+ _parameter_constraints: dict = {
272
+ **_BaseImputer._parameter_constraints,
273
+ "strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})],
274
+ "fill_value": "no_validation", # any object is valid
275
+ "copy": ["boolean"],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ *,
281
+ missing_values=np.nan,
282
+ strategy="mean",
283
+ fill_value=None,
284
+ copy=True,
285
+ add_indicator=False,
286
+ keep_empty_features=False,
287
+ ):
288
+ super().__init__(
289
+ missing_values=missing_values,
290
+ add_indicator=add_indicator,
291
+ keep_empty_features=keep_empty_features,
292
+ )
293
+ self.strategy = strategy
294
+ self.fill_value = fill_value
295
+ self.copy = copy
296
+
297
+ def _validate_input(self, X, in_fit):
298
+ if self.strategy in ("most_frequent", "constant"):
299
+ # If input is a list of strings, dtype = object.
300
+ # Otherwise ValueError is raised in SimpleImputer
301
+ # with strategy='most_frequent' or 'constant'
302
+ # because the list is converted to Unicode numpy array
303
+ if isinstance(X, list) and any(
304
+ isinstance(elem, str) for row in X for elem in row
305
+ ):
306
+ dtype = object
307
+ else:
308
+ dtype = None
309
+ else:
310
+ dtype = FLOAT_DTYPES
311
+
312
+ if not in_fit and self._fit_dtype.kind == "O":
313
+ # Use object dtype if fitted on object dtypes
314
+ dtype = self._fit_dtype
315
+
316
+ if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
317
+ force_all_finite = "allow-nan"
318
+ else:
319
+ force_all_finite = True
320
+
321
+ try:
322
+ X = self._validate_data(
323
+ X,
324
+ reset=in_fit,
325
+ accept_sparse="csc",
326
+ dtype=dtype,
327
+ force_all_finite=force_all_finite,
328
+ copy=self.copy,
329
+ )
330
+ except ValueError as ve:
331
+ if "could not convert" in str(ve):
332
+ new_ve = ValueError(
333
+ "Cannot use {} strategy with non-numeric data:\n{}".format(
334
+ self.strategy, ve
335
+ )
336
+ )
337
+ raise new_ve from None
338
+ else:
339
+ raise ve
340
+
341
+ if in_fit:
342
+ # Use the dtype seen in `fit` for non-`fit` conversion
343
+ self._fit_dtype = X.dtype
344
+
345
+ _check_inputs_dtype(X, self.missing_values)
346
+ if X.dtype.kind not in ("i", "u", "f", "O"):
347
+ raise ValueError(
348
+ "SimpleImputer does not support data with dtype "
349
+ "{0}. Please provide either a numeric array (with"
350
+ " a floating point or integer dtype) or "
351
+ "categorical data represented either as an array "
352
+ "with integer dtype or an array of string values "
353
+ "with an object dtype.".format(X.dtype)
354
+ )
355
+
356
+ if sp.issparse(X) and self.missing_values == 0:
357
+ # missing_values = 0 not allowed with sparse data as it would
358
+ # force densification
359
+ raise ValueError(
360
+ "Imputation not possible when missing_values "
361
+ "== 0 and input is sparse. Provide a dense "
362
+ "array instead."
363
+ )
364
+
365
+ if self.strategy == "constant":
366
+ if in_fit and self.fill_value is not None:
367
+ fill_value_dtype = type(self.fill_value)
368
+ err_msg = (
369
+ f"fill_value={self.fill_value!r} (of type {fill_value_dtype!r}) "
370
+ f"cannot be cast to the input data that is {X.dtype!r}. Make sure "
371
+ "that both dtypes are of the same kind."
372
+ )
373
+ elif not in_fit:
374
+ fill_value_dtype = self.statistics_.dtype
375
+ err_msg = (
376
+ f"The dtype of the filling value (i.e. {fill_value_dtype!r}) "
377
+ f"cannot be cast to the input data that is {X.dtype!r}. Make sure "
378
+ "that the dtypes of the input data is of the same kind between "
379
+ "fit and transform."
380
+ )
381
+ else:
382
+ # By default, fill_value=None, and the replacement is always
383
+ # compatible with the input data
384
+ fill_value_dtype = X.dtype
385
+
386
+ # Make sure we can safely cast fill_value dtype to the input data dtype
387
+ if not np.can_cast(fill_value_dtype, X.dtype, casting="same_kind"):
388
+ raise ValueError(err_msg)
389
+
390
+ return X
391
+
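The check above relies on NumPy's "same_kind" casting rule; a tiny sketch of what that rule accepts and rejects (the dtypes are chosen purely for illustration):

    import numpy as np

    # float fill value for float data: allowed
    assert np.can_cast(np.float64, np.float32, casting="same_kind")
    # string fill value for float data: rejected
    assert not np.can_cast(np.dtype("U10"), np.float64, casting="same_kind")
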
392
+ @_fit_context(prefer_skip_nested_validation=True)
393
+ def fit(self, X, y=None):
394
+ """Fit the imputer on `X`.
395
+
396
+ Parameters
397
+ ----------
398
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
399
+ Input data, where `n_samples` is the number of samples and
400
+ `n_features` is the number of features.
401
+
402
+ y : Ignored
403
+ Not used, present here for API consistency by convention.
404
+
405
+ Returns
406
+ -------
407
+ self : object
408
+ Fitted estimator.
409
+ """
410
+ X = self._validate_input(X, in_fit=True)
411
+
412
+ # default fill_value is 0 for numerical input and "missing_value"
413
+ # otherwise
414
+ if self.fill_value is None:
415
+ if X.dtype.kind in ("i", "u", "f"):
416
+ fill_value = 0
417
+ else:
418
+ fill_value = "missing_value"
419
+ else:
420
+ fill_value = self.fill_value
421
+
422
+ if sp.issparse(X):
423
+ self.statistics_ = self._sparse_fit(
424
+ X, self.strategy, self.missing_values, fill_value
425
+ )
426
+ else:
427
+ self.statistics_ = self._dense_fit(
428
+ X, self.strategy, self.missing_values, fill_value
429
+ )
430
+
431
+ return self
432
+
433
+ def _sparse_fit(self, X, strategy, missing_values, fill_value):
434
+ """Fit the transformer on sparse data."""
435
+ missing_mask = _get_mask(X, missing_values)
436
+ mask_data = missing_mask.data
437
+ n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
438
+
439
+ statistics = np.empty(X.shape[1])
440
+
441
+ if strategy == "constant":
442
+ # for constant strategy, self.statistics_ is used to store
443
+ # fill_value in each column
444
+ statistics.fill(fill_value)
445
+ else:
446
+ for i in range(X.shape[1]):
447
+ column = X.data[X.indptr[i] : X.indptr[i + 1]]
448
+ mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
449
+ column = column[~mask_column]
450
+
451
+ # combine explicit and implicit zeros
452
+ mask_zeros = _get_mask(column, 0)
453
+ column = column[~mask_zeros]
454
+ n_explicit_zeros = mask_zeros.sum()
455
+ n_zeros = n_implicit_zeros[i] + n_explicit_zeros
456
+
457
+ if len(column) == 0 and self.keep_empty_features:
458
+ # in case we want to keep columns with only missing values.
459
+ statistics[i] = 0
460
+ else:
461
+ if strategy == "mean":
462
+ s = column.size + n_zeros
463
+ statistics[i] = np.nan if s == 0 else column.sum() / s
464
+
465
+ elif strategy == "median":
466
+ statistics[i] = _get_median(column, n_zeros)
467
+
468
+ elif strategy == "most_frequent":
469
+ statistics[i] = _most_frequent(column, 0, n_zeros)
470
+
471
+ super()._fit_indicator(missing_mask)
472
+
473
+ return statistics
474
+
475
+ def _dense_fit(self, X, strategy, missing_values, fill_value):
476
+ """Fit the transformer on dense data."""
477
+ missing_mask = _get_mask(X, missing_values)
478
+ masked_X = ma.masked_array(X, mask=missing_mask)
479
+
480
+ super()._fit_indicator(missing_mask)
481
+
482
+ # Mean
483
+ if strategy == "mean":
484
+ mean_masked = np.ma.mean(masked_X, axis=0)
485
+ # Avoid the warning "Warning: converting a masked element to nan."
486
+ mean = np.ma.getdata(mean_masked)
487
+ mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
488
+
489
+ return mean
490
+
491
+ # Median
492
+ elif strategy == "median":
493
+ median_masked = np.ma.median(masked_X, axis=0)
494
+ # Avoid the warning "Warning: converting a masked element to nan."
495
+ median = np.ma.getdata(median_masked)
496
+ median[np.ma.getmaskarray(median_masked)] = (
497
+ 0 if self.keep_empty_features else np.nan
498
+ )
499
+
500
+ return median
501
+
502
+ # Most frequent
503
+ elif strategy == "most_frequent":
504
+ # Avoid use of scipy.stats.mstats.mode due to the required
505
+ # additional overhead and slow benchmarking performance.
506
+ # See Issue 14325 and PR 14399 for full discussion.
507
+
508
+ # To be able access the elements by columns
509
+ X = X.transpose()
510
+ mask = missing_mask.transpose()
511
+
512
+ if X.dtype.kind == "O":
513
+ most_frequent = np.empty(X.shape[0], dtype=object)
514
+ else:
515
+ most_frequent = np.empty(X.shape[0])
516
+
517
+ for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
518
+ row_mask = np.logical_not(row_mask).astype(bool)
519
+ row = row[row_mask]
520
+ if len(row) == 0 and self.keep_empty_features:
521
+ most_frequent[i] = 0
522
+ else:
523
+ most_frequent[i] = _most_frequent(row, np.nan, 0)
524
+
525
+ return most_frequent
526
+
527
+ # Constant
528
+ elif strategy == "constant":
529
+ # for constant strategy, self.statistics_ is used to store
530
+ # fill_value in each column
531
+ return np.full(X.shape[1], fill_value, dtype=X.dtype)
532
+
533
+ def transform(self, X):
534
+ """Impute all missing values in `X`.
535
+
536
+ Parameters
537
+ ----------
538
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
539
+ The input data to complete.
540
+
541
+ Returns
542
+ -------
543
+ X_imputed : {ndarray, sparse matrix} of shape \
544
+ (n_samples, n_features_out)
545
+ `X` with imputed values.
546
+ """
547
+ check_is_fitted(self)
548
+
549
+ X = self._validate_input(X, in_fit=False)
550
+ statistics = self.statistics_
551
+
552
+ if X.shape[1] != statistics.shape[0]:
553
+ raise ValueError(
554
+ "X has %d features per sample, expected %d"
555
+ % (X.shape[1], self.statistics_.shape[0])
556
+ )
557
+
558
+ # compute mask before eliminating invalid features
559
+ missing_mask = _get_mask(X, self.missing_values)
560
+
561
+ # Decide whether to keep missing features
562
+ if self.strategy == "constant" or self.keep_empty_features:
563
+ valid_statistics = statistics
564
+ valid_statistics_indexes = None
565
+ else:
566
+ # same as np.isnan but also works for object dtypes
567
+ invalid_mask = _get_mask(statistics, np.nan)
568
+ valid_mask = np.logical_not(invalid_mask)
569
+ valid_statistics = statistics[valid_mask]
570
+ valid_statistics_indexes = np.flatnonzero(valid_mask)
571
+
572
+ if invalid_mask.any():
573
+ invalid_features = np.arange(X.shape[1])[invalid_mask]
574
+ # use feature names warning if features are provided
575
+ if hasattr(self, "feature_names_in_"):
576
+ invalid_features = self.feature_names_in_[invalid_features]
577
+ warnings.warn(
578
+ "Skipping features without any observed values:"
579
+ f" {invalid_features}. At least one non-missing value is needed"
580
+ f" for imputation with strategy='{self.strategy}'."
581
+ )
582
+ X = X[:, valid_statistics_indexes]
583
+
584
+ # Do actual imputation
585
+ if sp.issparse(X):
586
+ if self.missing_values == 0:
587
+ raise ValueError(
588
+ "Imputation not possible when missing_values "
589
+ "== 0 and input is sparse. Provide a dense "
590
+ "array instead."
591
+ )
592
+ else:
593
+ # if no invalid statistics are found, use the mask computed
594
+ # before, else recompute mask
595
+ if valid_statistics_indexes is None:
596
+ mask = missing_mask.data
597
+ else:
598
+ mask = _get_mask(X.data, self.missing_values)
599
+ indexes = np.repeat(
600
+ np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
601
+ )[mask]
602
+
603
+ X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
604
+ else:
605
+ # use mask computed before eliminating invalid mask
606
+ if valid_statistics_indexes is None:
607
+ mask_valid_features = missing_mask
608
+ else:
609
+ mask_valid_features = missing_mask[:, valid_statistics_indexes]
610
+ n_missing = np.sum(mask_valid_features, axis=0)
611
+ values = np.repeat(valid_statistics, n_missing)
612
+ coordinates = np.where(mask_valid_features.transpose())[::-1]
613
+
614
+ X[coordinates] = values
615
+
616
+ X_indicator = super()._transform_indicator(missing_mask)
617
+
618
+ return super()._concatenate_indicator(X, X_indicator)
619
+
620
+ def inverse_transform(self, X):
621
+ """Convert the data back to the original representation.
622
+
623
+ Inverts the `transform` operation performed on an array.
624
+ This operation can only be performed after :class:`SimpleImputer` is
625
+ instantiated with `add_indicator=True`.
626
+
627
+ Note that `inverse_transform` can only invert the transform in
628
+ features that have binary indicators for missing values. If a feature
629
+ has no missing values at `fit` time, the feature won't have a binary
630
+ indicator, and the imputation done at `transform` time won't be
631
+ inverted.
632
+
633
+ .. versionadded:: 0.24
634
+
635
+ Parameters
636
+ ----------
637
+ X : array-like of shape \
638
+ (n_samples, n_features + n_features_missing_indicator)
639
+ The imputed data to be reverted to original data. It has to be
640
+ an augmented array of imputed data and the missing indicator mask.
641
+
642
+ Returns
643
+ -------
644
+ X_original : ndarray of shape (n_samples, n_features)
645
+ The original `X` with missing values as it was prior
646
+ to imputation.
647
+ """
648
+ check_is_fitted(self)
649
+
650
+ if not self.add_indicator:
651
+ raise ValueError(
652
+ "'inverse_transform' works only when "
653
+ "'SimpleImputer' is instantiated with "
654
+ "'add_indicator=True'. "
655
+ f"Got 'add_indicator={self.add_indicator}' "
656
+ "instead."
657
+ )
658
+
659
+ n_features_missing = len(self.indicator_.features_)
660
+ non_empty_feature_count = X.shape[1] - n_features_missing
661
+ array_imputed = X[:, :non_empty_feature_count].copy()
662
+ missing_mask = X[:, non_empty_feature_count:].astype(bool)
663
+
664
+ n_features_original = len(self.statistics_)
665
+ shape_original = (X.shape[0], n_features_original)
666
+ X_original = np.zeros(shape_original)
667
+ X_original[:, self.indicator_.features_] = missing_mask
668
+ full_mask = X_original.astype(bool)
669
+
670
+ imputed_idx, original_idx = 0, 0
671
+ while imputed_idx < len(array_imputed.T):
672
+ if not np.all(X_original[:, original_idx]):
673
+ X_original[:, original_idx] = array_imputed.T[imputed_idx]
674
+ imputed_idx += 1
675
+ original_idx += 1
676
+ else:
677
+ original_idx += 1
678
+
679
+ X_original[full_mask] = self.missing_values
680
+ return X_original
681
+
682
+ def _more_tags(self):
683
+ return {
684
+ "allow_nan": _is_pandas_na(self.missing_values) or is_scalar_nan(
685
+ self.missing_values
686
+ )
687
+ }
688
+
689
+ def get_feature_names_out(self, input_features=None):
690
+ """Get output feature names for transformation.
691
+
692
+ Parameters
693
+ ----------
694
+ input_features : array-like of str or None, default=None
695
+ Input features.
696
+
697
+ - If `input_features` is `None`, then `feature_names_in_` is
698
+ used as feature names in. If `feature_names_in_` is not defined,
699
+ then the following input feature names are generated:
700
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
701
+ - If `input_features` is an array-like, then `input_features` must
702
+ match `feature_names_in_` if `feature_names_in_` is defined.
703
+
704
+ Returns
705
+ -------
706
+ feature_names_out : ndarray of str objects
707
+ Transformed feature names.
708
+ """
709
+ check_is_fitted(self, "n_features_in_")
710
+ input_features = _check_feature_names_in(self, input_features)
711
+ non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))
712
+ names = input_features[non_missing_mask]
713
+ return self._concatenate_indicator_feature_names_out(names, input_features)
714
+
715
+
716
+ class MissingIndicator(TransformerMixin, BaseEstimator):
717
+ """Binary indicators for missing values.
718
+
719
+ Note that this component typically should not be used in a vanilla
720
+ :class:`~sklearn.pipeline.Pipeline` consisting of transformers and a
721
+ classifier, but rather could be added using a
722
+ :class:`~sklearn.pipeline.FeatureUnion` or
723
+ :class:`~sklearn.compose.ColumnTransformer`.
724
+
725
+ Read more in the :ref:`User Guide <impute>`.
726
+
727
+ .. versionadded:: 0.20
728
+
729
+ Parameters
730
+ ----------
731
+ missing_values : int, float, str, np.nan or None, default=np.nan
732
+ The placeholder for the missing values. All occurrences of
733
+ `missing_values` will be imputed. For pandas' dataframes with
734
+ nullable integer dtypes with missing values, `missing_values`
735
+ should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
736
+
737
+ features : {'missing-only', 'all'}, default='missing-only'
738
+ Whether the imputer mask should represent all or a subset of
739
+ features.
740
+
741
+ - If `'missing-only'` (default), the imputer mask will only represent
742
+ features containing missing values during fit time.
743
+ - If `'all'`, the imputer mask will represent all features.
744
+
745
+ sparse : bool or 'auto', default='auto'
746
+ Whether the imputer mask format should be sparse or dense.
747
+
748
+ - If `'auto'` (default), the imputer mask will be of same type as
749
+ input.
750
+ - If `True`, the imputer mask will be a sparse matrix.
751
+ - If `False`, the imputer mask will be a numpy array.
752
+
753
+ error_on_new : bool, default=True
754
+ If `True`, :meth:`transform` will raise an error when there are
755
+ features with missing values that have no missing values in
756
+ :meth:`fit`. This is applicable only when `features='missing-only'`.
757
+
758
+ Attributes
759
+ ----------
760
+ features_ : ndarray of shape (n_missing_features,) or (n_features,)
761
+ The features indices which will be returned when calling
762
+ :meth:`transform`. They are computed during :meth:`fit`. If
763
+ `features='all'`, `features_` is equal to `range(n_features)`.
764
+
765
+ n_features_in_ : int
766
+ Number of features seen during :term:`fit`.
767
+
768
+ .. versionadded:: 0.24
769
+
770
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
771
+ Names of features seen during :term:`fit`. Defined only when `X`
772
+ has feature names that are all strings.
773
+
774
+ .. versionadded:: 1.0
775
+
776
+ See Also
777
+ --------
778
+ SimpleImputer : Univariate imputation of missing values.
779
+ IterativeImputer : Multivariate imputation of missing values.
780
+
781
+ Examples
782
+ --------
783
+ >>> import numpy as np
784
+ >>> from sklearn.impute import MissingIndicator
785
+ >>> X1 = np.array([[np.nan, 1, 3],
786
+ ... [4, 0, np.nan],
787
+ ... [8, 1, 0]])
788
+ >>> X2 = np.array([[5, 1, np.nan],
789
+ ... [np.nan, 2, 3],
790
+ ... [2, 4, 0]])
791
+ >>> indicator = MissingIndicator()
792
+ >>> indicator.fit(X1)
793
+ MissingIndicator()
794
+ >>> X2_tr = indicator.transform(X2)
795
+ >>> X2_tr
796
+ array([[False, True],
797
+ [ True, False],
798
+ [False, False]])
799
+ """
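As the note in the docstring above suggests, the indicator is usually combined with an imputer through a FeatureUnion or ColumnTransformer rather than chained in a plain Pipeline; a minimal sketch of that pattern (illustrative only):

    import numpy as np
    from sklearn.pipeline import make_union
    from sklearn.impute import SimpleImputer, MissingIndicator

    X = np.array([[np.nan, 1, 3], [4, 0, np.nan], [8, 1, 0]])
    transformer = make_union(SimpleImputer(strategy="mean"), MissingIndicator())
    Xt = transformer.fit_transform(X)  # imputed features followed by the boolean indicator columns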
800
+
801
+ _parameter_constraints: dict = {
802
+ "missing_values": [MissingValues()],
803
+ "features": [StrOptions({"missing-only", "all"})],
804
+ "sparse": ["boolean", StrOptions({"auto"})],
805
+ "error_on_new": ["boolean"],
806
+ }
807
+
808
+ def __init__(
809
+ self,
810
+ *,
811
+ missing_values=np.nan,
812
+ features="missing-only",
813
+ sparse="auto",
814
+ error_on_new=True,
815
+ ):
816
+ self.missing_values = missing_values
817
+ self.features = features
818
+ self.sparse = sparse
819
+ self.error_on_new = error_on_new
820
+
821
+ def _get_missing_features_info(self, X):
822
+ """Compute the imputer mask and the indices of the features
823
+ containing missing values.
824
+
825
+ Parameters
826
+ ----------
827
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
828
+ The input data with missing values. Note that `X` has been
829
+ checked in :meth:`fit` and :meth:`transform` before to call this
830
+ function.
831
+
832
+ Returns
833
+ -------
834
+ imputer_mask : {ndarray, sparse matrix} of shape \
835
+ (n_samples, n_features)
836
+ The imputer mask of the original data.
837
+
838
+ features_with_missing : ndarray of shape (n_features_with_missing)
839
+ The features containing missing values.
840
+ """
841
+ if not self._precomputed:
842
+ imputer_mask = _get_mask(X, self.missing_values)
843
+ else:
844
+ imputer_mask = X
845
+
846
+ if sp.issparse(X):
847
+ imputer_mask.eliminate_zeros()
848
+
849
+ if self.features == "missing-only":
850
+ n_missing = imputer_mask.getnnz(axis=0)
851
+
852
+ if self.sparse is False:
853
+ imputer_mask = imputer_mask.toarray()
854
+ elif imputer_mask.format == "csr":
855
+ imputer_mask = imputer_mask.tocsc()
856
+ else:
857
+ if not self._precomputed:
858
+ imputer_mask = _get_mask(X, self.missing_values)
859
+ else:
860
+ imputer_mask = X
861
+
862
+ if self.features == "missing-only":
863
+ n_missing = imputer_mask.sum(axis=0)
864
+
865
+ if self.sparse is True:
866
+ imputer_mask = sp.csc_matrix(imputer_mask)
867
+
868
+ if self.features == "all":
869
+ features_indices = np.arange(X.shape[1])
870
+ else:
871
+ features_indices = np.flatnonzero(n_missing)
872
+
873
+ return imputer_mask, features_indices
874
+
875
+ def _validate_input(self, X, in_fit):
876
+ if not is_scalar_nan(self.missing_values):
877
+ force_all_finite = True
878
+ else:
879
+ force_all_finite = "allow-nan"
880
+ X = self._validate_data(
881
+ X,
882
+ reset=in_fit,
883
+ accept_sparse=("csc", "csr"),
884
+ dtype=None,
885
+ force_all_finite=force_all_finite,
886
+ )
887
+ _check_inputs_dtype(X, self.missing_values)
888
+ if X.dtype.kind not in ("i", "u", "f", "O"):
889
+ raise ValueError(
890
+ "MissingIndicator does not support data with "
891
+ "dtype {0}. Please provide either a numeric array"
892
+ " (with a floating point or integer dtype) or "
893
+ "categorical data represented either as an array "
894
+ "with integer dtype or an array of string values "
895
+ "with an object dtype.".format(X.dtype)
896
+ )
897
+
898
+ if sp.issparse(X) and self.missing_values == 0:
899
+ # missing_values = 0 not allowed with sparse data as it would
900
+ # force densification
901
+ raise ValueError(
902
+ "Sparse input with missing_values=0 is "
903
+ "not supported. Provide a dense "
904
+ "array instead."
905
+ )
906
+
907
+ return X
908
+
909
+ def _fit(self, X, y=None, precomputed=False):
910
+ """Fit the transformer on `X`.
911
+
912
+ Parameters
913
+ ----------
914
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
915
+ Input data, where `n_samples` is the number of samples and
916
+ `n_features` is the number of features.
917
+ If `precomputed=True`, then `X` is a mask of the input data.
918
+
919
+ precomputed : bool
920
+ Whether the input data is a mask.
921
+
922
+ Returns
923
+ -------
924
+ imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
925
+ n_features)
926
+ The imputer mask of the original data.
927
+ """
928
+ if precomputed:
929
+ if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
930
+ raise ValueError("precomputed is True but the input data is not a mask")
931
+ self._precomputed = True
932
+ else:
933
+ self._precomputed = False
934
+
935
+ # Need not validate X again as it would have already been validated
936
+ # in the Imputer calling MissingIndicator
937
+ if not self._precomputed:
938
+ X = self._validate_input(X, in_fit=True)
939
+ else:
940
+ # only create `n_features_in_` in the precomputed case
941
+ self._check_n_features(X, reset=True)
942
+
943
+ self._n_features = X.shape[1]
944
+
945
+ missing_features_info = self._get_missing_features_info(X)
946
+ self.features_ = missing_features_info[1]
947
+
948
+ return missing_features_info[0]
949
+
950
+ @_fit_context(prefer_skip_nested_validation=True)
951
+ def fit(self, X, y=None):
952
+ """Fit the transformer on `X`.
953
+
954
+ Parameters
955
+ ----------
956
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
957
+ Input data, where `n_samples` is the number of samples and
958
+ `n_features` is the number of features.
959
+
960
+ y : Ignored
961
+ Not used, present for API consistency by convention.
962
+
963
+ Returns
964
+ -------
965
+ self : object
966
+ Fitted estimator.
967
+ """
968
+ self._fit(X, y)
969
+
970
+ return self
971
+
972
+ def transform(self, X):
973
+ """Generate missing values indicator for `X`.
974
+
975
+ Parameters
976
+ ----------
977
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
978
+ The input data to complete.
979
+
980
+ Returns
981
+ -------
982
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
983
+ or (n_samples, n_features_with_missing)
984
+ The missing indicator for input data. The data type of `Xt`
985
+ will be boolean.
986
+ """
987
+ check_is_fitted(self)
988
+
989
+ # Need not validate X again as it would have already been validated
990
+ # in the Imputer calling MissingIndicator
991
+ if not self._precomputed:
992
+ X = self._validate_input(X, in_fit=False)
993
+ else:
994
+ if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
995
+ raise ValueError("precomputed is True but the input data is not a mask")
996
+
997
+ imputer_mask, features = self._get_missing_features_info(X)
998
+
999
+ if self.features == "missing-only":
1000
+ features_diff_fit_trans = np.setdiff1d(features, self.features_)
1001
+ if self.error_on_new and features_diff_fit_trans.size > 0:
1002
+ raise ValueError(
1003
+ "The features {} have missing values "
1004
+ "in transform but have no missing values "
1005
+ "in fit.".format(features_diff_fit_trans)
1006
+ )
1007
+
1008
+ if self.features_.size < self._n_features:
1009
+ imputer_mask = imputer_mask[:, self.features_]
1010
+
1011
+ return imputer_mask
1012
+
1013
+ @_fit_context(prefer_skip_nested_validation=True)
1014
+ def fit_transform(self, X, y=None):
1015
+ """Generate missing values indicator for `X`.
1016
+
1017
+ Parameters
1018
+ ----------
1019
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1020
+ The input data to complete.
1021
+
1022
+ y : Ignored
1023
+ Not used, present for API consistency by convention.
1024
+
1025
+ Returns
1026
+ -------
1027
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
1028
+ or (n_samples, n_features_with_missing)
1029
+ The missing indicator for input data. The data type of `Xt`
1030
+ will be boolean.
1031
+ """
1032
+ imputer_mask = self._fit(X, y)
1033
+
1034
+ if self.features_.size < self._n_features:
1035
+ imputer_mask = imputer_mask[:, self.features_]
1036
+
1037
+ return imputer_mask
1038
+
1039
+ def get_feature_names_out(self, input_features=None):
1040
+ """Get output feature names for transformation.
1041
+
1042
+ Parameters
1043
+ ----------
1044
+ input_features : array-like of str or None, default=None
1045
+ Input features.
1046
+
1047
+ - If `input_features` is `None`, then `feature_names_in_` is
1048
+ used as feature names in. If `feature_names_in_` is not defined,
1049
+ then the following input feature names are generated:
1050
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
1051
+ - If `input_features` is an array-like, then `input_features` must
1052
+ match `feature_names_in_` if `feature_names_in_` is defined.
1053
+
1054
+ Returns
1055
+ -------
1056
+ feature_names_out : ndarray of str objects
1057
+ Transformed feature names.
1058
+ """
1059
+ check_is_fitted(self, "n_features_in_")
1060
+ input_features = _check_feature_names_in(self, input_features)
1061
+ prefix = self.__class__.__name__.lower()
1062
+ return np.asarray(
1063
+ [
1064
+ f"{prefix}_{feature_name}"
1065
+ for feature_name in input_features[self.features_]
1066
+ ],
1067
+ dtype=object,
1068
+ )
1069
+
1070
+ def _more_tags(self):
1071
+ return {
1072
+ "allow_nan": True,
1073
+ "X_types": ["2darray", "string"],
1074
+ "preserves_dtype": [],
1075
+ }
env-llmeval/lib/python3.10/site-packages/sklearn/impute/_iterative.py ADDED
@@ -0,0 +1,906 @@
1
+ import warnings
2
+ from collections import namedtuple
3
+ from numbers import Integral, Real
4
+ from time import time
5
+
6
+ import numpy as np
7
+ from scipy import stats
8
+
9
+ from ..base import _fit_context, clone
10
+ from ..exceptions import ConvergenceWarning
11
+ from ..preprocessing import normalize
12
+ from ..utils import (
13
+ _safe_assign,
14
+ _safe_indexing,
15
+ check_array,
16
+ check_random_state,
17
+ is_scalar_nan,
18
+ )
19
+ from ..utils._mask import _get_mask
20
+ from ..utils._param_validation import HasMethods, Interval, StrOptions
21
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
22
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
23
+ from ._base import SimpleImputer, _BaseImputer, _check_inputs_dtype
24
+
25
+ _ImputerTriplet = namedtuple(
26
+ "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"]
27
+ )
28
+
29
+
30
+ def _assign_where(X1, X2, cond):
31
+ """Assign X2 to X1 where cond is True.
32
+
33
+ Parameters
34
+ ----------
35
+ X1 : ndarray or dataframe of shape (n_samples, n_features)
36
+ Data.
37
+
38
+ X2 : ndarray of shape (n_samples, n_features)
39
+ Data to be assigned.
40
+
41
+ cond : ndarray of shape (n_samples, n_features)
42
+ Boolean mask to assign data.
43
+ """
44
+ if hasattr(X1, "mask"): # pandas dataframes
45
+ X1.mask(cond=cond, other=X2, inplace=True)
46
+ else: # ndarrays
47
+ X1[cond] = X2[cond]
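A tiny illustration of the ndarray branch above (the pandas branch relies on `DataFrame.mask(..., inplace=True)` instead); the values are arbitrary:

    import numpy as np

    X1 = np.zeros((2, 2))
    X2 = np.array([[1.0, 2.0], [3.0, 4.0]])
    cond = np.array([[True, False], [False, True]])
    X1[cond] = X2[cond]  # X1 is now [[1., 0.], [0., 4.]]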
48
+
49
+
50
+ class IterativeImputer(_RoutingNotSupportedMixin, _BaseImputer):
51
+ """Multivariate imputer that estimates each feature from all the others.
52
+
53
+ A strategy for imputing missing values by modeling each feature with
54
+ missing values as a function of other features in a round-robin fashion.
55
+
56
+ Read more in the :ref:`User Guide <iterative_imputer>`.
57
+
58
+ .. versionadded:: 0.21
59
+
60
+ .. note::
61
+
62
+ This estimator is still **experimental** for now: the predictions
63
+ and the API might change without any deprecation cycle. To use it,
64
+ you need to explicitly import `enable_iterative_imputer`::
65
+
66
+ >>> # explicitly require this experimental feature
67
+ >>> from sklearn.experimental import enable_iterative_imputer # noqa
68
+ >>> # now you can import normally from sklearn.impute
69
+ >>> from sklearn.impute import IterativeImputer
70
+
71
+ Parameters
72
+ ----------
73
+ estimator : estimator object, default=BayesianRidge()
74
+ The estimator to use at each step of the round-robin imputation.
75
+ If `sample_posterior=True`, the estimator must support
76
+ `return_std` in its `predict` method.
77
+
78
+ missing_values : int or np.nan, default=np.nan
79
+ The placeholder for the missing values. All occurrences of
80
+ `missing_values` will be imputed. For pandas' dataframes with
81
+ nullable integer dtypes with missing values, `missing_values`
82
+ should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
83
+
84
+ sample_posterior : bool, default=False
85
+ Whether to sample from the (Gaussian) predictive posterior of the
86
+ fitted estimator for each imputation. Estimator must support
87
+ `return_std` in its `predict` method if set to `True`. Set to
88
+ `True` if using `IterativeImputer` for multiple imputations.
89
+
90
+ max_iter : int, default=10
91
+ Maximum number of imputation rounds to perform before returning the
92
+ imputations computed during the final round. A round is a single
93
+ imputation of each feature with missing values. The stopping criterion
94
+ is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`,
95
+ where `X_t` is `X` at iteration `t`. Note that early stopping is only
96
+ applied if `sample_posterior=False`.
97
+
98
+ tol : float, default=1e-3
99
+ Tolerance of the stopping condition.
100
+
101
+ n_nearest_features : int, default=None
102
+ Number of other features to use to estimate the missing values of
103
+ each feature column. Nearness between features is measured using
104
+ the absolute correlation coefficient between each feature pair (after
105
+ initial imputation). To ensure coverage of features throughout the
106
+ imputation process, the neighbor features are not necessarily nearest,
107
+ but are drawn with probability proportional to correlation for each
108
+ imputed target feature. Can provide significant speed-up when the
109
+ number of features is huge. If `None`, all features will be used.
110
+
111
+ initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \
112
+ default='mean'
113
+ Which strategy to use to initialize the missing values. Same as the
114
+ `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`.
115
+
116
+ fill_value : str or numerical value, default=None
117
+ When `strategy="constant"`, `fill_value` is used to replace all
118
+ occurrences of missing_values. For string or object data types,
119
+ `fill_value` must be a string.
120
+ If `None`, `fill_value` will be 0 when imputing numerical
121
+ data and "missing_value" for strings or object data types.
122
+
123
+ .. versionadded:: 1.3
124
+
125
+ imputation_order : {'ascending', 'descending', 'roman', 'arabic', \
126
+ 'random'}, default='ascending'
127
+ The order in which the features will be imputed. Possible values:
128
+
129
+ - `'ascending'`: From features with fewest missing values to most.
130
+ - `'descending'`: From features with most missing values to fewest.
131
+ - `'roman'`: Left to right.
132
+ - `'arabic'`: Right to left.
133
+ - `'random'`: A random order for each round.
134
+
135
+ skip_complete : bool, default=False
136
+ If `True` then features with missing values during :meth:`transform`
137
+ which did not have any missing values during :meth:`fit` will be
138
+ imputed with the initial imputation method only. Set to `True` if you
139
+ have many features with no missing values at both :meth:`fit` and
140
+ :meth:`transform` time to save compute.
141
+
142
+ min_value : float or array-like of shape (n_features,), default=-np.inf
143
+ Minimum possible imputed value. Broadcast to shape `(n_features,)` if
144
+ scalar. If array-like, expects shape `(n_features,)`, one min value for
145
+ each feature. The default is `-np.inf`.
146
+
147
+ .. versionchanged:: 0.23
148
+ Added support for array-like.
149
+
150
+ max_value : float or array-like of shape (n_features,), default=np.inf
151
+ Maximum possible imputed value. Broadcast to shape `(n_features,)` if
152
+ scalar. If array-like, expects shape `(n_features,)`, one max value for
153
+ each feature. The default is `np.inf`.
154
+
155
+ .. versionchanged:: 0.23
156
+ Added support for array-like.
157
+
158
+ verbose : int, default=0
159
+ Verbosity flag, controls the debug messages that are issued
160
+ as functions are evaluated. The higher, the more verbose. Can be 0, 1,
161
+ or 2.
162
+
163
+ random_state : int, RandomState instance or None, default=None
164
+ The seed of the pseudo random number generator to use. Randomizes
165
+ selection of estimator features if `n_nearest_features` is not `None`,
166
+ the `imputation_order` if `random`, and the sampling from posterior if
167
+ `sample_posterior=True`. Use an integer for determinism.
168
+ See :term:`the Glossary <random_state>`.
169
+
170
+ add_indicator : bool, default=False
171
+ If `True`, a :class:`MissingIndicator` transform will stack onto output
172
+ of the imputer's transform. This allows a predictive estimator
173
+ to account for missingness despite imputation. If a feature has no
174
+ missing values at fit/train time, the feature won't appear on
175
+ the missing indicator even if there are missing values at
176
+ transform/test time.
177
+
178
+ keep_empty_features : bool, default=False
179
+ If True, features that consist exclusively of missing values when
180
+ `fit` is called are returned in results when `transform` is called.
181
+ The imputed value is always `0` except when
182
+ `initial_strategy="constant"` in which case `fill_value` will be
183
+ used instead.
184
+
185
+ .. versionadded:: 1.2
186
+
187
+ Attributes
188
+ ----------
189
+ initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer`
190
+ Imputer used to initialize the missing values.
191
+
192
+ imputation_sequence_ : list of tuples
193
+ Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where
194
+ `feat_idx` is the current feature to be imputed,
195
+ `neighbor_feat_idx` is the array of other features used to impute the
196
+ current feature, and `estimator` is the trained estimator used for
197
+ the imputation. Length is `self.n_features_with_missing_ *
198
+ self.n_iter_`.
199
+
200
+ n_iter_ : int
201
+ Number of iteration rounds that occurred. Will be less than
202
+ `self.max_iter` if early stopping criterion was reached.
203
+
204
+ n_features_in_ : int
205
+ Number of features seen during :term:`fit`.
206
+
207
+ .. versionadded:: 0.24
208
+
209
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
210
+ Names of features seen during :term:`fit`. Defined only when `X`
211
+ has feature names that are all strings.
212
+
213
+ .. versionadded:: 1.0
214
+
215
+ n_features_with_missing_ : int
216
+ Number of features with missing values.
217
+
218
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
219
+ Indicator used to add binary indicators for missing values.
220
+ `None` if `add_indicator=False`.
221
+
222
+ random_state_ : RandomState instance
223
+ RandomState instance that is generated either from a seed, the random
224
+ number generator or by `np.random`.
225
+
226
+ See Also
227
+ --------
228
+ SimpleImputer : Univariate imputer for completing missing values
229
+ with simple strategies.
230
+ KNNImputer : Multivariate imputer that estimates missing features using
231
+ nearest samples.
232
+
233
+ Notes
234
+ -----
235
+ To support imputation in inductive mode we store each feature's estimator
236
+ during the :meth:`fit` phase, and predict without refitting (in order)
237
+ during the :meth:`transform` phase.
238
+
239
+ Features which contain all missing values at :meth:`fit` are discarded upon
240
+ :meth:`transform`.
241
+
242
+ Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))`
243
+ where :math:`k` = `max_iter`, :math:`n` the number of samples and
244
+ :math:`p` the number of features. It thus becomes prohibitively costly when
245
+ the number of features increases. Setting
246
+ `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol`
247
+ can help to reduce its computational cost.
248
+
249
+ Depending on the nature of missing values, simple imputers can be
250
+ preferable in a prediction context.
251
+
252
+ References
253
+ ----------
254
+ .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
255
+ Multivariate Imputation by Chained Equations in R". Journal of
256
+ Statistical Software 45: 1-67.
257
+ <https://www.jstatsoft.org/article/view/v045i03>`_
258
+
259
+ .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in
260
+ Multivariate Data Suitable for use with an Electronic Computer".
261
+ Journal of the Royal Statistical Society 22(2): 302-306.
262
+ <https://www.jstor.org/stable/2984099>`_
263
+
264
+ Examples
265
+ --------
266
+ >>> import numpy as np
267
+ >>> from sklearn.experimental import enable_iterative_imputer
268
+ >>> from sklearn.impute import IterativeImputer
269
+ >>> imp_mean = IterativeImputer(random_state=0)
270
+ >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
271
+ IterativeImputer(random_state=0)
272
+ >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
273
+ >>> imp_mean.transform(X)
274
+ array([[ 6.9584..., 2. , 3. ],
275
+ [ 4. , 2.6000..., 6. ],
276
+ [10. , 4.9999..., 9. ]])
277
+
278
+ For a more detailed example see
279
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py` or
280
+ :ref:`sphx_glr_auto_examples_impute_plot_iterative_imputer_variants_comparison.py`.
281
+ """
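Following the Notes section above, a hedged sketch of settings that can reduce the cost on wide data (the parameter values are arbitrary and chosen only for illustration):

    from sklearn.experimental import enable_iterative_imputer  # noqa
    from sklearn.impute import IterativeImputer

    imp = IterativeImputer(
        n_nearest_features=10,  # estimate each feature from a correlated subset only
        skip_complete=True,     # do not model features that had no missing values at fit time
        tol=1e-2,               # looser stopping tolerance, so fewer rounds
        random_state=0,
    )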
282
+
283
+ _parameter_constraints: dict = {
284
+ **_BaseImputer._parameter_constraints,
285
+ "estimator": [None, HasMethods(["fit", "predict"])],
286
+ "sample_posterior": ["boolean"],
287
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
288
+ "tol": [Interval(Real, 0, None, closed="left")],
289
+ "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")],
290
+ "initial_strategy": [
291
+ StrOptions({"mean", "median", "most_frequent", "constant"})
292
+ ],
293
+ "fill_value": "no_validation", # any object is valid
294
+ "imputation_order": [
295
+ StrOptions({"ascending", "descending", "roman", "arabic", "random"})
296
+ ],
297
+ "skip_complete": ["boolean"],
298
+ "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
299
+ "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
300
+ "verbose": ["verbose"],
301
+ "random_state": ["random_state"],
302
+ }
303
+
304
+ def __init__(
305
+ self,
306
+ estimator=None,
307
+ *,
308
+ missing_values=np.nan,
309
+ sample_posterior=False,
310
+ max_iter=10,
311
+ tol=1e-3,
312
+ n_nearest_features=None,
313
+ initial_strategy="mean",
314
+ fill_value=None,
315
+ imputation_order="ascending",
316
+ skip_complete=False,
317
+ min_value=-np.inf,
318
+ max_value=np.inf,
319
+ verbose=0,
320
+ random_state=None,
321
+ add_indicator=False,
322
+ keep_empty_features=False,
323
+ ):
324
+ super().__init__(
325
+ missing_values=missing_values,
326
+ add_indicator=add_indicator,
327
+ keep_empty_features=keep_empty_features,
328
+ )
329
+
330
+ self.estimator = estimator
331
+ self.sample_posterior = sample_posterior
332
+ self.max_iter = max_iter
333
+ self.tol = tol
334
+ self.n_nearest_features = n_nearest_features
335
+ self.initial_strategy = initial_strategy
336
+ self.fill_value = fill_value
337
+ self.imputation_order = imputation_order
338
+ self.skip_complete = skip_complete
339
+ self.min_value = min_value
340
+ self.max_value = max_value
341
+ self.verbose = verbose
342
+ self.random_state = random_state
343
+
344
+ def _impute_one_feature(
345
+ self,
346
+ X_filled,
347
+ mask_missing_values,
348
+ feat_idx,
349
+ neighbor_feat_idx,
350
+ estimator=None,
351
+ fit_mode=True,
352
+ ):
353
+ """Impute a single feature from the others provided.
354
+
355
+ This function predicts the missing values of one of the features using
356
+ the current estimates of all the other features. The `estimator` must
357
+ support `return_std=True` in its `predict` method for this function
358
+ to work.
359
+
360
+ Parameters
361
+ ----------
362
+ X_filled : ndarray
363
+ Input data with the most recent imputations.
364
+
365
+ mask_missing_values : ndarray
366
+ Input data's missing indicator matrix.
367
+
368
+ feat_idx : int
369
+ Index of the feature currently being imputed.
370
+
371
+ neighbor_feat_idx : ndarray
372
+ Indices of the features to be used in imputing `feat_idx`.
373
+
374
+ estimator : object
375
+ The estimator to use at this step of the round-robin imputation.
376
+ If `sample_posterior=True`, the estimator must support
377
+ `return_std` in its `predict` method.
378
+ If None, it will be cloned from self._estimator.
379
+
380
+ fit_mode : boolean, default=True
381
+ Whether to fit and predict with the estimator or just predict.
382
+
383
+ Returns
384
+ -------
385
+ X_filled : ndarray
386
+ Input data with `X_filled[missing_row_mask, feat_idx]` updated.
387
+
388
+ estimator : estimator with sklearn API
389
+ The fitted estimator used to impute
390
+ `X_filled[missing_row_mask, feat_idx]`.
391
+ """
392
+ if estimator is None and fit_mode is False:
393
+ raise ValueError(
394
+ "If fit_mode is False, then an already-fitted "
395
+ "estimator should be passed in."
396
+ )
397
+
398
+ if estimator is None:
399
+ estimator = clone(self._estimator)
400
+
401
+ missing_row_mask = mask_missing_values[:, feat_idx]
402
+ if fit_mode:
403
+ X_train = _safe_indexing(
404
+ _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
405
+ ~missing_row_mask,
406
+ axis=0,
407
+ )
408
+ y_train = _safe_indexing(
409
+ _safe_indexing(X_filled, feat_idx, axis=1),
410
+ ~missing_row_mask,
411
+ axis=0,
412
+ )
413
+ estimator.fit(X_train, y_train)
414
+
415
+ # if no missing values, don't predict
416
+ if np.sum(missing_row_mask) == 0:
417
+ return X_filled, estimator
418
+
419
+ # get posterior samples if there is at least one missing value
420
+ X_test = _safe_indexing(
421
+ _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
422
+ missing_row_mask,
423
+ axis=0,
424
+ )
425
+ if self.sample_posterior:
426
+ mus, sigmas = estimator.predict(X_test, return_std=True)
427
+ imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
428
+ # two types of problems: (1) non-positive sigmas
429
+ # (2) mus outside legal range of min_value and max_value
430
+ # (results in inf sample)
431
+ positive_sigmas = sigmas > 0
432
+ imputed_values[~positive_sigmas] = mus[~positive_sigmas]
433
+ mus_too_low = mus < self._min_value[feat_idx]
434
+ imputed_values[mus_too_low] = self._min_value[feat_idx]
435
+ mus_too_high = mus > self._max_value[feat_idx]
436
+ imputed_values[mus_too_high] = self._max_value[feat_idx]
437
+ # the rest can be sampled without statistical issues
438
+ inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
439
+ mus = mus[inrange_mask]
440
+ sigmas = sigmas[inrange_mask]
441
+ a = (self._min_value[feat_idx] - mus) / sigmas
442
+ b = (self._max_value[feat_idx] - mus) / sigmas
443
+
444
+ truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
445
+ imputed_values[inrange_mask] = truncated_normal.rvs(
446
+ random_state=self.random_state_
447
+ )
448
+ else:
449
+ imputed_values = estimator.predict(X_test)
450
+ imputed_values = np.clip(
451
+ imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]
452
+ )
453
+
454
+ # update the feature
455
+ _safe_assign(
456
+ X_filled,
457
+ imputed_values,
458
+ row_indexer=missing_row_mask,
459
+ column_indexer=feat_idx,
460
+ )
461
+ return X_filled, estimator
462
+
463
+ def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):
464
+ """Get a list of other features to predict `feat_idx`.
465
+
466
+ If `self.n_nearest_features` is less than or equal to the total
467
+ number of features, then use a probability proportional to the absolute
468
+ correlation between `feat_idx` and each other feature to randomly
469
+ choose a subsample of the other features (without replacement).
470
+
471
+ Parameters
472
+ ----------
473
+ n_features : int
474
+ Number of features in `X`.
475
+
476
+ feat_idx : int
477
+ Index of the feature currently being imputed.
478
+
479
+ abs_corr_mat : ndarray, shape (n_features, n_features)
480
+ Absolute correlation matrix of `X`. The diagonal has been zeroed
481
+ out and each feature has been normalized to sum to 1. Can be None.
482
+
483
+ Returns
484
+ -------
485
+ neighbor_feat_idx : array-like
486
+ The features to use to impute `feat_idx`.
487
+ """
488
+ if self.n_nearest_features is not None and self.n_nearest_features < n_features:
489
+ p = abs_corr_mat[:, feat_idx]
490
+ neighbor_feat_idx = self.random_state_.choice(
491
+ np.arange(n_features), self.n_nearest_features, replace=False, p=p
492
+ )
493
+ else:
494
+ inds_left = np.arange(feat_idx)
495
+ inds_right = np.arange(feat_idx + 1, n_features)
496
+ neighbor_feat_idx = np.concatenate((inds_left, inds_right))
497
+ return neighbor_feat_idx
498
+
499
+ def _get_ordered_idx(self, mask_missing_values):
500
+ """Decide in what order we will update the features.
501
+
502
+ As a homage to the MICE R package, we will have 4 main options of
503
+ how to order the updates, and use a random order if anything else
504
+ is specified.
505
+
506
+ Also, this function skips features which have no missing values.
507
+
508
+ Parameters
509
+ ----------
510
+ mask_missing_values : array-like, shape (n_samples, n_features)
511
+ Input data's missing indicator matrix, where `n_samples` is the
512
+ number of samples and `n_features` is the number of features.
513
+
514
+ Returns
515
+ -------
516
+ ordered_idx : ndarray, shape (n_features,)
517
+ The order in which to impute the features.
518
+ """
519
+ frac_of_missing_values = mask_missing_values.mean(axis=0)
520
+ if self.skip_complete:
521
+ missing_values_idx = np.flatnonzero(frac_of_missing_values)
522
+ else:
523
+ missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
524
+ if self.imputation_order == "roman":
525
+ ordered_idx = missing_values_idx
526
+ elif self.imputation_order == "arabic":
527
+ ordered_idx = missing_values_idx[::-1]
528
+ elif self.imputation_order == "ascending":
529
+ n = len(frac_of_missing_values) - len(missing_values_idx)
530
+ ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:]
531
+ elif self.imputation_order == "descending":
532
+ n = len(frac_of_missing_values) - len(missing_values_idx)
533
+ ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1]
534
+ elif self.imputation_order == "random":
535
+ ordered_idx = missing_values_idx
536
+ self.random_state_.shuffle(ordered_idx)
537
+ return ordered_idx
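A small worked illustration of the `'ascending'` branch above, using hypothetical missing-value fractions:

    import numpy as np

    frac = np.array([0.0, 0.5, 0.2])        # fraction of missing values per feature
    missing_idx = np.flatnonzero(frac)      # [1, 2] when skip_complete=True
    n = len(frac) - len(missing_idx)        # 1 complete feature dropped from the ordering
    np.argsort(frac, kind="mergesort")[n:]  # array([2, 1]): impute feature 2 first, then 1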
538
+
539
+ def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
540
+ """Get absolute correlation matrix between features.
541
+
542
+ Parameters
543
+ ----------
544
+ X_filled : ndarray, shape (n_samples, n_features)
545
+ Input data with the most recent imputations.
546
+
547
+ tolerance : float, default=1e-6
548
+ `abs_corr_mat` can have nans, which will be replaced
549
+ with `tolerance`.
550
+
551
+ Returns
552
+ -------
553
+ abs_corr_mat : ndarray, shape (n_features, n_features)
554
+ Absolute correlation matrix of `X` at the beginning of the
555
+ current round. The diagonal has been zeroed out and each feature's
556
+ absolute correlations with all others have been normalized to sum
557
+ to 1.
558
+ """
559
+ n_features = X_filled.shape[1]
560
+ if self.n_nearest_features is None or self.n_nearest_features >= n_features:
561
+ return None
562
+ with np.errstate(invalid="ignore"):
563
+ # if a feature in the neighborhood has only a single value
564
+ # (e.g., categorical feature), the std. dev. will be null and
565
+ # np.corrcoef will raise a warning due to a division by zero
566
+ abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
567
+ # np.corrcoef is not defined for features with zero std
568
+ abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
569
+ # ensures exploration, i.e. at least some probability of sampling
570
+ np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
571
+ # features are not their own neighbors
572
+ np.fill_diagonal(abs_corr_mat, 0)
573
+ # needs to sum to 1 for np.random.choice sampling
574
+ abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False)
575
+ return abs_corr_mat
576
+
577
+ def _initial_imputation(self, X, in_fit=False):
578
+ """Perform initial imputation for input `X`.
579
+
580
+ Parameters
581
+ ----------
582
+ X : ndarray of shape (n_samples, n_features)
583
+ Input data, where `n_samples` is the number of samples and
584
+ `n_features` is the number of features.
585
+
586
+ in_fit : bool, default=False
587
+ Whether function is called in :meth:`fit`.
588
+
589
+ Returns
590
+ -------
591
+ Xt : ndarray of shape (n_samples, n_features)
592
+ Input data, where `n_samples` is the number of samples and
593
+ `n_features` is the number of features.
594
+
595
+ X_filled : ndarray of shape (n_samples, n_features)
596
+ Input data with the most recent imputations.
597
+
598
+ mask_missing_values : ndarray of shape (n_samples, n_features)
599
+ Input data's missing indicator matrix, where `n_samples` is the
600
+ number of samples and `n_features` is the number of features,
601
+ masked by non-missing features.
602
+
603
+ X_missing_mask : ndarray, shape (n_samples, n_features)
604
+ Input data's mask matrix indicating missing datapoints, where
605
+ `n_samples` is the number of samples and `n_features` is the
606
+ number of features.
607
+ """
608
+ if is_scalar_nan(self.missing_values):
609
+ force_all_finite = "allow-nan"
610
+ else:
611
+ force_all_finite = True
612
+
613
+ X = self._validate_data(
614
+ X,
615
+ dtype=FLOAT_DTYPES,
616
+ order="F",
617
+ reset=in_fit,
618
+ force_all_finite=force_all_finite,
619
+ )
620
+ _check_inputs_dtype(X, self.missing_values)
621
+
622
+ X_missing_mask = _get_mask(X, self.missing_values)
623
+ mask_missing_values = X_missing_mask.copy()
624
+ if self.initial_imputer_ is None:
625
+ self.initial_imputer_ = SimpleImputer(
626
+ missing_values=self.missing_values,
627
+ strategy=self.initial_strategy,
628
+ fill_value=self.fill_value,
629
+ keep_empty_features=self.keep_empty_features,
630
+ ).set_output(transform="default")
631
+ X_filled = self.initial_imputer_.fit_transform(X)
632
+ else:
633
+ X_filled = self.initial_imputer_.transform(X)
634
+
635
+ valid_mask = np.flatnonzero(
636
+ np.logical_not(np.isnan(self.initial_imputer_.statistics_))
637
+ )
638
+
639
+ if not self.keep_empty_features:
640
+ # drop empty features
641
+ Xt = X[:, valid_mask]
642
+ mask_missing_values = mask_missing_values[:, valid_mask]
643
+ else:
644
+ # mark empty features as not missing and keep the original
645
+ # imputation
646
+ mask_missing_values[:, valid_mask] = True
647
+ Xt = X
648
+
649
+ return Xt, X_filled, mask_missing_values, X_missing_mask
650
+
651
+ @staticmethod
652
+ def _validate_limit(limit, limit_type, n_features):
653
+ """Validate the limits (min/max) of the feature values.
654
+
655
+ Converts scalar min/max limits to vectors of shape `(n_features,)`.
656
+
657
+ Parameters
658
+ ----------
659
+ limit: scalar or array-like
660
+ The user-specified limit (i.e, min_value or max_value).
661
+ limit_type: {'max', 'min'}
662
+ Type of limit to validate.
663
+ n_features: int
664
+ Number of features in the dataset.
665
+
666
+ Returns
667
+ -------
668
+ limit: ndarray, shape(n_features,)
669
+ Array of limits, one for each feature.
670
+ """
671
+ limit_bound = np.inf if limit_type == "max" else -np.inf
672
+ limit = limit_bound if limit is None else limit
673
+ if np.isscalar(limit):
674
+ limit = np.full(n_features, limit)
675
+ limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False)
676
+ if not limit.shape[0] == n_features:
677
+ raise ValueError(
678
+ f"'{limit_type}_value' should be of "
679
+ f"shape ({n_features},) when an array-like "
680
+ f"is provided. Got {limit.shape}, instead."
681
+ )
682
+ return limit
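Illustrative calls to this helper (it is private, so these are shown only to make the broadcasting concrete):

    from sklearn.experimental import enable_iterative_imputer  # noqa
    from sklearn.impute import IterativeImputer

    IterativeImputer._validate_limit(0, "min", 3)     # array([0, 0, 0]): scalar broadcast per feature
    IterativeImputer._validate_limit(None, "max", 3)  # array([inf, inf, inf]): None means unbounded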
683
+
684
+ @_fit_context(
685
+ # IterativeImputer.estimator is not validated yet
686
+ prefer_skip_nested_validation=False
687
+ )
688
+ def fit_transform(self, X, y=None):
689
+ """Fit the imputer on `X` and return the transformed `X`.
690
+
691
+ Parameters
692
+ ----------
693
+ X : array-like, shape (n_samples, n_features)
694
+ Input data, where `n_samples` is the number of samples and
695
+ `n_features` is the number of features.
696
+
697
+ y : Ignored
698
+ Not used, present for API consistency by convention.
699
+
700
+ Returns
701
+ -------
702
+ Xt : array-like, shape (n_samples, n_features)
703
+ The imputed input data.
704
+ """
705
+ self.random_state_ = getattr(
706
+ self, "random_state_", check_random_state(self.random_state)
707
+ )
708
+
709
+ if self.estimator is None:
710
+ from ..linear_model import BayesianRidge
711
+
712
+ self._estimator = BayesianRidge()
713
+ else:
714
+ self._estimator = clone(self.estimator)
715
+
716
+ self.imputation_sequence_ = []
717
+
718
+ self.initial_imputer_ = None
719
+
720
+ X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
721
+ X, in_fit=True
722
+ )
723
+
724
+ super()._fit_indicator(complete_mask)
725
+ X_indicator = super()._transform_indicator(complete_mask)
726
+
727
+ if self.max_iter == 0 or np.all(mask_missing_values):
728
+ self.n_iter_ = 0
729
+ return super()._concatenate_indicator(Xt, X_indicator)
730
+
731
+ # Edge case: a single feature. We return the initial ...
732
+ if Xt.shape[1] == 1:
733
+ self.n_iter_ = 0
734
+ return super()._concatenate_indicator(Xt, X_indicator)
735
+
736
+ self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
737
+ self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
738
+
739
+ if not np.all(np.greater(self._max_value, self._min_value)):
740
+ raise ValueError("One (or more) features have min_value >= max_value.")
741
+
742
+ # order in which to impute
743
+ # note this is probably too slow for large feature data (d > 100000)
744
+ # and a better way would be good.
745
+ # see: https://goo.gl/KyCNwj and subsequent comments
746
+ ordered_idx = self._get_ordered_idx(mask_missing_values)
747
+ self.n_features_with_missing_ = len(ordered_idx)
748
+
749
+ abs_corr_mat = self._get_abs_corr_mat(Xt)
750
+
751
+ n_samples, n_features = Xt.shape
752
+ if self.verbose > 0:
753
+ print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
754
+ start_t = time()
755
+ if not self.sample_posterior:
756
+ Xt_previous = Xt.copy()
757
+ normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
758
+ for self.n_iter_ in range(1, self.max_iter + 1):
759
+ if self.imputation_order == "random":
760
+ ordered_idx = self._get_ordered_idx(mask_missing_values)
761
+
762
+ for feat_idx in ordered_idx:
763
+ neighbor_feat_idx = self._get_neighbor_feat_idx(
764
+ n_features, feat_idx, abs_corr_mat
765
+ )
766
+ Xt, estimator = self._impute_one_feature(
767
+ Xt,
768
+ mask_missing_values,
769
+ feat_idx,
770
+ neighbor_feat_idx,
771
+ estimator=None,
772
+ fit_mode=True,
773
+ )
774
+ estimator_triplet = _ImputerTriplet(
775
+ feat_idx, neighbor_feat_idx, estimator
776
+ )
777
+ self.imputation_sequence_.append(estimator_triplet)
778
+
779
+ if self.verbose > 1:
780
+ print(
781
+ "[IterativeImputer] Ending imputation round "
782
+ "%d/%d, elapsed time %0.2f"
783
+ % (self.n_iter_, self.max_iter, time() - start_t)
784
+ )
785
+
786
+ if not self.sample_posterior:
787
+ inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
788
+ if self.verbose > 0:
789
+ print(
790
+ "[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
791
+ inf_norm, normalized_tol
792
+ )
793
+ )
794
+ if inf_norm < normalized_tol:
795
+ if self.verbose > 0:
796
+ print("[IterativeImputer] Early stopping criterion reached.")
797
+ break
798
+ Xt_previous = Xt.copy()
799
+ else:
800
+ if not self.sample_posterior:
801
+ warnings.warn(
802
+ "[IterativeImputer] Early stopping criterion not reached.",
803
+ ConvergenceWarning,
804
+ )
805
+ _assign_where(Xt, X, cond=~mask_missing_values)
806
+
807
+ return super()._concatenate_indicator(Xt, X_indicator)
808
+
809
+ def transform(self, X):
810
+ """Impute all missing values in `X`.
811
+
812
+ Note that this is stochastic, and that if `random_state` is not fixed,
813
+ repeated calls, or permuted input, results will differ.
814
+
815
+ Parameters
816
+ ----------
817
+ X : array-like of shape (n_samples, n_features)
818
+ The input data to complete.
819
+
820
+ Returns
821
+ -------
822
+ Xt : array-like, shape (n_samples, n_features)
823
+ The imputed input data.
824
+ """
825
+ check_is_fitted(self)
826
+
827
+ X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
828
+ X, in_fit=False
829
+ )
830
+
831
+ X_indicator = super()._transform_indicator(complete_mask)
832
+
833
+ if self.n_iter_ == 0 or np.all(mask_missing_values):
834
+ return super()._concatenate_indicator(Xt, X_indicator)
835
+
836
+ imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
837
+ i_rnd = 0
838
+ if self.verbose > 0:
839
+ print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
840
+ start_t = time()
841
+ for it, estimator_triplet in enumerate(self.imputation_sequence_):
842
+ Xt, _ = self._impute_one_feature(
843
+ Xt,
844
+ mask_missing_values,
845
+ estimator_triplet.feat_idx,
846
+ estimator_triplet.neighbor_feat_idx,
847
+ estimator=estimator_triplet.estimator,
848
+ fit_mode=False,
849
+ )
850
+ if not (it + 1) % imputations_per_round:
851
+ if self.verbose > 1:
852
+ print(
853
+ "[IterativeImputer] Ending imputation round "
854
+ "%d/%d, elapsed time %0.2f"
855
+ % (i_rnd + 1, self.n_iter_, time() - start_t)
856
+ )
857
+ i_rnd += 1
858
+
859
+ _assign_where(Xt, X, cond=~mask_missing_values)
860
+
861
+ return super()._concatenate_indicator(Xt, X_indicator)
862
+
863
+ def fit(self, X, y=None):
864
+ """Fit the imputer on `X` and return self.
865
+
866
+ Parameters
867
+ ----------
868
+ X : array-like, shape (n_samples, n_features)
869
+ Input data, where `n_samples` is the number of samples and
870
+ `n_features` is the number of features.
871
+
872
+ y : Ignored
873
+ Not used, present for API consistency by convention.
874
+
875
+ Returns
876
+ -------
877
+ self : object
878
+ Fitted estimator.
879
+ """
880
+ self.fit_transform(X)
881
+ return self
882
+
883
+ def get_feature_names_out(self, input_features=None):
884
+ """Get output feature names for transformation.
885
+
886
+ Parameters
887
+ ----------
888
+ input_features : array-like of str or None, default=None
889
+ Input features.
890
+
891
+ - If `input_features` is `None`, then `feature_names_in_` is
892
+ used as feature names in. If `feature_names_in_` is not defined,
893
+ then the following input feature names are generated:
894
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
895
+ - If `input_features` is an array-like, then `input_features` must
896
+ match `feature_names_in_` if `feature_names_in_` is defined.
897
+
898
+ Returns
899
+ -------
900
+ feature_names_out : ndarray of str objects
901
+ Transformed feature names.
902
+ """
903
+ check_is_fitted(self, "n_features_in_")
904
+ input_features = _check_feature_names_in(self, input_features)
905
+ names = self.initial_imputer_.get_feature_names_out(input_features)
906
+ return self._concatenate_indicator_feature_names_out(names, input_features)
env-llmeval/lib/python3.10/site-packages/sklearn/impute/_knn.py ADDED
@@ -0,0 +1,401 @@
1
+ # Authors: Ashim Bhattarai <[email protected]>
2
+ # Thomas J Fan <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Integral
6
+
7
+ import numpy as np
8
+
9
+ from ..base import _fit_context
10
+ from ..metrics import pairwise_distances_chunked
11
+ from ..metrics.pairwise import _NAN_METRICS
12
+ from ..neighbors._base import _get_weights
13
+ from ..utils import is_scalar_nan
14
+ from ..utils._mask import _get_mask
15
+ from ..utils._param_validation import Hidden, Interval, StrOptions
16
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
17
+ from ._base import _BaseImputer
18
+
19
+
20
+ class KNNImputer(_BaseImputer):
21
+ """Imputation for completing missing values using k-Nearest Neighbors.
22
+
23
+ Each sample's missing values are imputed using the mean value from
24
+ `n_neighbors` nearest neighbors found in the training set. Two samples are
25
+ close if the features that neither is missing are close.
26
+
27
+ Read more in the :ref:`User Guide <knnimpute>`.
28
+
29
+ .. versionadded:: 0.22
30
+
31
+ Parameters
32
+ ----------
33
+ missing_values : int, float, str, np.nan or None, default=np.nan
34
+ The placeholder for the missing values. All occurrences of
35
+ `missing_values` will be imputed. For pandas' dataframes with
36
+ nullable integer dtypes with missing values, `missing_values`
37
+ should be set to np.nan, since `pd.NA` will be converted to np.nan.
38
+
39
+ n_neighbors : int, default=5
40
+ Number of neighboring samples to use for imputation.
41
+
42
+ weights : {'uniform', 'distance'} or callable, default='uniform'
43
+ Weight function used in prediction. Possible values:
44
+
45
+ - 'uniform' : uniform weights. All points in each neighborhood are
46
+ weighted equally.
47
+ - 'distance' : weight points by the inverse of their distance.
48
+ In this case, closer neighbors of a query point will have a
49
+ greater influence than neighbors which are further away.
50
+ - callable : a user-defined function which accepts an
51
+ array of distances, and returns an array of the same shape
52
+ containing the weights.
53
+
54
+ metric : {'nan_euclidean'} or callable, default='nan_euclidean'
55
+ Distance metric for searching neighbors. Possible values:
56
+
57
+ - 'nan_euclidean'
58
+ - callable : a user-defined function which conforms to the definition
59
+ of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
60
+ accepts two arrays, X and Y, and a `missing_values` keyword in
61
+ `kwds` and returns a scalar distance value.
62
+
63
+ copy : bool, default=True
64
+ If True, a copy of X will be created. If False, imputation will
65
+ be done in-place whenever possible.
66
+
67
+ add_indicator : bool, default=False
68
+ If True, a :class:`MissingIndicator` transform will stack onto the
69
+ output of the imputer's transform. This allows a predictive estimator
70
+ to account for missingness despite imputation. If a feature has no
71
+ missing values at fit/train time, the feature won't appear on the
72
+ missing indicator even if there are missing values at transform/test
73
+ time.
74
+
75
+ keep_empty_features : bool, default=False
76
+ If True, features that consist exclusively of missing values when
77
+ `fit` is called are returned in results when `transform` is called.
78
+ The imputed value is always `0`.
79
+
80
+ .. versionadded:: 1.2
81
+
82
+ Attributes
83
+ ----------
84
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
85
+ Indicator used to add binary indicators for missing values.
86
+ ``None`` if add_indicator is False.
87
+
88
+ n_features_in_ : int
89
+ Number of features seen during :term:`fit`.
90
+
91
+ .. versionadded:: 0.24
92
+
93
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
94
+ Names of features seen during :term:`fit`. Defined only when `X`
95
+ has feature names that are all strings.
96
+
97
+ .. versionadded:: 1.0
98
+
99
+ See Also
100
+ --------
101
+ SimpleImputer : Univariate imputer for completing missing values
102
+ with simple strategies.
103
+ IterativeImputer : Multivariate imputer that estimates values to impute for
104
+ each feature with missing values from all the others.
105
+
106
+ References
107
+ ----------
108
+ * `Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
109
+ Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
110
+ value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
111
+ no. 6, 2001 Pages 520-525.
112
+ <https://academic.oup.com/bioinformatics/article/17/6/520/272365>`_
113
+
114
+ Examples
115
+ --------
116
+ >>> import numpy as np
117
+ >>> from sklearn.impute import KNNImputer
118
+ >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
119
+ >>> imputer = KNNImputer(n_neighbors=2)
120
+ >>> imputer.fit_transform(X)
121
+ array([[1. , 2. , 4. ],
122
+ [3. , 4. , 3. ],
123
+ [5.5, 6. , 5. ],
124
+ [8. , 8. , 7. ]])
125
+
126
+ For a more detailed example see
127
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`.
128
+ """
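To see which neighbors drive the example above, the underlying distance metric can be inspected directly (illustrative only); `nan_euclidean_distances` skips coordinates that are missing in either sample:

    import numpy as np
    from sklearn.metrics.pairwise import nan_euclidean_distances

    X = np.array([[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]])
    nan_euclidean_distances(X, X)  # pairwise distances used to pick the n_neighbors donors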
129
+
130
+ _parameter_constraints: dict = {
131
+ **_BaseImputer._parameter_constraints,
132
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
133
+ "weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)],
134
+ "metric": [StrOptions(set(_NAN_METRICS)), callable],
135
+ "copy": ["boolean"],
136
+ }
137
+
138
+ def __init__(
139
+ self,
140
+ *,
141
+ missing_values=np.nan,
142
+ n_neighbors=5,
143
+ weights="uniform",
144
+ metric="nan_euclidean",
145
+ copy=True,
146
+ add_indicator=False,
147
+ keep_empty_features=False,
148
+ ):
149
+ super().__init__(
150
+ missing_values=missing_values,
151
+ add_indicator=add_indicator,
152
+ keep_empty_features=keep_empty_features,
153
+ )
154
+ self.n_neighbors = n_neighbors
155
+ self.weights = weights
156
+ self.metric = metric
157
+ self.copy = copy
158
+
159
+ def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):
160
+ """Helper function to impute a single column.
161
+
162
+ Parameters
163
+ ----------
164
+ dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
165
+ Distance matrix between the receivers and potential donors from
166
+ training set. There must be at least one non-nan distance between
167
+ a receiver and a potential donor.
168
+
169
+ n_neighbors : int
170
+ Number of neighbors to consider.
171
+
172
+ fit_X_col : ndarray of shape (n_potential_donors,)
173
+ Column of potential donors from training set.
174
+
175
+ mask_fit_X_col : ndarray of shape (n_potential_donors,)
176
+ Missing mask for fit_X_col.
177
+
178
+ Returns
179
+ -------
180
+ imputed_values: ndarray of shape (n_receivers,)
181
+ Imputed values for receiver.
182
+ """
183
+ # Get donors
184
+ donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[
185
+ :, :n_neighbors
186
+ ]
187
+
188
+ # Get weight matrix from distance matrix
189
+ donors_dist = dist_pot_donors[
190
+ np.arange(donors_idx.shape[0])[:, None], donors_idx
191
+ ]
192
+
193
+ weight_matrix = _get_weights(donors_dist, self.weights)
194
+
195
+ # fill nans with zeros
196
+ if weight_matrix is not None:
197
+ weight_matrix[np.isnan(weight_matrix)] = 0.0
198
+
199
+ # Retrieve donor values and calculate kNN average
200
+ donors = fit_X_col.take(donors_idx)
201
+ donors_mask = mask_fit_X_col.take(donors_idx)
202
+ donors = np.ma.array(donors, mask=donors_mask)
203
+
204
+ return np.ma.average(donors, axis=1, weights=weight_matrix).data
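A hedged illustration of the masked weighted average used above, with two hypothetical donors whose values are 3 and 5:

    import numpy as np

    donors = np.ma.array([[3.0, 5.0]], mask=[[False, False]])
    weights = np.array([[1.0, 1.0]])
    np.ma.average(donors, axis=1, weights=weights).data  # array([4.]): the uniform kNN mean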
205
+
206
+ @_fit_context(prefer_skip_nested_validation=True)
207
+ def fit(self, X, y=None):
208
+ """Fit the imputer on X.
209
+
210
+ Parameters
211
+ ----------
212
+ X : array-like of shape (n_samples, n_features)
213
+ Input data, where `n_samples` is the number of samples and
214
+ `n_features` is the number of features.
215
+
216
+ y : Ignored
217
+ Not used, present here for API consistency by convention.
218
+
219
+ Returns
220
+ -------
221
+ self : object
222
+ The fitted `KNNImputer` class instance.
223
+ """
224
+ # Check data integrity and calling arguments
225
+ if not is_scalar_nan(self.missing_values):
226
+ force_all_finite = True
227
+ else:
228
+ force_all_finite = "allow-nan"
229
+
230
+ X = self._validate_data(
231
+ X,
232
+ accept_sparse=False,
233
+ dtype=FLOAT_DTYPES,
234
+ force_all_finite=force_all_finite,
235
+ copy=self.copy,
236
+ )
237
+
238
+ self._fit_X = X
239
+ self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
240
+ self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
241
+
242
+ super()._fit_indicator(self._mask_fit_X)
243
+
244
+ return self
245
+
246
+ def transform(self, X):
247
+ """Impute all missing values in X.
248
+
249
+ Parameters
250
+ ----------
251
+ X : array-like of shape (n_samples, n_features)
252
+ The input data to complete.
253
+
254
+ Returns
255
+ -------
256
+ X : array-like of shape (n_samples, n_output_features)
257
+ The imputed dataset. `n_output_features` is the number of features
258
+ that is not always missing during `fit`.
259
+ """
260
+
261
+ check_is_fitted(self)
262
+ if not is_scalar_nan(self.missing_values):
263
+ force_all_finite = True
264
+ else:
265
+ force_all_finite = "allow-nan"
266
+ X = self._validate_data(
267
+ X,
268
+ accept_sparse=False,
269
+ dtype=FLOAT_DTYPES,
270
+ force_all_finite=force_all_finite,
271
+ copy=self.copy,
272
+ reset=False,
273
+ )
274
+
275
+ mask = _get_mask(X, self.missing_values)
276
+ mask_fit_X = self._mask_fit_X
277
+ valid_mask = self._valid_mask
278
+
279
+ X_indicator = super()._transform_indicator(mask)
280
+
281
+ # Removes columns where the training data is all nan
282
+ if not np.any(mask):
283
+ # No missing values in X
284
+ if self.keep_empty_features:
285
+ Xc = X
286
+ Xc[:, ~valid_mask] = 0
287
+ else:
288
+ Xc = X[:, valid_mask]
289
+
290
+ # Even if there are no missing values in X, we still concatenate Xc
291
+ # with the missing value indicator matrix, X_indicator.
292
+ # This is to ensure that the output maintains consistency in terms
293
+ # of columns, regardless of whether missing values exist in X or not.
294
+ return super()._concatenate_indicator(Xc, X_indicator)
295
+
296
+ row_missing_idx = np.flatnonzero(mask.any(axis=1))
297
+
298
+ non_missing_fix_X = np.logical_not(mask_fit_X)
299
+
300
+ # Maps from indices from X to indices in dist matrix
301
+ dist_idx_map = np.zeros(X.shape[0], dtype=int)
302
+ dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
303
+
304
+ def process_chunk(dist_chunk, start):
305
+ row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
306
+
307
+ # Find and impute missing by column
308
+ for col in range(X.shape[1]):
309
+ if not valid_mask[col]:
310
+ # column was all missing during training
311
+ continue
312
+
313
+ col_mask = mask[row_missing_chunk, col]
314
+ if not np.any(col_mask):
315
+ # column has no missing values
316
+ continue
317
+
318
+ (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
319
+
320
+ # receivers_idx are indices in X
321
+ receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
322
+
323
+ # distances for samples that needed imputation for column
324
+ dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
325
+ :, potential_donors_idx
326
+ ]
327
+
328
+ # receivers with all nan distances impute with mean
329
+ all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
330
+ all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
331
+
332
+ if all_nan_receivers_idx.size:
333
+ col_mean = np.ma.array(
334
+ self._fit_X[:, col], mask=mask_fit_X[:, col]
335
+ ).mean()
336
+ X[all_nan_receivers_idx, col] = col_mean
337
+
338
+ if len(all_nan_receivers_idx) == len(receivers_idx):
339
+ # all receivers imputed with mean
340
+ continue
341
+
342
+ # receivers with at least one defined distance
343
+ receivers_idx = receivers_idx[~all_nan_dist_mask]
344
+ dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
345
+ :, potential_donors_idx
346
+ ]
347
+
348
+ n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
349
+ value = self._calc_impute(
350
+ dist_subset,
351
+ n_neighbors,
352
+ self._fit_X[potential_donors_idx, col],
353
+ mask_fit_X[potential_donors_idx, col],
354
+ )
355
+ X[receivers_idx, col] = value
356
+
357
+ # process in fixed-memory chunks
358
+ gen = pairwise_distances_chunked(
359
+ X[row_missing_idx, :],
360
+ self._fit_X,
361
+ metric=self.metric,
362
+ missing_values=self.missing_values,
363
+ force_all_finite=force_all_finite,
364
+ reduce_func=process_chunk,
365
+ )
366
+ for chunk in gen:
367
+ # process_chunk modifies X in place. No return value.
368
+ pass
369
+
370
+ if self.keep_empty_features:
371
+ Xc = X
372
+ Xc[:, ~valid_mask] = 0
373
+ else:
374
+ Xc = X[:, valid_mask]
375
+
376
+ return super()._concatenate_indicator(Xc, X_indicator)
377
+
378
+ def get_feature_names_out(self, input_features=None):
379
+ """Get output feature names for transformation.
380
+
381
+ Parameters
382
+ ----------
383
+ input_features : array-like of str or None, default=None
384
+ Input features.
385
+
386
+ - If `input_features` is `None`, then `feature_names_in_` is
387
+ used as feature names in. If `feature_names_in_` is not defined,
388
+ then the following input feature names are generated:
389
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
390
+ - If `input_features` is an array-like, then `input_features` must
391
+ match `feature_names_in_` if `feature_names_in_` is defined.
392
+
393
+ Returns
394
+ -------
395
+ feature_names_out : ndarray of str objects
396
+ Transformed feature names.
397
+ """
398
+ check_is_fitted(self, "n_features_in_")
399
+ input_features = _check_feature_names_in(self, input_features)
400
+ names = input_features[self._valid_mask]
401
+ return self._concatenate_indicator_feature_names_out(names, input_features)
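A minimal usage sketch (not part of the diff above) of the KNNImputer implemented in this file: each missing entry is filled from the `n_neighbors` nearest samples under the nan-aware distance metric, falling back to the column mean when every candidate distance is nan. The data values below are illustrative only.
import numpy as np
from sklearn.impute import KNNImputer

X = np.array([[1.0, 2.0, np.nan],
              [3.0, 4.0, 3.0],
              [np.nan, 6.0, 5.0],
              [8.0, 8.0, 7.0]])

# n_neighbors and weights map onto the constructor arguments stored in
# __init__ above; the defaults are n_neighbors=5 and weights="uniform".
imputer = KNNImputer(n_neighbors=2, weights="uniform")
print(imputer.fit_transform(X))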
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (4.46 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (5.78 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc ADDED
Binary file (43.1 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc ADDED
Binary file (12 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py ADDED
@@ -0,0 +1,107 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.impute._base import _BaseImputer
5
+ from sklearn.impute._iterative import _assign_where
6
+ from sklearn.utils._mask import _get_mask
7
+ from sklearn.utils._testing import _convert_container, assert_allclose
8
+
9
+
10
+ @pytest.fixture
11
+ def data():
12
+ X = np.random.randn(10, 2)
13
+ X[::2] = np.nan
14
+ return X
15
+
16
+
17
+ class NoFitIndicatorImputer(_BaseImputer):
18
+ def fit(self, X, y=None):
19
+ return self
20
+
21
+ def transform(self, X, y=None):
22
+ return self._concatenate_indicator(X, self._transform_indicator(X))
23
+
24
+
25
+ class NoTransformIndicatorImputer(_BaseImputer):
26
+ def fit(self, X, y=None):
27
+ mask = _get_mask(X, value_to_mask=np.nan)
28
+ super()._fit_indicator(mask)
29
+ return self
30
+
31
+ def transform(self, X, y=None):
32
+ return self._concatenate_indicator(X, None)
33
+
34
+
35
+ class NoPrecomputedMaskFit(_BaseImputer):
36
+ def fit(self, X, y=None):
37
+ self._fit_indicator(X)
38
+ return self
39
+
40
+ def transform(self, X):
41
+ return self._concatenate_indicator(X, self._transform_indicator(X))
42
+
43
+
44
+ class NoPrecomputedMaskTransform(_BaseImputer):
45
+ def fit(self, X, y=None):
46
+ mask = _get_mask(X, value_to_mask=np.nan)
47
+ self._fit_indicator(mask)
48
+ return self
49
+
50
+ def transform(self, X):
51
+ return self._concatenate_indicator(X, self._transform_indicator(X))
52
+
53
+
54
+ def test_base_imputer_not_fit(data):
55
+ imputer = NoFitIndicatorImputer(add_indicator=True)
56
+ err_msg = "Make sure to call _fit_indicator before _transform_indicator"
57
+ with pytest.raises(ValueError, match=err_msg):
58
+ imputer.fit(data).transform(data)
59
+ with pytest.raises(ValueError, match=err_msg):
60
+ imputer.fit_transform(data)
61
+
62
+
63
+ def test_base_imputer_not_transform(data):
64
+ imputer = NoTransformIndicatorImputer(add_indicator=True)
65
+ err_msg = (
66
+ "Call _fit_indicator and _transform_indicator in the imputer implementation"
67
+ )
68
+ with pytest.raises(ValueError, match=err_msg):
69
+ imputer.fit(data).transform(data)
70
+ with pytest.raises(ValueError, match=err_msg):
71
+ imputer.fit_transform(data)
72
+
73
+
74
+ def test_base_no_precomputed_mask_fit(data):
75
+ imputer = NoPrecomputedMaskFit(add_indicator=True)
76
+ err_msg = "precomputed is True but the input data is not a mask"
77
+ with pytest.raises(ValueError, match=err_msg):
78
+ imputer.fit(data)
79
+ with pytest.raises(ValueError, match=err_msg):
80
+ imputer.fit_transform(data)
81
+
82
+
83
+ def test_base_no_precomputed_mask_transform(data):
84
+ imputer = NoPrecomputedMaskTransform(add_indicator=True)
85
+ err_msg = "precomputed is True but the input data is not a mask"
86
+ imputer.fit(data)
87
+ with pytest.raises(ValueError, match=err_msg):
88
+ imputer.transform(data)
89
+ with pytest.raises(ValueError, match=err_msg):
90
+ imputer.fit_transform(data)
91
+
92
+
93
+ @pytest.mark.parametrize("X1_type", ["array", "dataframe"])
94
+ def test_assign_where(X1_type):
95
+ """Check the behaviour of the private helpers `_assign_where`."""
96
+ rng = np.random.RandomState(0)
97
+
98
+ n_samples, n_features = 10, 5
99
+ X1 = _convert_container(rng.randn(n_samples, n_features), constructor_name=X1_type)
100
+ X2 = rng.randn(n_samples, n_features)
101
+ mask = rng.randint(0, 2, size=(n_samples, n_features)).astype(bool)
102
+
103
+ _assign_where(X1, X2, mask)
104
+
105
+ if X1_type == "dataframe":
106
+ X1 = X1.to_numpy()
107
+ assert_allclose(X1[mask], X2[mask])
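The helper classes above illustrate contract violations that _BaseImputer rejects; a subclass that satisfies the indicator contract passes a precomputed boolean mask to both _fit_indicator and _transform_indicator, as in this hedged sketch (the class name PassthroughImputer is hypothetical and not part of the test module).
import numpy as np
from sklearn.impute._base import _BaseImputer
from sklearn.utils._mask import _get_mask

class PassthroughImputer(_BaseImputer):  # hypothetical, for illustration only
    def fit(self, X, y=None):
        # fit the indicator on a precomputed mask, as KNNImputer does above
        self._fit_indicator(_get_mask(X, value_to_mask=np.nan))
        return self

    def transform(self, X):
        mask = _get_mask(X, value_to_mask=np.nan)
        # appends indicator columns only when add_indicator=True
        return self._concatenate_indicator(X, self._transform_indicator(mask))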
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py ADDED
@@ -0,0 +1,220 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.experimental import enable_iterative_imputer # noqa
5
+ from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer
6
+ from sklearn.utils._testing import (
7
+ assert_allclose,
8
+ assert_allclose_dense_sparse,
9
+ assert_array_equal,
10
+ )
11
+ from sklearn.utils.fixes import CSR_CONTAINERS
12
+
13
+
14
+ def imputers():
15
+ return [IterativeImputer(tol=0.1), KNNImputer(), SimpleImputer()]
16
+
17
+
18
+ def sparse_imputers():
19
+ return [SimpleImputer()]
20
+
21
+
22
+ # ConvergenceWarning will be raised by the IterativeImputer
23
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
24
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
25
+ def test_imputation_missing_value_in_test_array(imputer):
26
+ # [Non Regression Test for issue #13968] Missing value in test set should
27
+ # not throw an error and return a finite dataset
28
+ train = [[1], [2]]
29
+ test = [[3], [np.nan]]
30
+ imputer.set_params(add_indicator=True)
31
+ imputer.fit(train).transform(test)
32
+
33
+
34
+ # ConvergenceWarning will be raised by the IterativeImputer
35
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
36
+ @pytest.mark.parametrize("marker", [np.nan, -1, 0])
37
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
38
+ def test_imputers_add_indicator(marker, imputer):
39
+ X = np.array(
40
+ [
41
+ [marker, 1, 5, marker, 1],
42
+ [2, marker, 1, marker, 2],
43
+ [6, 3, marker, marker, 3],
44
+ [1, 2, 9, marker, 4],
45
+ ]
46
+ )
47
+ X_true_indicator = np.array(
48
+ [
49
+ [1.0, 0.0, 0.0, 1.0],
50
+ [0.0, 1.0, 0.0, 1.0],
51
+ [0.0, 0.0, 1.0, 1.0],
52
+ [0.0, 0.0, 0.0, 1.0],
53
+ ]
54
+ )
55
+ imputer.set_params(missing_values=marker, add_indicator=True)
56
+
57
+ X_trans = imputer.fit_transform(X)
58
+ assert_allclose(X_trans[:, -4:], X_true_indicator)
59
+ assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3]))
60
+
61
+ imputer.set_params(add_indicator=False)
62
+ X_trans_no_indicator = imputer.fit_transform(X)
63
+ assert_allclose(X_trans[:, :-4], X_trans_no_indicator)
64
+
65
+
66
+ # ConvergenceWarning will be raised by the IterativeImputer
67
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
68
+ @pytest.mark.parametrize("marker", [np.nan, -1])
69
+ @pytest.mark.parametrize(
70
+ "imputer", sparse_imputers(), ids=lambda x: x.__class__.__name__
71
+ )
72
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
73
+ def test_imputers_add_indicator_sparse(imputer, marker, csr_container):
74
+ X = csr_container(
75
+ [
76
+ [marker, 1, 5, marker, 1],
77
+ [2, marker, 1, marker, 2],
78
+ [6, 3, marker, marker, 3],
79
+ [1, 2, 9, marker, 4],
80
+ ]
81
+ )
82
+ X_true_indicator = csr_container(
83
+ [
84
+ [1.0, 0.0, 0.0, 1.0],
85
+ [0.0, 1.0, 0.0, 1.0],
86
+ [0.0, 0.0, 1.0, 1.0],
87
+ [0.0, 0.0, 0.0, 1.0],
88
+ ]
89
+ )
90
+ imputer.set_params(missing_values=marker, add_indicator=True)
91
+
92
+ X_trans = imputer.fit_transform(X)
93
+ assert_allclose_dense_sparse(X_trans[:, -4:], X_true_indicator)
94
+ assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3]))
95
+
96
+ imputer.set_params(add_indicator=False)
97
+ X_trans_no_indicator = imputer.fit_transform(X)
98
+ assert_allclose_dense_sparse(X_trans[:, :-4], X_trans_no_indicator)
99
+
100
+
101
+ # ConvergenceWarning will be raised by the IterativeImputer
102
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
103
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
104
+ @pytest.mark.parametrize("add_indicator", [True, False])
105
+ def test_imputers_pandas_na_integer_array_support(imputer, add_indicator):
106
+ # Test pandas IntegerArray with pd.NA
107
+ pd = pytest.importorskip("pandas")
108
+ marker = np.nan
109
+ imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker)
110
+
111
+ X = np.array(
112
+ [
113
+ [marker, 1, 5, marker, 1],
114
+ [2, marker, 1, marker, 2],
115
+ [6, 3, marker, marker, 3],
116
+ [1, 2, 9, marker, 4],
117
+ ]
118
+ )
119
+ # fit on numpy array
120
+ X_trans_expected = imputer.fit_transform(X)
121
+
122
+ # Creates dataframe with IntegerArrays with pd.NA
123
+ X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c", "d", "e"])
124
+
125
+ # fit on pandas dataframe with IntegerArrays
126
+ X_trans = imputer.fit_transform(X_df)
127
+
128
+ assert_allclose(X_trans_expected, X_trans)
129
+
130
+
131
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
132
+ @pytest.mark.parametrize("add_indicator", [True, False])
133
+ def test_imputers_feature_names_out_pandas(imputer, add_indicator):
134
+ """Check feature names out for imputers."""
135
+ pd = pytest.importorskip("pandas")
136
+ marker = np.nan
137
+ imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker)
138
+
139
+ X = np.array(
140
+ [
141
+ [marker, 1, 5, 3, marker, 1],
142
+ [2, marker, 1, 4, marker, 2],
143
+ [6, 3, 7, marker, marker, 3],
144
+ [1, 2, 9, 8, marker, 4],
145
+ ]
146
+ )
147
+ X_df = pd.DataFrame(X, columns=["a", "b", "c", "d", "e", "f"])
148
+ imputer.fit(X_df)
149
+
150
+ names = imputer.get_feature_names_out()
151
+
152
+ if add_indicator:
153
+ expected_names = [
154
+ "a",
155
+ "b",
156
+ "c",
157
+ "d",
158
+ "f",
159
+ "missingindicator_a",
160
+ "missingindicator_b",
161
+ "missingindicator_d",
162
+ "missingindicator_e",
163
+ ]
164
+ assert_array_equal(expected_names, names)
165
+ else:
166
+ expected_names = ["a", "b", "c", "d", "f"]
167
+ assert_array_equal(expected_names, names)
168
+
169
+
170
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
171
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
172
+ def test_keep_empty_features(imputer, keep_empty_features):
173
+ """Check that the imputer keeps features with only missing values."""
174
+ X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])
175
+ imputer = imputer.set_params(
176
+ add_indicator=False, keep_empty_features=keep_empty_features
177
+ )
178
+
179
+ for method in ["fit_transform", "transform"]:
180
+ X_imputed = getattr(imputer, method)(X)
181
+ if keep_empty_features:
182
+ assert X_imputed.shape == X.shape
183
+ else:
184
+ assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
185
+
186
+
187
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
188
+ @pytest.mark.parametrize("missing_value_test", [np.nan, 1])
189
+ def test_imputation_adds_missing_indicator_if_add_indicator_is_true(
190
+ imputer, missing_value_test
191
+ ):
192
+ """Check that missing indicator always exists when add_indicator=True.
193
+
194
+ Non-regression test for gh-26590.
195
+ """
196
+ X_train = np.array([[0, np.nan], [1, 2]])
197
+
198
+ # Test data where missing_value_test variable can be set to np.nan or 1.
199
+ X_test = np.array([[0, missing_value_test], [1, 2]])
200
+
201
+ imputer.set_params(add_indicator=True)
202
+ imputer.fit(X_train)
203
+
204
+ X_test_imputed_with_indicator = imputer.transform(X_test)
205
+ assert X_test_imputed_with_indicator.shape == (2, 3)
206
+
207
+ imputer.set_params(add_indicator=False)
208
+ imputer.fit(X_train)
209
+ X_test_imputed_without_indicator = imputer.transform(X_test)
210
+ assert X_test_imputed_without_indicator.shape == (2, 2)
211
+
212
+ assert_allclose(
213
+ X_test_imputed_with_indicator[:, :-1], X_test_imputed_without_indicator
214
+ )
215
+ if np.isnan(missing_value_test):
216
+ expected_missing_indicator = [1, 0]
217
+ else:
218
+ expected_missing_indicator = [0, 0]
219
+
220
+ assert_allclose(X_test_imputed_with_indicator[:, -1], expected_missing_indicator)
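A short sketch of the add_indicator behaviour exercised by the last test above: with add_indicator=True the imputer appends one indicator column per feature that had missing values during fit, so the output keeps the extra column even when the test data itself is complete. The arrays below are illustrative only.
import numpy as np
from sklearn.impute import SimpleImputer

X_train = np.array([[0.0, np.nan], [1.0, 2.0]])
imp = SimpleImputer(add_indicator=True).fit(X_train)

# column 1 had missing values during fit, so one indicator column is appended
print(imp.transform(np.array([[0.0, np.nan], [1.0, 2.0]])).shape)  # (2, 3)
print(imp.transform(np.array([[0.0, 5.0], [1.0, 2.0]])).shape)     # still (2, 3)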
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py ADDED
@@ -0,0 +1,1754 @@
1
+ import io
2
+ import re
3
+ import warnings
4
+ from itertools import product
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from scipy import sparse
9
+ from scipy.stats import kstest
10
+
11
+ from sklearn import tree
12
+ from sklearn.datasets import load_diabetes
13
+ from sklearn.dummy import DummyRegressor
14
+ from sklearn.exceptions import ConvergenceWarning
15
+
16
+ # make IterativeImputer available
17
+ from sklearn.experimental import enable_iterative_imputer # noqa
18
+ from sklearn.impute import IterativeImputer, KNNImputer, MissingIndicator, SimpleImputer
19
+ from sklearn.impute._base import _most_frequent
20
+ from sklearn.linear_model import ARDRegression, BayesianRidge, RidgeCV
21
+ from sklearn.model_selection import GridSearchCV
22
+ from sklearn.pipeline import Pipeline, make_union
23
+ from sklearn.random_projection import _sparse_random_matrix
24
+ from sklearn.utils._testing import (
25
+ _convert_container,
26
+ assert_allclose,
27
+ assert_allclose_dense_sparse,
28
+ assert_array_almost_equal,
29
+ assert_array_equal,
30
+ )
31
+ from sklearn.utils.fixes import (
32
+ BSR_CONTAINERS,
33
+ COO_CONTAINERS,
34
+ CSC_CONTAINERS,
35
+ CSR_CONTAINERS,
36
+ LIL_CONTAINERS,
37
+ )
38
+
39
+
40
+ def _assert_array_equal_and_same_dtype(x, y):
41
+ assert_array_equal(x, y)
42
+ assert x.dtype == y.dtype
43
+
44
+
45
+ def _assert_allclose_and_same_dtype(x, y):
46
+ assert_allclose(x, y)
47
+ assert x.dtype == y.dtype
48
+
49
+
50
+ def _check_statistics(
51
+ X, X_true, strategy, statistics, missing_values, sparse_container
52
+ ):
53
+ """Utility function for testing imputation for a given strategy.
54
+
55
+ Test with dense and sparse arrays
56
+
57
+ Check that:
58
+ - the statistics (mean, median, mode) are correct
59
+ - the missing values are imputed correctly"""
60
+
61
+ err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % (
62
+ strategy,
63
+ missing_values,
64
+ )
65
+
66
+ assert_ae = assert_array_equal
67
+
68
+ if X.dtype.kind == "f" or X_true.dtype.kind == "f":
69
+ assert_ae = assert_array_almost_equal
70
+
71
+ # Normal matrix
72
+ imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
73
+ X_trans = imputer.fit(X).transform(X.copy())
74
+ assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))
75
+ assert_ae(X_trans, X_true, err_msg=err_msg.format(False))
76
+
77
+ # Sparse matrix
78
+ imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
79
+ imputer.fit(sparse_container(X))
80
+ X_trans = imputer.transform(sparse_container(X.copy()))
81
+
82
+ if sparse.issparse(X_trans):
83
+ X_trans = X_trans.toarray()
84
+
85
+ assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))
86
+ assert_ae(X_trans, X_true, err_msg=err_msg.format(True))
87
+
88
+
89
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
90
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
91
+ def test_imputation_shape(strategy, csr_container):
92
+ # Verify the shapes of the imputed matrix for different strategies.
93
+ X = np.random.randn(10, 2)
94
+ X[::2] = np.nan
95
+
96
+ imputer = SimpleImputer(strategy=strategy)
97
+ X_imputed = imputer.fit_transform(csr_container(X))
98
+ assert X_imputed.shape == (10, 2)
99
+ X_imputed = imputer.fit_transform(X)
100
+ assert X_imputed.shape == (10, 2)
101
+
102
+ iterative_imputer = IterativeImputer(initial_strategy=strategy)
103
+ X_imputed = iterative_imputer.fit_transform(X)
104
+ assert X_imputed.shape == (10, 2)
105
+
106
+
107
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
108
+ def test_imputation_deletion_warning(strategy):
109
+ X = np.ones((3, 5))
110
+ X[:, 0] = np.nan
111
+ imputer = SimpleImputer(strategy=strategy).fit(X)
112
+
113
+ with pytest.warns(UserWarning, match="Skipping"):
114
+ imputer.transform(X)
115
+
116
+
117
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
118
+ def test_imputation_deletion_warning_feature_names(strategy):
119
+ pd = pytest.importorskip("pandas")
120
+
121
+ missing_values = np.nan
122
+ feature_names = np.array(["a", "b", "c", "d"], dtype=object)
123
+ X = pd.DataFrame(
124
+ [
125
+ [missing_values, missing_values, 1, missing_values],
126
+ [4, missing_values, 2, 10],
127
+ ],
128
+ columns=feature_names,
129
+ )
130
+
131
+ imputer = SimpleImputer(strategy=strategy).fit(X)
132
+
133
+ # check SimpleImputer returning feature name attribute correctly
134
+ assert_array_equal(imputer.feature_names_in_, feature_names)
135
+
136
+ # ensure that skipped feature warning includes feature name
137
+ with pytest.warns(
138
+ UserWarning, match=r"Skipping features without any observed values: \['b'\]"
139
+ ):
140
+ imputer.transform(X)
141
+
142
+
143
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
144
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
145
+ def test_imputation_error_sparse_0(strategy, csc_container):
146
+ # check that error are raised when missing_values = 0 and input is sparse
147
+ X = np.ones((3, 5))
148
+ X[0] = 0
149
+ X = csc_container(X)
150
+
151
+ imputer = SimpleImputer(strategy=strategy, missing_values=0)
152
+ with pytest.raises(ValueError, match="Provide a dense array"):
153
+ imputer.fit(X)
154
+
155
+ imputer.fit(X.toarray())
156
+ with pytest.raises(ValueError, match="Provide a dense array"):
157
+ imputer.transform(X)
158
+
159
+
160
+ def safe_median(arr, *args, **kwargs):
161
+ # np.median([]) raises a TypeError for numpy >= 1.10.1
162
+ length = arr.size if hasattr(arr, "size") else len(arr)
163
+ return np.nan if length == 0 else np.median(arr, *args, **kwargs)
164
+
165
+
166
+ def safe_mean(arr, *args, **kwargs):
167
+ # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
168
+ length = arr.size if hasattr(arr, "size") else len(arr)
169
+ return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
170
+
171
+
172
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
173
+ def test_imputation_mean_median(csc_container):
174
+ # Test imputation using the mean and median strategies, when
175
+ # missing_values != 0.
176
+ rng = np.random.RandomState(0)
177
+
178
+ dim = 10
179
+ dec = 10
180
+ shape = (dim * dim, dim + dec)
181
+
182
+ zeros = np.zeros(shape[0])
183
+ values = np.arange(1, shape[0] + 1)
184
+ values[4::2] = -values[4::2]
185
+
186
+ tests = [
187
+ ("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),
188
+ ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))),
189
+ ]
190
+
191
+ for strategy, test_missing_values, true_value_fun in tests:
192
+ X = np.empty(shape)
193
+ X_true = np.empty(shape)
194
+ true_statistics = np.empty(shape[1])
195
+
196
+ # Create a matrix X with columns
197
+ # - with only zeros,
198
+ # - with only missing values
199
+ # - with zeros, missing values and values
200
+ # And a matrix X_true containing all true values
201
+ for j in range(shape[1]):
202
+ nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
203
+ nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0)
204
+ nb_values = shape[0] - nb_zeros - nb_missing_values
205
+
206
+ z = zeros[:nb_zeros]
207
+ p = np.repeat(test_missing_values, nb_missing_values)
208
+ v = values[rng.permutation(len(values))[:nb_values]]
209
+
210
+ true_statistics[j] = true_value_fun(z, v, p)
211
+
212
+ # Create the columns
213
+ X[:, j] = np.hstack((v, z, p))
214
+
215
+ if 0 == test_missing_values:
216
+ # XXX unreached code as of v0.22
217
+ X_true[:, j] = np.hstack(
218
+ (v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros))
219
+ )
220
+ else:
221
+ X_true[:, j] = np.hstack(
222
+ (v, z, np.repeat(true_statistics[j], nb_missing_values))
223
+ )
224
+
225
+ # Shuffle them the same way
226
+ np.random.RandomState(j).shuffle(X[:, j])
227
+ np.random.RandomState(j).shuffle(X_true[:, j])
228
+
229
+ # Mean doesn't support columns containing NaNs, median does
230
+ if strategy == "median":
231
+ cols_to_keep = ~np.isnan(X_true).any(axis=0)
232
+ else:
233
+ cols_to_keep = ~np.isnan(X_true).all(axis=0)
234
+
235
+ X_true = X_true[:, cols_to_keep]
236
+
237
+ _check_statistics(
238
+ X, X_true, strategy, true_statistics, test_missing_values, csc_container
239
+ )
240
+
241
+
242
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
243
+ def test_imputation_median_special_cases(csc_container):
244
+ # Test median imputation with sparse boundary cases
245
+ X = np.array(
246
+ [
247
+ [0, np.nan, np.nan], # odd: implicit zero
248
+ [5, np.nan, np.nan], # odd: explicit nonzero
249
+ [0, 0, np.nan], # even: average two zeros
250
+ [-5, 0, np.nan], # even: avg zero and neg
251
+ [0, 5, np.nan], # even: avg zero and pos
252
+ [4, 5, np.nan], # even: avg nonzeros
253
+ [-4, -5, np.nan], # even: avg negatives
254
+ [-1, 2, np.nan], # even: crossing neg and pos
255
+ ]
256
+ ).transpose()
257
+
258
+ X_imputed_median = np.array(
259
+ [
260
+ [0, 0, 0],
261
+ [5, 5, 5],
262
+ [0, 0, 0],
263
+ [-5, 0, -2.5],
264
+ [0, 5, 2.5],
265
+ [4, 5, 4.5],
266
+ [-4, -5, -4.5],
267
+ [-1, 2, 0.5],
268
+ ]
269
+ ).transpose()
270
+ statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5]
271
+
272
+ _check_statistics(
273
+ X, X_imputed_median, "median", statistics_median, np.nan, csc_container
274
+ )
275
+
276
+
277
+ @pytest.mark.parametrize("strategy", ["mean", "median"])
278
+ @pytest.mark.parametrize("dtype", [None, object, str])
279
+ def test_imputation_mean_median_error_invalid_type(strategy, dtype):
280
+ X = np.array([["a", "b", 3], [4, "e", 6], ["g", "h", 9]], dtype=dtype)
281
+ msg = "non-numeric data:\ncould not convert string to float:"
282
+ with pytest.raises(ValueError, match=msg):
283
+ imputer = SimpleImputer(strategy=strategy)
284
+ imputer.fit_transform(X)
285
+
286
+
287
+ @pytest.mark.parametrize("strategy", ["mean", "median"])
288
+ @pytest.mark.parametrize("type", ["list", "dataframe"])
289
+ def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type):
290
+ X = [["a", "b", 3], [4, "e", 6], ["g", "h", 9]]
291
+ if type == "dataframe":
292
+ pd = pytest.importorskip("pandas")
293
+ X = pd.DataFrame(X)
294
+ msg = "non-numeric data:\ncould not convert string to float:"
295
+ with pytest.raises(ValueError, match=msg):
296
+ imputer = SimpleImputer(strategy=strategy)
297
+ imputer.fit_transform(X)
298
+
299
+
300
+ @pytest.mark.parametrize("strategy", ["constant", "most_frequent"])
301
+ @pytest.mark.parametrize("dtype", [str, np.dtype("U"), np.dtype("S")])
302
+ def test_imputation_const_mostf_error_invalid_types(strategy, dtype):
303
+ # Test imputation on non-numeric data using "most_frequent" and "constant"
304
+ # strategy
305
+ X = np.array(
306
+ [
307
+ [np.nan, np.nan, "a", "f"],
308
+ [np.nan, "c", np.nan, "d"],
309
+ [np.nan, "b", "d", np.nan],
310
+ [np.nan, "c", "d", "h"],
311
+ ],
312
+ dtype=dtype,
313
+ )
314
+
315
+ err_msg = "SimpleImputer does not support data"
316
+ with pytest.raises(ValueError, match=err_msg):
317
+ imputer = SimpleImputer(strategy=strategy)
318
+ imputer.fit(X).transform(X)
319
+
320
+
321
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
322
+ def test_imputation_most_frequent(csc_container):
323
+ # Test imputation using the most-frequent strategy.
324
+ X = np.array(
325
+ [
326
+ [-1, -1, 0, 5],
327
+ [-1, 2, -1, 3],
328
+ [-1, 1, 3, -1],
329
+ [-1, 2, 3, 7],
330
+ ]
331
+ )
332
+
333
+ X_true = np.array(
334
+ [
335
+ [2, 0, 5],
336
+ [2, 3, 3],
337
+ [1, 3, 3],
338
+ [2, 3, 7],
339
+ ]
340
+ )
341
+
342
+ # scipy.stats.mode, used in SimpleImputer, doesn't return the first most
343
+ # frequent as promised in the doc but the lowest most frequent. When this
344
+ # test will fail after an update of scipy, SimpleImputer will need to be
345
+ # updated to be consistent with the new (correct) behaviour
346
+ _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1, csc_container)
347
+
348
+
349
+ @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
350
+ def test_imputation_most_frequent_objects(marker):
351
+ # Test imputation using the most-frequent strategy.
352
+ X = np.array(
353
+ [
354
+ [marker, marker, "a", "f"],
355
+ [marker, "c", marker, "d"],
356
+ [marker, "b", "d", marker],
357
+ [marker, "c", "d", "h"],
358
+ ],
359
+ dtype=object,
360
+ )
361
+
362
+ X_true = np.array(
363
+ [
364
+ ["c", "a", "f"],
365
+ ["c", "d", "d"],
366
+ ["b", "d", "d"],
367
+ ["c", "d", "h"],
368
+ ],
369
+ dtype=object,
370
+ )
371
+
372
+ imputer = SimpleImputer(missing_values=marker, strategy="most_frequent")
373
+ X_trans = imputer.fit(X).transform(X)
374
+
375
+ assert_array_equal(X_trans, X_true)
376
+
377
+
378
+ @pytest.mark.parametrize("dtype", [object, "category"])
379
+ def test_imputation_most_frequent_pandas(dtype):
380
+ # Test imputation using the most frequent strategy on pandas df
381
+ pd = pytest.importorskip("pandas")
382
+
383
+ f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
384
+
385
+ df = pd.read_csv(f, dtype=dtype)
386
+
387
+ X_true = np.array(
388
+ [["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"]],
389
+ dtype=object,
390
+ )
391
+
392
+ imputer = SimpleImputer(strategy="most_frequent")
393
+ X_trans = imputer.fit_transform(df)
394
+
395
+ assert_array_equal(X_trans, X_true)
396
+
397
+
398
+ @pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1.0, np.nan)])
399
+ def test_imputation_constant_error_invalid_type(X_data, missing_value):
400
+ # Verify that exceptions are raised on invalid fill_value type
401
+ X = np.full((3, 5), X_data, dtype=float)
402
+ X[0, 0] = missing_value
403
+
404
+ fill_value = "x"
405
+ err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast"
406
+ with pytest.raises(ValueError, match=re.escape(err_msg)):
407
+ imputer = SimpleImputer(
408
+ missing_values=missing_value, strategy="constant", fill_value=fill_value
409
+ )
410
+ imputer.fit_transform(X)
411
+
412
+
413
+ def test_imputation_constant_integer():
414
+ # Test imputation using the constant strategy on integers
415
+ X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])
416
+
417
+ X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]])
418
+
419
+ imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0)
420
+ X_trans = imputer.fit_transform(X)
421
+
422
+ assert_array_equal(X_trans, X_true)
423
+
424
+
425
+ @pytest.mark.parametrize("array_constructor", CSR_CONTAINERS + [np.asarray])
426
+ def test_imputation_constant_float(array_constructor):
427
+ # Test imputation using the constant strategy on floats
428
+ X = np.array(
429
+ [
430
+ [np.nan, 1.1, 0, np.nan],
431
+ [1.2, np.nan, 1.3, np.nan],
432
+ [0, 0, np.nan, np.nan],
433
+ [1.4, 1.5, 0, np.nan],
434
+ ]
435
+ )
436
+
437
+ X_true = np.array(
438
+ [[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]]
439
+ )
440
+
441
+ X = array_constructor(X)
442
+
443
+ X_true = array_constructor(X_true)
444
+
445
+ imputer = SimpleImputer(strategy="constant", fill_value=-1)
446
+ X_trans = imputer.fit_transform(X)
447
+
448
+ assert_allclose_dense_sparse(X_trans, X_true)
449
+
450
+
451
+ @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
452
+ def test_imputation_constant_object(marker):
453
+ # Test imputation using the constant strategy on objects
454
+ X = np.array(
455
+ [
456
+ [marker, "a", "b", marker],
457
+ ["c", marker, "d", marker],
458
+ ["e", "f", marker, marker],
459
+ ["g", "h", "i", marker],
460
+ ],
461
+ dtype=object,
462
+ )
463
+
464
+ X_true = np.array(
465
+ [
466
+ ["missing", "a", "b", "missing"],
467
+ ["c", "missing", "d", "missing"],
468
+ ["e", "f", "missing", "missing"],
469
+ ["g", "h", "i", "missing"],
470
+ ],
471
+ dtype=object,
472
+ )
473
+
474
+ imputer = SimpleImputer(
475
+ missing_values=marker, strategy="constant", fill_value="missing"
476
+ )
477
+ X_trans = imputer.fit_transform(X)
478
+
479
+ assert_array_equal(X_trans, X_true)
480
+
481
+
482
+ @pytest.mark.parametrize("dtype", [object, "category"])
483
+ def test_imputation_constant_pandas(dtype):
484
+ # Test imputation using the constant strategy on pandas df
485
+ pd = pytest.importorskip("pandas")
486
+
487
+ f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
488
+
489
+ df = pd.read_csv(f, dtype=dtype)
490
+
491
+ X_true = np.array(
492
+ [
493
+ ["missing_value", "i", "x", "missing_value"],
494
+ ["a", "missing_value", "y", "missing_value"],
495
+ ["a", "j", "missing_value", "missing_value"],
496
+ ["b", "j", "x", "missing_value"],
497
+ ],
498
+ dtype=object,
499
+ )
500
+
501
+ imputer = SimpleImputer(strategy="constant")
502
+ X_trans = imputer.fit_transform(df)
503
+
504
+ assert_array_equal(X_trans, X_true)
505
+
506
+
507
+ @pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]])
508
+ def test_iterative_imputer_one_feature(X):
509
+ # check we exit early when there is a single feature
510
+ imputer = IterativeImputer().fit(X)
511
+ assert imputer.n_iter_ == 0
512
+ imputer = IterativeImputer()
513
+ imputer.fit([[1], [2]])
514
+ assert imputer.n_iter_ == 0
515
+ imputer.fit([[1], [np.nan]])
516
+ assert imputer.n_iter_ == 0
517
+
518
+
519
+ def test_imputation_pipeline_grid_search():
520
+ # Test imputation within a pipeline + gridsearch.
521
+ X = _sparse_random_matrix(100, 100, density=0.10)
522
+ missing_values = X.data[0]
523
+
524
+ pipeline = Pipeline(
525
+ [
526
+ ("imputer", SimpleImputer(missing_values=missing_values)),
527
+ ("tree", tree.DecisionTreeRegressor(random_state=0)),
528
+ ]
529
+ )
530
+
531
+ parameters = {"imputer__strategy": ["mean", "median", "most_frequent"]}
532
+
533
+ Y = _sparse_random_matrix(100, 1, density=0.10).toarray()
534
+ gs = GridSearchCV(pipeline, parameters)
535
+ gs.fit(X, Y)
536
+
537
+
538
+ def test_imputation_copy():
539
+ # Test imputation with copy
540
+ X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0)
541
+
542
+ # copy=True, dense => copy
543
+ X = X_orig.copy().toarray()
544
+ imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
545
+ Xt = imputer.fit(X).transform(X)
546
+ Xt[0, 0] = -1
547
+ assert not np.all(X == Xt)
548
+
549
+ # copy=True, sparse csr => copy
550
+ X = X_orig.copy()
551
+ imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True)
552
+ Xt = imputer.fit(X).transform(X)
553
+ Xt.data[0] = -1
554
+ assert not np.all(X.data == Xt.data)
555
+
556
+ # copy=False, dense => no copy
557
+ X = X_orig.copy().toarray()
558
+ imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False)
559
+ Xt = imputer.fit(X).transform(X)
560
+ Xt[0, 0] = -1
561
+ assert_array_almost_equal(X, Xt)
562
+
563
+ # copy=False, sparse csc => no copy
564
+ X = X_orig.copy().tocsc()
565
+ imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
566
+ Xt = imputer.fit(X).transform(X)
567
+ Xt.data[0] = -1
568
+ assert_array_almost_equal(X.data, Xt.data)
569
+
570
+ # copy=False, sparse csr => copy
571
+ X = X_orig.copy()
572
+ imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
573
+ Xt = imputer.fit(X).transform(X)
574
+ Xt.data[0] = -1
575
+ assert not np.all(X.data == Xt.data)
576
+
577
+ # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
578
+ # made, even if copy=False.
579
+
580
+
581
+ def test_iterative_imputer_zero_iters():
582
+ rng = np.random.RandomState(0)
583
+
584
+ n = 100
585
+ d = 10
586
+ X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
587
+ missing_flag = X == 0
588
+ X[missing_flag] = np.nan
589
+
590
+ imputer = IterativeImputer(max_iter=0)
591
+ X_imputed = imputer.fit_transform(X)
592
+ # with max_iter=0, only initial imputation is performed
593
+ assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
594
+
595
+ # repeat but force n_iter_ to 0
596
+ imputer = IterativeImputer(max_iter=5).fit(X)
597
+ # transformed should not be equal to initial imputation
598
+ assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X))
599
+
600
+ imputer.n_iter_ = 0
601
+ # now they should be equal as only initial imputation is done
602
+ assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X))
603
+
604
+
605
+ def test_iterative_imputer_verbose():
606
+ rng = np.random.RandomState(0)
607
+
608
+ n = 100
609
+ d = 3
610
+ X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
611
+ imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1)
612
+ imputer.fit(X)
613
+ imputer.transform(X)
614
+ imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2)
615
+ imputer.fit(X)
616
+ imputer.transform(X)
617
+
618
+
619
+ def test_iterative_imputer_all_missing():
620
+ n = 100
621
+ d = 3
622
+ X = np.zeros((n, d))
623
+ imputer = IterativeImputer(missing_values=0, max_iter=1)
624
+ X_imputed = imputer.fit_transform(X)
625
+ assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
626
+
627
+
628
+ @pytest.mark.parametrize(
629
+ "imputation_order", ["random", "roman", "ascending", "descending", "arabic"]
630
+ )
631
+ def test_iterative_imputer_imputation_order(imputation_order):
632
+ rng = np.random.RandomState(0)
633
+ n = 100
634
+ d = 10
635
+ max_iter = 2
636
+ X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
637
+ X[:, 0] = 1 # this column should not be discarded by IterativeImputer
638
+
639
+ imputer = IterativeImputer(
640
+ missing_values=0,
641
+ max_iter=max_iter,
642
+ n_nearest_features=5,
643
+ sample_posterior=False,
644
+ skip_complete=True,
645
+ min_value=0,
646
+ max_value=1,
647
+ verbose=1,
648
+ imputation_order=imputation_order,
649
+ random_state=rng,
650
+ )
651
+ imputer.fit_transform(X)
652
+ ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_]
653
+
654
+ assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_
655
+
656
+ if imputation_order == "roman":
657
+ assert np.all(ordered_idx[: d - 1] == np.arange(1, d))
658
+ elif imputation_order == "arabic":
659
+ assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1))
660
+ elif imputation_order == "random":
661
+ ordered_idx_round_1 = ordered_idx[: d - 1]
662
+ ordered_idx_round_2 = ordered_idx[d - 1 :]
663
+ assert ordered_idx_round_1 != ordered_idx_round_2
664
+ elif "ending" in imputation_order:
665
+ assert len(ordered_idx) == max_iter * (d - 1)
666
+
667
+
668
+ @pytest.mark.parametrize(
669
+ "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()]
670
+ )
671
+ def test_iterative_imputer_estimators(estimator):
672
+ rng = np.random.RandomState(0)
673
+
674
+ n = 100
675
+ d = 10
676
+ X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
677
+
678
+ imputer = IterativeImputer(
679
+ missing_values=0, max_iter=1, estimator=estimator, random_state=rng
680
+ )
681
+ imputer.fit_transform(X)
682
+
683
+ # check that types are correct for estimators
684
+ hashes = []
685
+ for triplet in imputer.imputation_sequence_:
686
+ expected_type = (
687
+ type(estimator) if estimator is not None else type(BayesianRidge())
688
+ )
689
+ assert isinstance(triplet.estimator, expected_type)
690
+ hashes.append(id(triplet.estimator))
691
+
692
+ # check that each estimator is unique
693
+ assert len(set(hashes)) == len(hashes)
694
+
695
+
696
+ def test_iterative_imputer_clip():
697
+ rng = np.random.RandomState(0)
698
+ n = 100
699
+ d = 10
700
+ X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
701
+
702
+ imputer = IterativeImputer(
703
+ missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng
704
+ )
705
+
706
+ Xt = imputer.fit_transform(X)
707
+ assert_allclose(np.min(Xt[X == 0]), 0.1)
708
+ assert_allclose(np.max(Xt[X == 0]), 0.2)
709
+ assert_allclose(Xt[X != 0], X[X != 0])
710
+
711
+
712
+ def test_iterative_imputer_clip_truncnorm():
713
+ rng = np.random.RandomState(0)
714
+ n = 100
715
+ d = 10
716
+ X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
717
+ X[:, 0] = 1
718
+
719
+ imputer = IterativeImputer(
720
+ missing_values=0,
721
+ max_iter=2,
722
+ n_nearest_features=5,
723
+ sample_posterior=True,
724
+ min_value=0.1,
725
+ max_value=0.2,
726
+ verbose=1,
727
+ imputation_order="random",
728
+ random_state=rng,
729
+ )
730
+ Xt = imputer.fit_transform(X)
731
+ assert_allclose(np.min(Xt[X == 0]), 0.1)
732
+ assert_allclose(np.max(Xt[X == 0]), 0.2)
733
+ assert_allclose(Xt[X != 0], X[X != 0])
734
+
735
+
736
+ def test_iterative_imputer_truncated_normal_posterior():
737
+ # test that the values that are imputed using `sample_posterior=True`
738
+ # with boundaries (`min_value` and `max_value` are not None) are drawn
739
+ # from a distribution that looks gaussian via the Kolmogorov Smirnov test.
740
+ # note that starting from the wrong random seed will make this test fail
741
+ # because random sampling doesn't occur at all when the imputation
742
+ # is outside of the (min_value, max_value) range
743
+ rng = np.random.RandomState(42)
744
+
745
+ X = rng.normal(size=(5, 5))
746
+ X[0][0] = np.nan
747
+
748
+ imputer = IterativeImputer(
749
+ min_value=0, max_value=0.5, sample_posterior=True, random_state=rng
750
+ )
751
+
752
+ imputer.fit_transform(X)
753
+ # generate multiple imputations for the single missing value
754
+ imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])
755
+
756
+ assert all(imputations >= 0)
757
+ assert all(imputations <= 0.5)
758
+
759
+ mu, sigma = imputations.mean(), imputations.std()
760
+ ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm")
761
+ if sigma == 0:
762
+ sigma += 1e-12
763
+ ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm")
764
+ # we want to fail to reject null hypothesis
765
+ # null hypothesis: distributions are the same
766
+ assert ks_statistic < 0.2 or p_value > 0.1, "The posterior does appear to be normal"
767
+
768
+
769
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
770
+ def test_iterative_imputer_missing_at_transform(strategy):
771
+ rng = np.random.RandomState(0)
772
+ n = 100
773
+ d = 10
774
+ X_train = rng.randint(low=0, high=3, size=(n, d))
775
+ X_test = rng.randint(low=0, high=3, size=(n, d))
776
+
777
+ X_train[:, 0] = 1 # definitely no missing values in 0th column
778
+ X_test[0, 0] = 0 # definitely missing value in 0th column
779
+
780
+ imputer = IterativeImputer(
781
+ missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng
782
+ ).fit(X_train)
783
+ initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train)
784
+
785
+ # if there were no missing values at time of fit, then imputer will
786
+ # only use the initial imputer for that feature at transform
787
+ assert_allclose(
788
+ imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0]
789
+ )
790
+
791
+
792
+ def test_iterative_imputer_transform_stochasticity():
793
+ rng1 = np.random.RandomState(0)
794
+ rng2 = np.random.RandomState(1)
795
+ n = 100
796
+ d = 10
797
+ X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray()
798
+
799
+ # when sample_posterior=True, two transforms shouldn't be equal
800
+ imputer = IterativeImputer(
801
+ missing_values=0, max_iter=1, sample_posterior=True, random_state=rng1
802
+ )
803
+ imputer.fit(X)
804
+
805
+ X_fitted_1 = imputer.transform(X)
806
+ X_fitted_2 = imputer.transform(X)
807
+
808
+ # sufficient to assert that the means are not the same
809
+ assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))
810
+
811
+ # when sample_posterior=False, and n_nearest_features=None
812
+ # and imputation_order is not random
813
+ # the two transforms should be identical even if rng are different
814
+ imputer1 = IterativeImputer(
815
+ missing_values=0,
816
+ max_iter=1,
817
+ sample_posterior=False,
818
+ n_nearest_features=None,
819
+ imputation_order="ascending",
820
+ random_state=rng1,
821
+ )
822
+
823
+ imputer2 = IterativeImputer(
824
+ missing_values=0,
825
+ max_iter=1,
826
+ sample_posterior=False,
827
+ n_nearest_features=None,
828
+ imputation_order="ascending",
829
+ random_state=rng2,
830
+ )
831
+ imputer1.fit(X)
832
+ imputer2.fit(X)
833
+
834
+ X_fitted_1a = imputer1.transform(X)
835
+ X_fitted_1b = imputer1.transform(X)
836
+ X_fitted_2 = imputer2.transform(X)
837
+
838
+ assert_allclose(X_fitted_1a, X_fitted_1b)
839
+ assert_allclose(X_fitted_1a, X_fitted_2)
840
+
841
+
842
+ def test_iterative_imputer_no_missing():
843
+ rng = np.random.RandomState(0)
844
+ X = rng.rand(100, 100)
845
+ X[:, 0] = np.nan
846
+ m1 = IterativeImputer(max_iter=10, random_state=rng)
847
+ m2 = IterativeImputer(max_iter=10, random_state=rng)
848
+ pred1 = m1.fit(X).transform(X)
849
+ pred2 = m2.fit_transform(X)
850
+ # should exclude the first column entirely
851
+ assert_allclose(X[:, 1:], pred1)
852
+ # fit and fit_transform should both be identical
853
+ assert_allclose(pred1, pred2)
854
+
855
+
856
+ def test_iterative_imputer_rank_one():
857
+ rng = np.random.RandomState(0)
858
+ d = 50
859
+ A = rng.rand(d, 1)
860
+ B = rng.rand(1, d)
861
+ X = np.dot(A, B)
862
+ nan_mask = rng.rand(d, d) < 0.5
863
+ X_missing = X.copy()
864
+ X_missing[nan_mask] = np.nan
865
+
866
+ imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng)
867
+ X_filled = imputer.fit_transform(X_missing)
868
+ assert_allclose(X_filled, X, atol=0.02)
869
+
870
+
871
+ @pytest.mark.parametrize("rank", [3, 5])
872
+ def test_iterative_imputer_transform_recovery(rank):
873
+ rng = np.random.RandomState(0)
874
+ n = 70
875
+ d = 70
876
+ A = rng.rand(n, rank)
877
+ B = rng.rand(rank, d)
878
+ X_filled = np.dot(A, B)
879
+ nan_mask = rng.rand(n, d) < 0.5
880
+ X_missing = X_filled.copy()
881
+ X_missing[nan_mask] = np.nan
882
+
883
+ # split up data in half
884
+ n = n // 2
885
+ X_train = X_missing[:n]
886
+ X_test_filled = X_filled[n:]
887
+ X_test = X_missing[n:]
888
+
889
+ imputer = IterativeImputer(
890
+ max_iter=5, imputation_order="descending", verbose=1, random_state=rng
891
+ ).fit(X_train)
892
+ X_test_est = imputer.transform(X_test)
893
+ assert_allclose(X_test_filled, X_test_est, atol=0.1)
894
+
895
+
896
+ def test_iterative_imputer_additive_matrix():
897
+ rng = np.random.RandomState(0)
898
+ n = 100
899
+ d = 10
900
+ A = rng.randn(n, d)
901
+ B = rng.randn(n, d)
902
+ X_filled = np.zeros(A.shape)
903
+ for i in range(d):
904
+ for j in range(d):
905
+ X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2
906
+ # a quarter is randomly missing
907
+ nan_mask = rng.rand(n, d) < 0.25
908
+ X_missing = X_filled.copy()
909
+ X_missing[nan_mask] = np.nan
910
+
911
+ # split up data
912
+ n = n // 2
913
+ X_train = X_missing[:n]
914
+ X_test_filled = X_filled[n:]
915
+ X_test = X_missing[n:]
916
+
917
+ imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train)
918
+ X_test_est = imputer.transform(X_test)
919
+ assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01)
920
+
921
+
922
+ def test_iterative_imputer_early_stopping():
923
+ rng = np.random.RandomState(0)
924
+ n = 50
925
+ d = 5
926
+ A = rng.rand(n, 1)
927
+ B = rng.rand(1, d)
928
+ X = np.dot(A, B)
929
+ nan_mask = rng.rand(n, d) < 0.5
930
+ X_missing = X.copy()
931
+ X_missing[nan_mask] = np.nan
932
+
933
+ imputer = IterativeImputer(
934
+ max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng
935
+ )
936
+ X_filled_100 = imputer.fit_transform(X_missing)
937
+ assert len(imputer.imputation_sequence_) == d * imputer.n_iter_
938
+
939
+ imputer = IterativeImputer(
940
+ max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng
941
+ )
942
+ X_filled_early = imputer.fit_transform(X_missing)
943
+ assert_allclose(X_filled_100, X_filled_early, atol=1e-7)
944
+
945
+ imputer = IterativeImputer(
946
+ max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng
947
+ )
948
+ imputer.fit(X_missing)
949
+ assert imputer.n_iter_ == imputer.max_iter
950
+
951
+
952
+ def test_iterative_imputer_catch_warning():
953
+ # check that we catch a RuntimeWarning due to a division by zero when a
954
+ # feature is constant in the dataset
955
+ X, y = load_diabetes(return_X_y=True)
956
+ n_samples, n_features = X.shape
957
+
958
+ # simulate that a feature only contain one category during fit
959
+ X[:, 3] = 1
960
+
961
+ # add some missing values
962
+ rng = np.random.RandomState(0)
963
+ missing_rate = 0.15
964
+ for feat in range(n_features):
965
+ sample_idx = rng.choice(
966
+ np.arange(n_samples), size=int(n_samples * missing_rate), replace=False
967
+ )
968
+ X[sample_idx, feat] = np.nan
969
+
970
+ imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True)
971
+ with warnings.catch_warnings():
972
+ warnings.simplefilter("error", RuntimeWarning)
973
+ X_fill = imputer.fit_transform(X, y)
974
+ assert not np.any(np.isnan(X_fill))
975
+
976
+
977
+ @pytest.mark.parametrize(
978
+ "min_value, max_value, correct_output",
979
+ [
980
+ (0, 100, np.array([[0] * 3, [100] * 3])),
981
+ (None, None, np.array([[-np.inf] * 3, [np.inf] * 3])),
982
+ (-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])),
983
+ ([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])),
984
+ (
985
+ [-5, -np.inf, 10],
986
+ [100, 200, np.inf],
987
+ np.array([[-5, -np.inf, 10], [100, 200, np.inf]]),
988
+ ),
989
+ ],
990
+ ids=["scalars", "None-default", "inf", "lists", "lists-with-inf"],
991
+ )
992
+ def test_iterative_imputer_min_max_array_like(min_value, max_value, correct_output):
993
+ # check that passing scalar or array-like
994
+ # for min_value and max_value in IterativeImputer works
995
+ X = np.random.RandomState(0).randn(10, 3)
996
+ imputer = IterativeImputer(min_value=min_value, max_value=max_value)
997
+ imputer.fit(X)
998
+
999
+ assert isinstance(imputer._min_value, np.ndarray) and isinstance(
1000
+ imputer._max_value, np.ndarray
1001
+ )
1002
+ assert (imputer._min_value.shape[0] == X.shape[1]) and (
1003
+ imputer._max_value.shape[0] == X.shape[1]
1004
+ )
1005
+
1006
+ assert_allclose(correct_output[0, :], imputer._min_value)
1007
+ assert_allclose(correct_output[1, :], imputer._max_value)
1008
+
1009
+
1010
+ @pytest.mark.parametrize(
1011
+ "min_value, max_value, err_msg",
1012
+ [
1013
+ (100, 0, "min_value >= max_value."),
1014
+ (np.inf, -np.inf, "min_value >= max_value."),
1015
+ ([-5, 5], [100, 200, 0], "_value' should be of shape"),
1016
+ ],
1017
+ )
1018
+ def test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg):
1019
+ # check that passing scalar or array-like
1020
+ # for min_value and max_value in IterativeImputer works
1021
+ X = np.random.random((10, 3))
1022
+ imputer = IterativeImputer(min_value=min_value, max_value=max_value)
1023
+ with pytest.raises(ValueError, match=err_msg):
1024
+ imputer.fit(X)
1025
+
1026
+
1027
+ @pytest.mark.parametrize(
1028
+ "min_max_1, min_max_2",
1029
+ [([None, None], [-np.inf, np.inf]), ([-10, 10], [[-10] * 4, [10] * 4])],
1030
+ ids=["None-vs-inf", "Scalar-vs-vector"],
1031
+ )
1032
+ def test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2):
1033
+ # Test that None/inf and scalar/vector give the same imputation
1034
+ X_train = np.array(
1035
+ [
1036
+ [np.nan, 2, 2, 1],
1037
+ [10, np.nan, np.nan, 7],
1038
+ [3, 1, np.nan, 1],
1039
+ [np.nan, 4, 2, np.nan],
1040
+ ]
1041
+ )
1042
+ X_test = np.array(
1043
+ [[np.nan, 2, np.nan, 5], [2, 4, np.nan, np.nan], [np.nan, 1, 10, 1]]
1044
+ )
1045
+ imputer1 = IterativeImputer(
1046
+ min_value=min_max_1[0], max_value=min_max_1[1], random_state=0
1047
+ )
1048
+ imputer2 = IterativeImputer(
1049
+ min_value=min_max_2[0], max_value=min_max_2[1], random_state=0
1050
+ )
1051
+ X_test_imputed1 = imputer1.fit(X_train).transform(X_test)
1052
+ X_test_imputed2 = imputer2.fit(X_train).transform(X_test)
1053
+ assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0])
1054
+
1055
+
1056
+ @pytest.mark.parametrize("skip_complete", [True, False])
1057
+ def test_iterative_imputer_skip_non_missing(skip_complete):
1058
+ # check the imputing strategy when missing data are present in the
1059
+ # testing set only.
1060
+ # taken from: https://github.com/scikit-learn/scikit-learn/issues/14383
1061
+ rng = np.random.RandomState(0)
1062
+ X_train = np.array([[5, 2, 2, 1], [10, 1, 2, 7], [3, 1, 1, 1], [8, 4, 2, 2]])
1063
+ X_test = np.array([[np.nan, 2, 4, 5], [np.nan, 4, 1, 2], [np.nan, 1, 10, 1]])
1064
+ imputer = IterativeImputer(
1065
+ initial_strategy="mean", skip_complete=skip_complete, random_state=rng
1066
+ )
1067
+ X_test_est = imputer.fit(X_train).transform(X_test)
1068
+ if skip_complete:
1069
+ # impute with the initial strategy: 'mean'
1070
+ assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0]))
1071
+ else:
1072
+ assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4)
1073
+
1074
+
1075
+ @pytest.mark.parametrize("rs_imputer", [None, 1, np.random.RandomState(seed=1)])
1076
+ @pytest.mark.parametrize("rs_estimator", [None, 1, np.random.RandomState(seed=1)])
1077
+ def test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator):
1078
+ class ZeroEstimator:
1079
+ def __init__(self, random_state):
1080
+ self.random_state = random_state
1081
+
1082
+ def fit(self, *args, **kwargs):
1083
+ return self
1084
+
1085
+ def predict(self, X):
1086
+ return np.zeros(X.shape[0])
1087
+
1088
+ estimator = ZeroEstimator(random_state=rs_estimator)
1089
+ imputer = IterativeImputer(random_state=rs_imputer)
1090
+ X_train = np.zeros((10, 3))
1091
+ imputer.fit(X_train)
1092
+ assert estimator.random_state == rs_estimator
1093
+
1094
+
1095
+ @pytest.mark.parametrize(
1096
+ "X_fit, X_trans, params, msg_err",
1097
+ [
1098
+ (
1099
+ np.array([[-1, 1], [1, 2]]),
1100
+ np.array([[-1, 1], [1, -1]]),
1101
+ {"features": "missing-only", "sparse": "auto"},
1102
+ "have missing values in transform but have no missing values in fit",
1103
+ ),
1104
+ (
1105
+ np.array([["a", "b"], ["c", "a"]], dtype=str),
1106
+ np.array([["a", "b"], ["c", "a"]], dtype=str),
1107
+ {},
1108
+ "MissingIndicator does not support data with dtype",
1109
+ ),
1110
+ ],
1111
+ )
1112
+ def test_missing_indicator_error(X_fit, X_trans, params, msg_err):
1113
+ indicator = MissingIndicator(missing_values=-1)
1114
+ indicator.set_params(**params)
1115
+ with pytest.raises(ValueError, match=msg_err):
1116
+ indicator.fit(X_fit).transform(X_trans)
1117
+
1118
+
1119
+ def _generate_missing_indicator_cases():
1120
+ missing_values_dtypes = [(0, np.int32), (np.nan, np.float64), (-1, np.int32)]
1121
+ arr_types = (
1122
+ [np.array]
1123
+ + CSC_CONTAINERS
1124
+ + CSR_CONTAINERS
1125
+ + COO_CONTAINERS
1126
+ + LIL_CONTAINERS
1127
+ + BSR_CONTAINERS
1128
+ )
1129
+ return [
1130
+ (arr_type, missing_values, dtype)
1131
+ for arr_type, (missing_values, dtype) in product(
1132
+ arr_types, missing_values_dtypes
1133
+ )
1134
+ if not (missing_values == 0 and arr_type is not np.array)
1135
+ ]
1136
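+ # The filter above skips sparse containers combined with missing_values=0:
+ # MissingIndicator rejects that combination, which is covered separately by
+ # test_missing_indicator_raise_on_sparse_with_missing_0 below.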
+
1137
+
1138
+ @pytest.mark.parametrize(
1139
+ "arr_type, missing_values, dtype", _generate_missing_indicator_cases()
1140
+ )
1141
+ @pytest.mark.parametrize(
1142
+ "param_features, n_features, features_indices",
1143
+ [("missing-only", 3, np.array([0, 1, 2])), ("all", 3, np.array([0, 1, 2]))],
1144
+ )
1145
+ def test_missing_indicator_new(
1146
+ missing_values, arr_type, dtype, param_features, n_features, features_indices
1147
+ ):
1148
+ X_fit = np.array([[missing_values, missing_values, 1], [4, 2, missing_values]])
1149
+ X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
1150
+ X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]])
1151
+ X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]])
1152
+
1153
+ # convert the input to the right array format and right dtype
1154
+ X_fit = arr_type(X_fit).astype(dtype)
1155
+ X_trans = arr_type(X_trans).astype(dtype)
1156
+ X_fit_expected = X_fit_expected.astype(dtype)
1157
+ X_trans_expected = X_trans_expected.astype(dtype)
1158
+
1159
+ indicator = MissingIndicator(
1160
+ missing_values=missing_values, features=param_features, sparse=False
1161
+ )
1162
+ X_fit_mask = indicator.fit_transform(X_fit)
1163
+ X_trans_mask = indicator.transform(X_trans)
1164
+
1165
+ assert X_fit_mask.shape[1] == n_features
1166
+ assert X_trans_mask.shape[1] == n_features
1167
+
1168
+ assert_array_equal(indicator.features_, features_indices)
1169
+ assert_allclose(X_fit_mask, X_fit_expected[:, features_indices])
1170
+ assert_allclose(X_trans_mask, X_trans_expected[:, features_indices])
1171
+
1172
+ assert X_fit_mask.dtype == bool
1173
+ assert X_trans_mask.dtype == bool
1174
+ assert isinstance(X_fit_mask, np.ndarray)
1175
+ assert isinstance(X_trans_mask, np.ndarray)
1176
+
1177
+ indicator.set_params(sparse=True)
1178
+ X_fit_mask_sparse = indicator.fit_transform(X_fit)
1179
+ X_trans_mask_sparse = indicator.transform(X_trans)
1180
+
1181
+ assert X_fit_mask_sparse.dtype == bool
1182
+ assert X_trans_mask_sparse.dtype == bool
1183
+ assert X_fit_mask_sparse.format == "csc"
1184
+ assert X_trans_mask_sparse.format == "csc"
1185
+ assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask)
1186
+ assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask)
1187
+
1188
+
1189
+ @pytest.mark.parametrize(
1190
+ "arr_type",
1191
+ CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS,
1192
+ )
1193
+ def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type):
1194
+ # test for sparse input and missing_value == 0
1195
+
1196
+ missing_values = 0
1197
+ X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
1198
+ X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
1199
+
1200
+ # convert the input to the right array format
1201
+ X_fit_sparse = arr_type(X_fit)
1202
+ X_trans_sparse = arr_type(X_trans)
1203
+
1204
+ indicator = MissingIndicator(missing_values=missing_values)
1205
+
1206
+ with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
1207
+ indicator.fit_transform(X_fit_sparse)
1208
+
1209
+ indicator.fit_transform(X_fit)
1210
+ with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
1211
+ indicator.transform(X_trans_sparse)
1212
+
1213
+
1214
+ @pytest.mark.parametrize("param_sparse", [True, False, "auto"])
1215
+ @pytest.mark.parametrize(
1216
+ "arr_type, missing_values",
1217
+ [(np.array, 0)]
1218
+ + list(
1219
+ product(
1220
+ CSC_CONTAINERS
1221
+ + CSR_CONTAINERS
1222
+ + COO_CONTAINERS
1223
+ + LIL_CONTAINERS
1224
+ + BSR_CONTAINERS,
1225
+ [np.nan],
1226
+ )
1227
+ ),
1228
+ )
1229
+ def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse):
1230
+ # check the format of the output with different sparse parameter
1231
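+ # Expected rule encoded below: sparse=True always yields a CSC mask,
+ # sparse=False always a dense ndarray, and sparse="auto" follows the input
+ # format (dense stays dense, sparse becomes CSC; missing_values=0 only
+ # occurs with dense input here).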
+ X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
1232
+ X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
1233
+ X_fit = arr_type(X_fit).astype(np.float64)
1234
+ X_trans = arr_type(X_trans).astype(np.float64)
1235
+
1236
+ indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse)
1237
+ X_fit_mask = indicator.fit_transform(X_fit)
1238
+ X_trans_mask = indicator.transform(X_trans)
1239
+
1240
+ if param_sparse is True:
1241
+ assert X_fit_mask.format == "csc"
1242
+ assert X_trans_mask.format == "csc"
1243
+ elif param_sparse == "auto" and missing_values == 0:
1244
+ assert isinstance(X_fit_mask, np.ndarray)
1245
+ assert isinstance(X_trans_mask, np.ndarray)
1246
+ elif param_sparse is False:
1247
+ assert isinstance(X_fit_mask, np.ndarray)
1248
+ assert isinstance(X_trans_mask, np.ndarray)
1249
+ else:
1250
+ if sparse.issparse(X_fit):
1251
+ assert X_fit_mask.format == "csc"
1252
+ assert X_trans_mask.format == "csc"
1253
+ else:
1254
+ assert isinstance(X_fit_mask, np.ndarray)
1255
+ assert isinstance(X_trans_mask, np.ndarray)
1256
+
1257
+
1258
+ def test_missing_indicator_string():
1259
+ X = np.array([["a", "b", "c"], ["b", "c", "a"]], dtype=object)
1260
+ indicator = MissingIndicator(missing_values="a", features="all")
1261
+ X_trans = indicator.fit_transform(X)
1262
+ assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]]))
1263
+
1264
+
1265
+ @pytest.mark.parametrize(
1266
+ "X, missing_values, X_trans_exp",
1267
+ [
1268
+ (
1269
+ np.array([["a", "b"], ["b", "a"]], dtype=object),
1270
+ "a",
1271
+ np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
1272
+ ),
1273
+ (
1274
+ np.array([[np.nan, 1.0], [1.0, np.nan]]),
1275
+ np.nan,
1276
+ np.array([[1.0, 1.0, True, False], [1.0, 1.0, False, True]]),
1277
+ ),
1278
+ (
1279
+ np.array([[np.nan, "b"], ["b", np.nan]], dtype=object),
1280
+ np.nan,
1281
+ np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
1282
+ ),
1283
+ (
1284
+ np.array([[None, "b"], ["b", None]], dtype=object),
1285
+ None,
1286
+ np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
1287
+ ),
1288
+ ],
1289
+ )
1290
+ def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp):
1291
+ trans = make_union(
1292
+ SimpleImputer(missing_values=missing_values, strategy="most_frequent"),
1293
+ MissingIndicator(missing_values=missing_values),
1294
+ )
1295
+ X_trans = trans.fit_transform(X)
1296
+ assert_array_equal(X_trans, X_trans_exp)
1297
+
1298
+
1299
+ @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
1300
+ @pytest.mark.parametrize(
1301
+ "imputer_missing_values, missing_value, err_msg",
1302
+ [
1303
+ ("NaN", np.nan, "Input X contains NaN"),
1304
+ ("-1", -1, "types are expected to be both numerical."),
1305
+ ],
1306
+ )
1307
+ def test_inconsistent_dtype_X_missing_values(
1308
+ imputer_constructor, imputer_missing_values, missing_value, err_msg
1309
+ ):
1310
+ # regression test for issue #11390. Comparison between incoherent dtype
1311
+ # for X and missing_values was not raising a proper error.
1312
+ rng = np.random.RandomState(42)
1313
+ X = rng.randn(10, 10)
1314
+ X[0, 0] = missing_value
1315
+
1316
+ imputer = imputer_constructor(missing_values=imputer_missing_values)
1317
+
1318
+ with pytest.raises(ValueError, match=err_msg):
1319
+ imputer.fit_transform(X)
1320
+
1321
+
1322
+ def test_missing_indicator_no_missing():
1323
+ # check that all features are dropped if there are no missing values when
1324
+ # features='missing-only' (#13491)
1325
+ X = np.array([[1, 1], [1, 1]])
1326
+
1327
+ mi = MissingIndicator(features="missing-only", missing_values=-1)
1328
+ Xt = mi.fit_transform(X)
1329
+
1330
+ assert Xt.shape[1] == 0
1331
+
1332
+
1333
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
1334
+ def test_missing_indicator_sparse_no_explicit_zeros(csr_container):
1335
+ # Check that non missing values don't become explicit zeros in the mask
1336
+ # generated by missing indicator when X is sparse. (#13491)
1337
+ X = csr_container([[0, 1, 2], [1, 2, 0], [2, 0, 1]])
1338
+
1339
+ mi = MissingIndicator(features="all", missing_values=1)
1340
+ Xt = mi.fit_transform(X)
1341
+
1342
+ assert Xt.getnnz() == Xt.sum()
1343
+
1344
+
1345
+ @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
1346
+ def test_imputer_without_indicator(imputer_constructor):
1347
+ X = np.array([[1, 1], [1, 1]])
1348
+ imputer = imputer_constructor()
1349
+ imputer.fit(X)
1350
+
1351
+ assert imputer.indicator_ is None
1352
+
1353
+
1354
+ @pytest.mark.parametrize(
1355
+ "arr_type",
1356
+ CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS,
1357
+ )
1358
+ def test_simple_imputation_add_indicator_sparse_matrix(arr_type):
1359
+ X_sparse = arr_type([[np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9]])
1360
+ X_true = np.array(
1361
+ [
1362
+ [3.0, 1.0, 5.0, 1.0, 0.0, 0.0],
1363
+ [2.0, 2.0, 1.0, 0.0, 1.0, 0.0],
1364
+ [6.0, 3.0, 5.0, 0.0, 0.0, 1.0],
1365
+ [1.0, 2.0, 9.0, 0.0, 0.0, 0.0],
1366
+ ]
1367
+ )
1368
+
1369
+ imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)
1370
+ X_trans = imputer.fit_transform(X_sparse)
1371
+
1372
+ assert sparse.issparse(X_trans)
1373
+ assert X_trans.shape == X_true.shape
1374
+ assert_allclose(X_trans.toarray(), X_true)
1375
+
1376
+
1377
+ @pytest.mark.parametrize(
1378
+ "strategy, expected", [("most_frequent", "b"), ("constant", "missing_value")]
1379
+ )
1380
+ def test_simple_imputation_string_list(strategy, expected):
1381
+ X = [["a", "b"], ["c", np.nan]]
1382
+
1383
+ X_true = np.array([["a", "b"], ["c", expected]], dtype=object)
1384
+
1385
+ imputer = SimpleImputer(strategy=strategy)
1386
+ X_trans = imputer.fit_transform(X)
1387
+
1388
+ assert_array_equal(X_trans, X_true)
1389
+
1390
+
1391
+ @pytest.mark.parametrize(
1392
+ "order, idx_order",
1393
+ [("ascending", [3, 4, 2, 0, 1]), ("descending", [1, 0, 2, 4, 3])],
1394
+ )
1395
+ def test_imputation_order(order, idx_order):
1396
+ # regression test for #15393
1397
+ rng = np.random.RandomState(42)
1398
+ X = rng.rand(100, 5)
1399
+ X[:50, 1] = np.nan
1400
+ X[:30, 0] = np.nan
1401
+ X[:20, 2] = np.nan
1402
+ X[:10, 4] = np.nan
1403
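+ # Per-column missing counts are (30, 50, 20, 0, 10): "ascending" visits the
+ # features with the fewest missing values first, "descending" the reverse,
+ # which is exactly what the expected idx_order parametrization encodes.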
+
1404
+ with pytest.warns(ConvergenceWarning):
1405
+ trs = IterativeImputer(max_iter=1, imputation_order=order, random_state=0).fit(
1406
+ X
1407
+ )
1408
+ idx = [x.feat_idx for x in trs.imputation_sequence_]
1409
+ assert idx == idx_order
1410
+
1411
+
1412
+ @pytest.mark.parametrize("missing_value", [-1, np.nan])
1413
+ def test_simple_imputation_inverse_transform(missing_value):
1414
+ # Test inverse_transform feature for np.nan
1415
+ X_1 = np.array(
1416
+ [
1417
+ [9, missing_value, 3, -1],
1418
+ [4, -1, 5, 4],
1419
+ [6, 7, missing_value, -1],
1420
+ [8, 9, 0, missing_value],
1421
+ ]
1422
+ )
1423
+
1424
+ X_2 = np.array(
1425
+ [
1426
+ [5, 4, 2, 1],
1427
+ [2, 1, missing_value, 3],
1428
+ [9, missing_value, 7, 1],
1429
+ [6, 4, 2, missing_value],
1430
+ ]
1431
+ )
1432
+
1433
+ X_3 = np.array(
1434
+ [
1435
+ [1, missing_value, 5, 9],
1436
+ [missing_value, 4, missing_value, missing_value],
1437
+ [2, missing_value, 7, missing_value],
1438
+ [missing_value, 3, missing_value, 8],
1439
+ ]
1440
+ )
1441
+
1442
+ X_4 = np.array(
1443
+ [
1444
+ [1, 1, 1, 3],
1445
+ [missing_value, 2, missing_value, 1],
1446
+ [2, 3, 3, 4],
1447
+ [missing_value, 4, missing_value, 2],
1448
+ ]
1449
+ )
1450
+
1451
+ imputer = SimpleImputer(
1452
+ missing_values=missing_value, strategy="mean", add_indicator=True
1453
+ )
1454
+
1455
+ X_1_trans = imputer.fit_transform(X_1)
1456
+ X_1_inv_trans = imputer.inverse_transform(X_1_trans)
1457
+
1458
+ X_2_trans = imputer.transform(X_2) # test on new data
1459
+ X_2_inv_trans = imputer.inverse_transform(X_2_trans)
1460
+
1461
+ assert_array_equal(X_1_inv_trans, X_1)
1462
+ assert_array_equal(X_2_inv_trans, X_2)
1463
+
1464
+ for X in [X_3, X_4]:
1465
+ X_trans = imputer.fit_transform(X)
1466
+ X_inv_trans = imputer.inverse_transform(X_trans)
1467
+ assert_array_equal(X_inv_trans, X)
1468
+
1469
+
1470
+ @pytest.mark.parametrize("missing_value", [-1, np.nan])
1471
+ def test_simple_imputation_inverse_transform_exceptions(missing_value):
1472
+ X_1 = np.array(
1473
+ [
1474
+ [9, missing_value, 3, -1],
1475
+ [4, -1, 5, 4],
1476
+ [6, 7, missing_value, -1],
1477
+ [8, 9, 0, missing_value],
1478
+ ]
1479
+ )
1480
+
1481
+ imputer = SimpleImputer(missing_values=missing_value, strategy="mean")
1482
+ X_1_trans = imputer.fit_transform(X_1)
1483
+ with pytest.raises(
1484
+ ValueError, match=f"Got 'add_indicator={imputer.add_indicator}'"
1485
+ ):
1486
+ imputer.inverse_transform(X_1_trans)
1487
+
1488
+
1489
+ @pytest.mark.parametrize(
1490
+ "expected,array,dtype,extra_value,n_repeat",
1491
+ [
1492
+ # array of object dtype
1493
+ ("extra_value", ["a", "b", "c"], object, "extra_value", 2),
1494
+ (
1495
+ "most_frequent_value",
1496
+ ["most_frequent_value", "most_frequent_value", "value"],
1497
+ object,
1498
+ "extra_value",
1499
+ 1,
1500
+ ),
1501
+ ("a", ["min_value", "min_valuevalue"], object, "a", 2),
1502
+ ("min_value", ["min_value", "min_value", "value"], object, "z", 2),
1503
+ # array of numeric dtype
1504
+ (10, [1, 2, 3], int, 10, 2),
1505
+ (1, [1, 1, 2], int, 10, 1),
1506
+ (10, [20, 20, 1], int, 10, 2),
1507
+ (1, [1, 1, 20], int, 10, 2),
1508
+ ],
1509
+ )
1510
+ def test_most_frequent(expected, array, dtype, extra_value, n_repeat):
1511
+ assert expected == _most_frequent(
1512
+ np.array(array, dtype=dtype), extra_value, n_repeat
1513
+ )
1514
+
1515
+
1516
+ @pytest.mark.parametrize(
1517
+ "initial_strategy", ["mean", "median", "most_frequent", "constant"]
1518
+ )
1519
+ def test_iterative_imputer_keep_empty_features(initial_strategy):
1520
+ """Check the behaviour of the iterative imputer with different initial strategy
1521
+ and keeping empty features (i.e. features containing only missing values).
1522
+ """
1523
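+ # With keep_empty_features=True the all-missing second column below is
+ # retained; the assertions expect it to be filled with 0 for every initial
+ # strategy tested, mirroring SimpleImputer's handling of empty features.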
+ X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
1524
+
1525
+ imputer = IterativeImputer(
1526
+ initial_strategy=initial_strategy, keep_empty_features=True
1527
+ )
1528
+ X_imputed = imputer.fit_transform(X)
1529
+ assert_allclose(X_imputed[:, 1], 0)
1530
+ X_imputed = imputer.transform(X)
1531
+ assert_allclose(X_imputed[:, 1], 0)
1532
+
1533
+
1534
+ def test_iterative_imputer_constant_fill_value():
1535
+ """Check that we propagate properly the parameter `fill_value`."""
1536
+ X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])
1537
+
1538
+ fill_value = 100
1539
+ imputer = IterativeImputer(
1540
+ missing_values=-1,
1541
+ initial_strategy="constant",
1542
+ fill_value=fill_value,
1543
+ max_iter=0,
1544
+ )
1545
+ imputer.fit_transform(X)
1546
+ assert_array_equal(imputer.initial_imputer_.statistics_, fill_value)
1547
+
1548
+
1549
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
1550
+ def test_knn_imputer_keep_empty_features(keep_empty_features):
1551
+ """Check the behaviour of `keep_empty_features` for `KNNImputer`."""
1552
+ X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
1553
+
1554
+ imputer = KNNImputer(keep_empty_features=keep_empty_features)
1555
+
1556
+ for method in ["fit_transform", "transform"]:
1557
+ X_imputed = getattr(imputer, method)(X)
1558
+ if keep_empty_features:
1559
+ assert X_imputed.shape == X.shape
1560
+ assert_array_equal(X_imputed[:, 1], 0)
1561
+ else:
1562
+ assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
1563
+
1564
+
1565
+ def test_simple_impute_pd_na():
1566
+ pd = pytest.importorskip("pandas")
1567
+
1568
+ # Impute pandas array of string types.
1569
+ df = pd.DataFrame({"feature": pd.Series(["abc", None, "de"], dtype="string")})
1570
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value="na")
1571
+ _assert_array_equal_and_same_dtype(
1572
+ imputer.fit_transform(df), np.array([["abc"], ["na"], ["de"]], dtype=object)
1573
+ )
1574
+
1575
+ # Impute pandas array of string types without any missing values.
1576
+ df = pd.DataFrame({"feature": pd.Series(["abc", "de", "fgh"], dtype="string")})
1577
+ imputer = SimpleImputer(fill_value="ok", strategy="constant")
1578
+ _assert_array_equal_and_same_dtype(
1579
+ imputer.fit_transform(df), np.array([["abc"], ["de"], ["fgh"]], dtype=object)
1580
+ )
1581
+
1582
+ # Impute pandas array of integer types.
1583
+ df = pd.DataFrame({"feature": pd.Series([1, None, 3], dtype="Int64")})
1584
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-1)
1585
+ _assert_allclose_and_same_dtype(
1586
+ imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
1587
+ )
1588
+
1589
+ # Use `np.nan` also works.
1590
+ imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=-1)
1591
+ _assert_allclose_and_same_dtype(
1592
+ imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
1593
+ )
1594
+
1595
+ # Impute pandas array of integer types with 'median' strategy.
1596
+ df = pd.DataFrame({"feature": pd.Series([1, None, 2, 3], dtype="Int64")})
1597
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
1598
+ _assert_allclose_and_same_dtype(
1599
+ imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype="float64")
1600
+ )
1601
+
1602
+ # Impute pandas array of integer types with 'mean' strategy.
1603
+ df = pd.DataFrame({"feature": pd.Series([1, None, 2], dtype="Int64")})
1604
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="mean")
1605
+ _assert_allclose_and_same_dtype(
1606
+ imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype="float64")
1607
+ )
1608
+
1609
+ # Impute pandas array of float types.
1610
+ df = pd.DataFrame({"feature": pd.Series([1.0, None, 3.0], dtype="float64")})
1611
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-2.0)
1612
+ _assert_allclose_and_same_dtype(
1613
+ imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype="float64")
1614
+ )
1615
+
1616
+ # Impute pandas array of float types with 'median' strategy.
1617
+ df = pd.DataFrame({"feature": pd.Series([1.0, None, 2.0, 3.0], dtype="float64")})
1618
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
1619
+ _assert_allclose_and_same_dtype(
1620
+ imputer.fit_transform(df),
1621
+ np.array([[1.0], [2.0], [2.0], [3.0]], dtype="float64"),
1622
+ )
1623
+
1624
+
1625
+ def test_missing_indicator_feature_names_out():
1626
+ """Check that missing indicator return the feature names with a prefix."""
1627
+ pd = pytest.importorskip("pandas")
1628
+
1629
+ missing_values = np.nan
1630
+ X = pd.DataFrame(
1631
+ [
1632
+ [missing_values, missing_values, 1, missing_values],
1633
+ [4, missing_values, 2, 10],
1634
+ ],
1635
+ columns=["a", "b", "c", "d"],
1636
+ )
1637
+
1638
+ indicator = MissingIndicator(missing_values=missing_values).fit(X)
1639
+ feature_names = indicator.get_feature_names_out()
1640
+ expected_names = ["missingindicator_a", "missingindicator_b", "missingindicator_d"]
1641
+ assert_array_equal(expected_names, feature_names)
1642
+
1643
+
1644
+ def test_imputer_lists_fit_transform():
1645
+ """Check transform uses object dtype when fitted on an object dtype.
1646
+
1647
+ Non-regression test for #19572.
1648
+ """
1649
+
1650
+ X = [["a", "b"], ["c", "b"], ["a", "a"]]
1651
+ imp_frequent = SimpleImputer(strategy="most_frequent").fit(X)
1652
+ X_trans = imp_frequent.transform([[np.nan, np.nan]])
1653
+ assert X_trans.dtype == object
1654
+ assert_array_equal(X_trans, [["a", "b"]])
1655
+
1656
+
1657
+ @pytest.mark.parametrize("dtype_test", [np.float32, np.float64])
1658
+ def test_imputer_transform_preserves_numeric_dtype(dtype_test):
1659
+ """Check transform preserves numeric dtype independent of fit dtype."""
1660
+ X = np.asarray(
1661
+ [[1.2, 3.4, np.nan], [np.nan, 1.2, 1.3], [4.2, 2, 1]], dtype=np.float64
1662
+ )
1663
+ imp = SimpleImputer().fit(X)
1664
+
1665
+ X_test = np.asarray([[np.nan, np.nan, np.nan]], dtype=dtype_test)
1666
+ X_trans = imp.transform(X_test)
1667
+ assert X_trans.dtype == dtype_test
1668
+
1669
+
1670
+ @pytest.mark.parametrize("array_type", ["array", "sparse"])
1671
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
1672
+ def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features):
1673
+ """Check the behaviour of `keep_empty_features` with `strategy='constant'.
1674
+ For backward compatibility, a column full of missing values will always be
1675
+ fill and never dropped.
1676
+ """
1677
+ X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
1678
+ X = _convert_container(X, array_type)
1679
+ fill_value = 10
1680
+ imputer = SimpleImputer(
1681
+ strategy="constant",
1682
+ fill_value=fill_value,
1683
+ keep_empty_features=keep_empty_features,
1684
+ )
1685
+
1686
+ for method in ["fit_transform", "transform"]:
1687
+ X_imputed = getattr(imputer, method)(X)
1688
+ assert X_imputed.shape == X.shape
1689
+ constant_feature = (
1690
+ X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0]
1691
+ )
1692
+ assert_array_equal(constant_feature, fill_value)
1693
+
1694
+
1695
+ @pytest.mark.parametrize("array_type", ["array", "sparse"])
1696
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
1697
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
1698
+ def test_simple_imputer_keep_empty_features(strategy, array_type, keep_empty_features):
1699
+ """Check the behaviour of `keep_empty_features` with all strategies but
1700
+ 'constant'.
1701
+ """
1702
+ X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
1703
+ X = _convert_container(X, array_type)
1704
+ imputer = SimpleImputer(strategy=strategy, keep_empty_features=keep_empty_features)
1705
+
1706
+ for method in ["fit_transform", "transform"]:
1707
+ X_imputed = getattr(imputer, method)(X)
1708
+ if keep_empty_features:
1709
+ assert X_imputed.shape == X.shape
1710
+ constant_feature = (
1711
+ X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0]
1712
+ )
1713
+ assert_array_equal(constant_feature, 0)
1714
+ else:
1715
+ assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
1716
+
1717
+
1718
+ def test_simple_imputer_constant_fill_value_casting():
1719
+ """Check that we raise a proper error message when we cannot cast the fill value
1720
+ to the input data type. Otherwise, check that the casting is done properly.
1721
+
1722
+ Non-regression test for:
1723
+ https://github.com/scikit-learn/scikit-learn/issues/28309
1724
+ """
1725
+ # cannot cast fill_value at fit
1726
+ fill_value = 1.5
1727
+ X_int64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.int64)
1728
+ imputer = SimpleImputer(
1729
+ strategy="constant", fill_value=fill_value, missing_values=2
1730
+ )
1731
+ err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast"
1732
+ with pytest.raises(ValueError, match=re.escape(err_msg)):
1733
+ imputer.fit(X_int64)
1734
+
1735
+ # cannot cast fill_value at transform
1736
+ X_float64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float64)
1737
+ imputer.fit(X_float64)
1738
+ err_msg = (
1739
+ f"The dtype of the filling value (i.e. {imputer.statistics_.dtype!r}) "
1740
+ "cannot be cast"
1741
+ )
1742
+ with pytest.raises(ValueError, match=re.escape(err_msg)):
1743
+ imputer.transform(X_int64)
1744
+
1745
+ # check that no error is raised when having the same kind of dtype
1746
+ fill_value_list = [np.float64(1.5), 1.5, 1]
1747
+ X_float32 = X_float64.astype(np.float32)
1748
+
1749
+ for fill_value in fill_value_list:
1750
+ imputer = SimpleImputer(
1751
+ strategy="constant", fill_value=fill_value, missing_values=2
1752
+ )
1753
+ X_trans = imputer.fit_transform(X_float32)
1754
+ assert X_trans.dtype == X_float32.dtype
env-llmeval/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py ADDED
@@ -0,0 +1,547 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn import config_context
5
+ from sklearn.impute import KNNImputer
6
+ from sklearn.metrics.pairwise import nan_euclidean_distances, pairwise_distances
7
+ from sklearn.neighbors import KNeighborsRegressor
8
+ from sklearn.utils._testing import assert_allclose
9
+
10
+
11
+ @pytest.mark.parametrize("weights", ["uniform", "distance"])
12
+ @pytest.mark.parametrize("n_neighbors", range(1, 6))
13
+ def test_knn_imputer_shape(weights, n_neighbors):
14
+ # Verify the shapes of the imputed matrix for different weights and
15
+ # number of neighbors.
16
+ n_rows = 10
17
+ n_cols = 2
18
+ X = np.random.rand(n_rows, n_cols)
19
+ X[0, 0] = np.nan
20
+
21
+ imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights)
22
+ X_imputed = imputer.fit_transform(X)
23
+ assert X_imputed.shape == (n_rows, n_cols)
24
+
25
+
26
+ @pytest.mark.parametrize("na", [np.nan, -1])
27
+ def test_knn_imputer_default_with_invalid_input(na):
28
+ # Test imputation with default values and invalid input
29
+
30
+ # Test with inf present
31
+ X = np.array(
32
+ [
33
+ [np.inf, 1, 1, 2, na],
34
+ [2, 1, 2, 2, 3],
35
+ [3, 2, 3, 3, 8],
36
+ [na, 6, 0, 5, 13],
37
+ [na, 7, 0, 7, 8],
38
+ [6, 6, 2, 5, 7],
39
+ ]
40
+ )
41
+ with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"):
42
+ KNNImputer(missing_values=na).fit(X)
43
+
44
+ # Test with inf present in matrix passed in transform()
45
+ X = np.array(
46
+ [
47
+ [np.inf, 1, 1, 2, na],
48
+ [2, 1, 2, 2, 3],
49
+ [3, 2, 3, 3, 8],
50
+ [na, 6, 0, 5, 13],
51
+ [na, 7, 0, 7, 8],
52
+ [6, 6, 2, 5, 7],
53
+ ]
54
+ )
55
+
56
+ X_fit = np.array(
57
+ [
58
+ [0, 1, 1, 2, na],
59
+ [2, 1, 2, 2, 3],
60
+ [3, 2, 3, 3, 8],
61
+ [na, 6, 0, 5, 13],
62
+ [na, 7, 0, 7, 8],
63
+ [6, 6, 2, 5, 7],
64
+ ]
65
+ )
66
+ imputer = KNNImputer(missing_values=na).fit(X_fit)
67
+ with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"):
68
+ imputer.transform(X)
69
+
70
+ # Test with missing_values=0 when NaN present
71
+ imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
72
+ X = np.array(
73
+ [
74
+ [np.nan, 0, 0, 0, 5],
75
+ [np.nan, 1, 0, np.nan, 3],
76
+ [np.nan, 2, 0, 0, 0],
77
+ [np.nan, 6, 0, 5, 13],
78
+ ]
79
+ )
80
+ msg = "Input X contains NaN"
81
+ with pytest.raises(ValueError, match=msg):
82
+ imputer.fit(X)
83
+
84
+ X = np.array(
85
+ [
86
+ [0, 0],
87
+ [np.nan, 2],
88
+ ]
89
+ )
90
+
91
+
92
+ @pytest.mark.parametrize("na", [np.nan, -1])
93
+ def test_knn_imputer_removes_all_na_features(na):
94
+ X = np.array(
95
+ [
96
+ [1, 1, na, 1, 1, 1.0],
97
+ [2, 3, na, 2, 2, 2],
98
+ [3, 4, na, 3, 3, na],
99
+ [6, 4, na, na, 6, 6],
100
+ ]
101
+ )
102
+ knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X)
103
+
104
+ X_transform = knn.transform(X)
105
+ assert not np.isnan(X_transform).any()
106
+ assert X_transform.shape == (4, 5)
107
+
108
+ X_test = np.arange(0, 12).reshape(2, 6)
109
+ X_transform = knn.transform(X_test)
110
+ assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform)
111
+
112
+
113
+ @pytest.mark.parametrize("na", [np.nan, -1])
114
+ def test_knn_imputer_zero_nan_imputes_the_same(na):
115
+ # Test with an imputable matrix and compare with different missing_values
116
+ X_zero = np.array(
117
+ [
118
+ [1, 0, 1, 1, 1.0],
119
+ [2, 2, 2, 2, 2],
120
+ [3, 3, 3, 3, 0],
121
+ [6, 6, 0, 6, 6],
122
+ ]
123
+ )
124
+
125
+ X_nan = np.array(
126
+ [
127
+ [1, na, 1, 1, 1.0],
128
+ [2, 2, 2, 2, 2],
129
+ [3, 3, 3, 3, na],
130
+ [6, 6, na, 6, 6],
131
+ ]
132
+ )
133
+
134
+ X_imputed = np.array(
135
+ [
136
+ [1, 2.5, 1, 1, 1.0],
137
+ [2, 2, 2, 2, 2],
138
+ [3, 3, 3, 3, 1.5],
139
+ [6, 6, 2.5, 6, 6],
140
+ ]
141
+ )
142
+
143
+ imputer_zero = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
144
+
145
+ imputer_nan = KNNImputer(missing_values=na, n_neighbors=2, weights="uniform")
146
+
147
+ assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed)
148
+ assert_allclose(
149
+ imputer_zero.fit_transform(X_zero), imputer_nan.fit_transform(X_nan)
150
+ )
151
+
152
+
153
+ @pytest.mark.parametrize("na", [np.nan, -1])
154
+ def test_knn_imputer_verify(na):
155
+ # Test with an imputable matrix
156
+ X = np.array(
157
+ [
158
+ [1, 0, 0, 1],
159
+ [2, 1, 2, na],
160
+ [3, 2, 3, na],
161
+ [na, 4, 5, 5],
162
+ [6, na, 6, 7],
163
+ [8, 8, 8, 8],
164
+ [16, 15, 18, 19],
165
+ ]
166
+ )
167
+
168
+ X_imputed = np.array(
169
+ [
170
+ [1, 0, 0, 1],
171
+ [2, 1, 2, 8],
172
+ [3, 2, 3, 8],
173
+ [4, 4, 5, 5],
174
+ [6, 3, 6, 7],
175
+ [8, 8, 8, 8],
176
+ [16, 15, 18, 19],
177
+ ]
178
+ )
179
+
180
+ imputer = KNNImputer(missing_values=na)
181
+ assert_allclose(imputer.fit_transform(X), X_imputed)
182
+
183
+ # Test when there is not enough neighbors
184
+ X = np.array(
185
+ [
186
+ [1, 0, 0, na],
187
+ [2, 1, 2, na],
188
+ [3, 2, 3, na],
189
+ [4, 4, 5, na],
190
+ [6, 7, 6, na],
191
+ [8, 8, 8, na],
192
+ [20, 20, 20, 20],
193
+ [22, 22, 22, 22],
194
+ ]
195
+ )
196
+
197
+ # Not enough neighbors, use column mean from training
198
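+ # Only the last two training rows have column 3 observed, so they are the
+ # only usable donors; their uniform average, which here also equals the
+ # observed column mean from fit, is (20 + 22) / 2.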
+ X_impute_value = (20 + 22) / 2
199
+ X_imputed = np.array(
200
+ [
201
+ [1, 0, 0, X_impute_value],
202
+ [2, 1, 2, X_impute_value],
203
+ [3, 2, 3, X_impute_value],
204
+ [4, 4, 5, X_impute_value],
205
+ [6, 7, 6, X_impute_value],
206
+ [8, 8, 8, X_impute_value],
207
+ [20, 20, 20, 20],
208
+ [22, 22, 22, 22],
209
+ ]
210
+ )
211
+
212
+ imputer = KNNImputer(missing_values=na)
213
+ assert_allclose(imputer.fit_transform(X), X_imputed)
214
+
215
+ # Test when data in fit() and transform() are different
216
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 16]])
217
+
218
+ X1 = np.array([[1, 0], [3, 2], [4, na]])
219
+
220
+ X_2_1 = (0 + 3 + 6 + 7 + 8) / 5
221
+ X1_imputed = np.array([[1, 0], [3, 2], [4, X_2_1]])
222
+
223
+ imputer = KNNImputer(missing_values=na)
224
+ assert_allclose(imputer.fit(X).transform(X1), X1_imputed)
225
+
226
+
227
+ @pytest.mark.parametrize("na", [np.nan, -1])
228
+ def test_knn_imputer_one_n_neighbors(na):
229
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
230
+
231
+ X_imputed = np.array([[0, 0], [4, 2], [4, 3], [5, 3], [7, 7], [7, 8], [14, 13]])
232
+
233
+ imputer = KNNImputer(n_neighbors=1, missing_values=na)
234
+
235
+ assert_allclose(imputer.fit_transform(X), X_imputed)
236
+
237
+
238
+ @pytest.mark.parametrize("na", [np.nan, -1])
239
+ def test_knn_imputer_all_samples_are_neighbors(na):
240
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
241
+
242
+ X_imputed = np.array([[0, 0], [6, 2], [4, 3], [5, 5.5], [7, 7], [6, 8], [14, 13]])
243
+
244
+ n_neighbors = X.shape[0] - 1
245
+ imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
246
+
247
+ assert_allclose(imputer.fit_transform(X), X_imputed)
248
+
249
+ n_neighbors = X.shape[0]
250
+ imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
251
+ assert_allclose(imputer_plus1.fit_transform(X), X_imputed)
252
+
253
+
254
+ @pytest.mark.parametrize("na", [np.nan, -1])
255
+ def test_knn_imputer_weight_uniform(na):
256
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])
257
+
258
+ # Test with "uniform" weight (or unweighted)
259
+ X_imputed_uniform = np.array(
260
+ [[0, 0], [5, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
261
+ )
262
+
263
+ imputer = KNNImputer(weights="uniform", missing_values=na)
264
+ assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
265
+
266
+ # Test with "callable" weight
267
+ def no_weight(dist):
268
+ return None
269
+
270
+ imputer = KNNImputer(weights=no_weight, missing_values=na)
271
+ assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
272
+
273
+ # Test with "callable" uniform weight
274
+ def uniform_weight(dist):
275
+ return np.ones_like(dist)
276
+
277
+ imputer = KNNImputer(weights=uniform_weight, missing_values=na)
278
+ assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
279
+
280
+
281
+ @pytest.mark.parametrize("na", [np.nan, -1])
282
+ def test_knn_imputer_weight_distance(na):
283
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])
284
+
285
+ # Test with "distance" weight
286
+ nn = KNeighborsRegressor(metric="euclidean", weights="distance")
287
+ X_rows_idx = [0, 2, 3, 4, 5, 6]
288
+ nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0])
289
+ knn_imputed_value = nn.predict(X[1:2, 1:])[0]
290
+
291
+ # Manual calculation
292
+ X_neighbors_idx = [0, 2, 3, 4, 5]
293
+ dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na)
294
+ weights = 1 / dist[:, X_neighbors_idx].ravel()
295
+ manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights)
296
+
297
+ X_imputed_distance1 = np.array(
298
+ [[0, 0], [manual_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
299
+ )
300
+
301
+ # NearestNeighbor calculation
302
+ X_imputed_distance2 = np.array(
303
+ [[0, 0], [knn_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
304
+ )
305
+
306
+ imputer = KNNImputer(weights="distance", missing_values=na)
307
+ assert_allclose(imputer.fit_transform(X), X_imputed_distance1)
308
+ assert_allclose(imputer.fit_transform(X), X_imputed_distance2)
309
+
310
+ # Test with weights = "distance" and n_neighbors=2
311
+ X = np.array(
312
+ [
313
+ [na, 0, 0],
314
+ [2, 1, 2],
315
+ [3, 2, 3],
316
+ [4, 5, 5],
317
+ ]
318
+ )
319
+
320
+ # neighbors are rows 1, 2, the nan_euclidean_distances are:
321
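+ # nan_euclidean rescales the squared distance over observed coordinates by
+ # n_features / n_present; row 0 has 1 of its 3 entries missing, hence the
+ # 3/2 factor in the expressions below.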
+ dist_0_1 = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2))
322
+ dist_0_2 = np.sqrt((3 / 2) * ((2 - 0) ** 2 + (3 - 0) ** 2))
323
+ imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2])
324
+
325
+ X_imputed = np.array(
326
+ [
327
+ [imputed_value, 0, 0],
328
+ [2, 1, 2],
329
+ [3, 2, 3],
330
+ [4, 5, 5],
331
+ ]
332
+ )
333
+
334
+ imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na)
335
+ assert_allclose(imputer.fit_transform(X), X_imputed)
336
+
337
+ # Test with varying missingness patterns
338
+ X = np.array(
339
+ [
340
+ [1, 0, 0, 1],
341
+ [0, na, 1, na],
342
+ [1, 1, 1, na],
343
+ [0, 1, 0, 0],
344
+ [0, 0, 0, 0],
345
+ [1, 0, 1, 1],
346
+ [10, 10, 10, 10],
347
+ ]
348
+ )
349
+
350
+ # Get weights of donor neighbors
351
+ dist = nan_euclidean_distances(X, missing_values=na)
352
+ r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]]
353
+ r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]]
354
+ r1c1_nbor_wt = 1 / r1c1_nbor_dists
355
+ r1c3_nbor_wt = 1 / r1c3_nbor_dists
356
+
357
+ r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]]
358
+ r2c3_nbor_wt = 1 / r2c3_nbor_dists
359
+
360
+ # Collect donor values
361
+ col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy()
362
+ col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy()
363
+
364
+ # Final imputed values
365
+ r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt)
366
+ r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt)
367
+ r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt)
368
+
369
+ X_imputed = np.array(
370
+ [
371
+ [1, 0, 0, 1],
372
+ [0, r1c1_imp, 1, r1c3_imp],
373
+ [1, 1, 1, r2c3_imp],
374
+ [0, 1, 0, 0],
375
+ [0, 0, 0, 0],
376
+ [1, 0, 1, 1],
377
+ [10, 10, 10, 10],
378
+ ]
379
+ )
380
+
381
+ imputer = KNNImputer(weights="distance", missing_values=na)
382
+ assert_allclose(imputer.fit_transform(X), X_imputed)
383
+
384
+ X = np.array(
385
+ [
386
+ [0, 0, 0, na],
387
+ [1, 1, 1, na],
388
+ [2, 2, na, 2],
389
+ [3, 3, 3, 3],
390
+ [4, 4, 4, 4],
391
+ [5, 5, 5, 5],
392
+ [6, 6, 6, 6],
393
+ [na, 7, 7, 7],
394
+ ]
395
+ )
396
+
397
+ dist = pairwise_distances(
398
+ X, metric="nan_euclidean", squared=False, missing_values=na
399
+ )
400
+
401
+ # Calculate weights
402
+ r0c3_w = 1.0 / dist[0, 2:-1]
403
+ r1c3_w = 1.0 / dist[1, 2:-1]
404
+ r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)]
405
+ r7c0_w = 1.0 / dist[7, 2:7]
406
+
407
+ # Calculate weighted averages
408
+ r0c3 = np.average(X[2:-1, -1], weights=r0c3_w)
409
+ r1c3 = np.average(X[2:-1, -1], weights=r1c3_w)
410
+ r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w)
411
+ r7c0 = np.average(X[2:7, 0], weights=r7c0_w)
412
+
413
+ X_imputed = np.array(
414
+ [
415
+ [0, 0, 0, r0c3],
416
+ [1, 1, 1, r1c3],
417
+ [2, 2, r2c2, 2],
418
+ [3, 3, 3, 3],
419
+ [4, 4, 4, 4],
420
+ [5, 5, 5, 5],
421
+ [6, 6, 6, 6],
422
+ [r7c0, 7, 7, 7],
423
+ ]
424
+ )
425
+
426
+ imputer_comp_wt = KNNImputer(missing_values=na, weights="distance")
427
+ assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed)
428
+
429
+
430
+ def test_knn_imputer_callable_metric():
431
+ # Define callable metric that returns the l1 norm:
432
+ def custom_callable(x, y, missing_values=np.nan, squared=False):
433
+ x = np.ma.array(x, mask=np.isnan(x))
434
+ y = np.ma.array(y, mask=np.isnan(y))
435
+ dist = np.nansum(np.abs(x - y))
436
+ return dist
437
+
438
+ X = np.array([[4, 3, 3, np.nan], [6, 9, 6, 9], [4, 8, 6, 9], [np.nan, 9, 11, 10.0]])
439
+
440
+ X_0_3 = (9 + 9) / 2
441
+ X_3_0 = (6 + 4) / 2
442
+ X_imputed = np.array(
443
+ [[4, 3, 3, X_0_3], [6, 9, 6, 9], [4, 8, 6, 9], [X_3_0, 9, 11, 10.0]]
444
+ )
445
+
446
+ imputer = KNNImputer(n_neighbors=2, metric=custom_callable)
447
+ assert_allclose(imputer.fit_transform(X), X_imputed)
448
+
449
+
450
+ @pytest.mark.parametrize("working_memory", [None, 0])
451
+ @pytest.mark.parametrize("na", [-1, np.nan])
452
+ # Note that we use working_memory=0 to ensure that chunking is tested, even
453
+ # for a small dataset. However, it should raise a UserWarning that we ignore.
454
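+ # working_memory (set via sklearn.config_context below) bounds the chunk
+ # size used by the chunked pairwise-distance computation; forcing it to 0
+ # makes even this tiny dataset exercise the chunked code path.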
+ @pytest.mark.filterwarnings("ignore:adhere to working_memory")
455
+ def test_knn_imputer_with_simple_example(na, working_memory):
456
+ X = np.array(
457
+ [
458
+ [0, na, 0, na],
459
+ [1, 1, 1, na],
460
+ [2, 2, na, 2],
461
+ [3, 3, 3, 3],
462
+ [4, 4, 4, 4],
463
+ [5, 5, 5, 5],
464
+ [6, 6, 6, 6],
465
+ [na, 7, 7, 7],
466
+ ]
467
+ )
468
+
469
+ r0c1 = np.mean(X[1:6, 1])
470
+ r0c3 = np.mean(X[2:-1, -1])
471
+ r1c3 = np.mean(X[2:-1, -1])
472
+ r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2])
473
+ r7c0 = np.mean(X[2:-1, 0])
474
+
475
+ X_imputed = np.array(
476
+ [
477
+ [0, r0c1, 0, r0c3],
478
+ [1, 1, 1, r1c3],
479
+ [2, 2, r2c2, 2],
480
+ [3, 3, 3, 3],
481
+ [4, 4, 4, 4],
482
+ [5, 5, 5, 5],
483
+ [6, 6, 6, 6],
484
+ [r7c0, 7, 7, 7],
485
+ ]
486
+ )
487
+
488
+ with config_context(working_memory=working_memory):
489
+ imputer_comp = KNNImputer(missing_values=na)
490
+ assert_allclose(imputer_comp.fit_transform(X), X_imputed)
491
+
492
+
493
+ @pytest.mark.parametrize("na", [-1, np.nan])
494
+ @pytest.mark.parametrize("weights", ["uniform", "distance"])
495
+ def test_knn_imputer_not_enough_valid_distances(na, weights):
496
+ # Samples with needed feature has nan distance
497
+ X1 = np.array([[na, 11], [na, 1], [3, na]])
498
+ X1_imputed = np.array([[3, 11], [3, 1], [3, 6]])
499
+
500
+ knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights)
501
+ assert_allclose(knn.fit_transform(X1), X1_imputed)
502
+
503
+ X2 = np.array([[4, na]])
504
+ X2_imputed = np.array([[4, 6]])
505
+ assert_allclose(knn.transform(X2), X2_imputed)
506
+
507
+
508
+ @pytest.mark.parametrize("na", [-1, np.nan])
509
+ def test_knn_imputer_drops_all_nan_features(na):
510
+ X1 = np.array([[na, 1], [na, 2]])
511
+ knn = KNNImputer(missing_values=na, n_neighbors=1)
512
+ X1_expected = np.array([[1], [2]])
513
+ assert_allclose(knn.fit_transform(X1), X1_expected)
514
+
515
+ X2 = np.array([[1, 2], [3, na]])
516
+ X2_expected = np.array([[2], [1.5]])
517
+ assert_allclose(knn.transform(X2), X2_expected)
518
+
519
+
520
+ @pytest.mark.parametrize("working_memory", [None, 0])
521
+ @pytest.mark.parametrize("na", [-1, np.nan])
522
+ def test_knn_imputer_distance_weighted_not_enough_neighbors(na, working_memory):
523
+ X = np.array([[3, na], [2, na], [na, 4], [5, 6], [6, 8], [na, 5]])
524
+
525
+ dist = pairwise_distances(
526
+ X, metric="nan_euclidean", squared=False, missing_values=na
527
+ )
528
+
529
+ X_01 = np.average(X[3:5, 1], weights=1 / dist[0, 3:5])
530
+ X_11 = np.average(X[3:5, 1], weights=1 / dist[1, 3:5])
531
+ X_20 = np.average(X[3:5, 0], weights=1 / dist[2, 3:5])
532
+ X_50 = np.average(X[3:5, 0], weights=1 / dist[5, 3:5])
533
+
534
+ X_expected = np.array([[3, X_01], [2, X_11], [X_20, 4], [5, 6], [6, 8], [X_50, 5]])
535
+
536
+ with config_context(working_memory=working_memory):
537
+ knn_3 = KNNImputer(missing_values=na, n_neighbors=3, weights="distance")
538
+ assert_allclose(knn_3.fit_transform(X), X_expected)
539
+
540
+ knn_4 = KNNImputer(missing_values=na, n_neighbors=4, weights="distance")
541
+ assert_allclose(knn_4.fit_transform(X), X_expected)
542
+
543
+
544
+ @pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)])
545
+ def test_knn_tags(na, allow_nan):
546
+ knn = KNNImputer(missing_values=na)
547
+ assert knn._get_tags()["allow_nan"] == allow_nan