applied-ai-018 committed
Commit 8ec04f5 · verified · 1 Parent(s): 696cd06

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  2. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py +2301 -0
  3. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py +795 -0
  4. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py +572 -0
  5. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_lda.py +929 -0
  6. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  7. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py +551 -0
  8. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py +319 -0
  9. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py +47 -0
  10. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_base.py +266 -0
  19. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py +522 -0
  20. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py +514 -0
  21. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py +792 -0
  22. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py +300 -0
  23. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py +1161 -0
  24. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py +136 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py +0 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py +153 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py +93 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py +1017 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py +684 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py +254 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py +615 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py +323 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py +72 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/impute/__init__.py +24 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/impute/_base.py +1075 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/impute/_iterative.py +906 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/impute/_knn.py +401 -0
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (246 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py ADDED
@@ -0,0 +1,2301 @@
1
+ """ Dictionary learning.
2
+ """
3
+ # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
4
+ # License: BSD 3 clause
5
+
6
+ import itertools
7
+ import sys
8
+ import time
9
+ from numbers import Integral, Real
10
+ from warnings import warn
11
+
12
+ import numpy as np
13
+ from joblib import effective_n_jobs
14
+ from scipy import linalg
15
+
16
+ from ..base import (
17
+ BaseEstimator,
18
+ ClassNamePrefixFeaturesOutMixin,
19
+ TransformerMixin,
20
+ _fit_context,
21
+ )
22
+ from ..linear_model import Lars, Lasso, LassoLars, orthogonal_mp_gram
23
+ from ..utils import check_array, check_random_state, gen_batches, gen_even_slices
24
+ from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
25
+ from ..utils.extmath import randomized_svd, row_norms, svd_flip
26
+ from ..utils.parallel import Parallel, delayed
27
+ from ..utils.validation import check_is_fitted
28
+
29
+
30
+ def _check_positive_coding(method, positive):
31
+ if positive and method in ["omp", "lars"]:
32
+ raise ValueError(
33
+ "Positive constraint not supported for '{}' coding method.".format(method)
34
+ )
35
+
36
+
37
+ def _sparse_encode_precomputed(
38
+ X,
39
+ dictionary,
40
+ *,
41
+ gram=None,
42
+ cov=None,
43
+ algorithm="lasso_lars",
44
+ regularization=None,
45
+ copy_cov=True,
46
+ init=None,
47
+ max_iter=1000,
48
+ verbose=0,
49
+ positive=False,
50
+ ):
51
+ """Generic sparse coding with precomputed Gram and/or covariance matrices.
52
+
53
+ Each row of the result is the solution to a Lasso problem.
54
+
55
+ Parameters
56
+ ----------
57
+ X : ndarray of shape (n_samples, n_features)
58
+ Data matrix.
59
+
60
+ dictionary : ndarray of shape (n_components, n_features)
61
+ The dictionary matrix against which to solve the sparse coding of
62
+ the data. Some of the algorithms assume normalized rows.
63
+
64
+ gram : ndarray of shape (n_components, n_components), default=None
65
+ Precomputed Gram matrix, `dictionary * dictionary'`
66
+ gram can be `None` if method is 'threshold'.
67
+
68
+ cov : ndarray of shape (n_components, n_samples), default=None
69
+ Precomputed covariance, `dictionary * X'`.
70
+
71
+ algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
72
+ default='lasso_lars'
73
+ The algorithm used:
74
+
75
+ * `'lars'`: uses the least angle regression method
76
+ (`linear_model.lars_path`);
77
+ * `'lasso_lars'`: uses Lars to compute the Lasso solution;
78
+ * `'lasso_cd'`: uses the coordinate descent method to compute the
79
+ Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
80
+ the estimated components are sparse;
81
+ * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
82
+ solution;
83
+ * `'threshold'`: squashes to zero all coefficients less than
84
+ regularization from the projection `dictionary * data'`.
85
+
86
+ regularization : int or float, default=None
87
+ The regularization parameter. It corresponds to alpha when
88
+ algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
89
+ Otherwise it corresponds to `n_nonzero_coefs`.
90
+
91
+ init : ndarray of shape (n_samples, n_components), default=None
92
+ Initialization value of the sparse code. Only used if
93
+ `algorithm='lasso_cd'`.
94
+
95
+ max_iter : int, default=1000
96
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
97
+ `'lasso_lars'`.
98
+
99
+ copy_cov : bool, default=True
100
+ Whether to copy the precomputed covariance matrix; if `False`, it may
101
+ be overwritten.
102
+
103
+ verbose : int, default=0
104
+ Controls the verbosity; the higher, the more messages.
105
+
106
+ positive: bool, default=False
107
+ Whether to enforce a positivity constraint on the sparse code.
108
+
109
+ .. versionadded:: 0.20
110
+
111
+ Returns
112
+ -------
113
+ code : ndarray of shape (n_components, n_features)
114
+ The sparse codes.
115
+ """
116
+ n_samples, n_features = X.shape
117
+ n_components = dictionary.shape[0]
118
+
119
+ if algorithm == "lasso_lars":
120
+ alpha = float(regularization) / n_features # account for scaling
121
+ try:
122
+ err_mgt = np.seterr(all="ignore")
123
+
124
+ # Not passing in verbose=max(0, verbose-1) because Lars.fit already
125
+ # corrects the verbosity level.
126
+ lasso_lars = LassoLars(
127
+ alpha=alpha,
128
+ fit_intercept=False,
129
+ verbose=verbose,
130
+ precompute=gram,
131
+ fit_path=False,
132
+ positive=positive,
133
+ max_iter=max_iter,
134
+ )
135
+ lasso_lars.fit(dictionary.T, X.T, Xy=cov)
136
+ new_code = lasso_lars.coef_
137
+ finally:
138
+ np.seterr(**err_mgt)
139
+
140
+ elif algorithm == "lasso_cd":
141
+ alpha = float(regularization) / n_features # account for scaling
142
+
143
+ # TODO: Make verbosity argument for Lasso?
144
+ # sklearn.linear_model.coordinate_descent.enet_path has a verbosity
145
+ # argument that we could pass in from Lasso.
146
+ clf = Lasso(
147
+ alpha=alpha,
148
+ fit_intercept=False,
149
+ precompute=gram,
150
+ max_iter=max_iter,
151
+ warm_start=True,
152
+ positive=positive,
153
+ )
154
+
155
+ if init is not None:
156
+ # In some workflows using coordinate descent algorithms:
157
+ # - users might provide NumPy arrays with read-only buffers
158
+ # - `joblib` might memmap arrays making their buffer read-only
159
+ # TODO: move this handling (which is currently too broad)
160
+ # closer to the actual private function which need buffers to be writable.
161
+ if not init.flags["WRITEABLE"]:
162
+ init = np.array(init)
163
+ clf.coef_ = init
164
+
165
+ clf.fit(dictionary.T, X.T, check_input=False)
166
+ new_code = clf.coef_
167
+
168
+ elif algorithm == "lars":
169
+ try:
170
+ err_mgt = np.seterr(all="ignore")
171
+
172
+ # Not passing in verbose=max(0, verbose-1) because Lars.fit already
173
+ # corrects the verbosity level.
174
+ lars = Lars(
175
+ fit_intercept=False,
176
+ verbose=verbose,
177
+ precompute=gram,
178
+ n_nonzero_coefs=int(regularization),
179
+ fit_path=False,
180
+ )
181
+ lars.fit(dictionary.T, X.T, Xy=cov)
182
+ new_code = lars.coef_
183
+ finally:
184
+ np.seterr(**err_mgt)
185
+
186
+ elif algorithm == "threshold":
187
+ new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T
188
+ if positive:
189
+ np.clip(new_code, 0, None, out=new_code)
190
+
191
+ elif algorithm == "omp":
192
+ new_code = orthogonal_mp_gram(
193
+ Gram=gram,
194
+ Xy=cov,
195
+ n_nonzero_coefs=int(regularization),
196
+ tol=None,
197
+ norms_squared=row_norms(X, squared=True),
198
+ copy_Xy=copy_cov,
199
+ ).T
200
+
201
+ return new_code.reshape(n_samples, n_components)
202
+
203
+
204
+ @validate_params(
205
+ {
206
+ "X": ["array-like"],
207
+ "dictionary": ["array-like"],
208
+ "gram": ["array-like", None],
209
+ "cov": ["array-like", None],
210
+ "algorithm": [
211
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
212
+ ],
213
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
214
+ "alpha": [Interval(Real, 0, None, closed="left"), None],
215
+ "copy_cov": ["boolean"],
216
+ "init": ["array-like", None],
217
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
218
+ "n_jobs": [Integral, None],
219
+ "check_input": ["boolean"],
220
+ "verbose": ["verbose"],
221
+ "positive": ["boolean"],
222
+ },
223
+ prefer_skip_nested_validation=True,
224
+ )
225
+ # XXX : could be moved to the linear_model module
226
+ def sparse_encode(
227
+ X,
228
+ dictionary,
229
+ *,
230
+ gram=None,
231
+ cov=None,
232
+ algorithm="lasso_lars",
233
+ n_nonzero_coefs=None,
234
+ alpha=None,
235
+ copy_cov=True,
236
+ init=None,
237
+ max_iter=1000,
238
+ n_jobs=None,
239
+ check_input=True,
240
+ verbose=0,
241
+ positive=False,
242
+ ):
243
+ """Sparse coding.
244
+
245
+ Each row of the result is the solution to a sparse coding problem.
246
+ The goal is to find a sparse array `code` such that::
247
+
248
+ X ~= code * dictionary
249
+
250
+ Read more in the :ref:`User Guide <SparseCoder>`.
251
+
252
+ Parameters
253
+ ----------
254
+ X : array-like of shape (n_samples, n_features)
255
+ Data matrix.
256
+
257
+ dictionary : array-like of shape (n_components, n_features)
258
+ The dictionary matrix against which to solve the sparse coding of
259
+ the data. Some of the algorithms assume normalized rows for meaningful
260
+ output.
261
+
262
+ gram : array-like of shape (n_components, n_components), default=None
263
+ Precomputed Gram matrix, `dictionary * dictionary'`.
264
+
265
+ cov : array-like of shape (n_components, n_samples), default=None
266
+ Precomputed covariance, `dictionary' * X`.
267
+
268
+ algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
269
+ default='lasso_lars'
270
+ The algorithm used:
271
+
272
+ * `'lars'`: uses the least angle regression method
273
+ (`linear_model.lars_path`);
274
+ * `'lasso_lars'`: uses Lars to compute the Lasso solution;
275
+ * `'lasso_cd'`: uses the coordinate descent method to compute the
276
+ Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
277
+ the estimated components are sparse;
278
+ * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
279
+ solution;
280
+ * `'threshold'`: squashes to zero all coefficients less than
281
+ regularization from the projection `dictionary * data'`.
282
+
283
+ n_nonzero_coefs : int, default=None
284
+ Number of nonzero coefficients to target in each column of the
285
+ solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
286
+ and is overridden by `alpha` in the `omp` case. If `None`, then
287
+ `n_nonzero_coefs=int(n_features / 10)`.
288
+
289
+ alpha : float, default=None
290
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
291
+ penalty applied to the L1 norm.
292
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
293
+ threshold below which coefficients will be squashed to zero.
294
+ If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
295
+ the reconstruction error targeted. In this case, it overrides
296
+ `n_nonzero_coefs`.
297
+ If `None`, default to 1.
298
+
299
+ copy_cov : bool, default=True
300
+ Whether to copy the precomputed covariance matrix; if `False`, it may
301
+ be overwritten.
302
+
303
+ init : ndarray of shape (n_samples, n_components), default=None
304
+ Initialization value of the sparse codes. Only used if
305
+ `algorithm='lasso_cd'`.
306
+
307
+ max_iter : int, default=1000
308
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
309
+ `'lasso_lars'`.
310
+
311
+ n_jobs : int, default=None
312
+ Number of parallel jobs to run.
313
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
314
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
315
+ for more details.
316
+
317
+ check_input : bool, default=True
318
+ If `False`, the input arrays X and dictionary will not be checked.
319
+
320
+ verbose : int, default=0
321
+ Controls the verbosity; the higher, the more messages.
322
+
323
+ positive : bool, default=False
324
+ Whether to enforce positivity when finding the encoding.
325
+
326
+ .. versionadded:: 0.20
327
+
328
+ Returns
329
+ -------
330
+ code : ndarray of shape (n_samples, n_components)
331
+ The sparse codes.
332
+
333
+ See Also
334
+ --------
335
+ sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso
336
+ path using LARS algorithm.
337
+ sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems.
338
+ sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
339
+ SparseCoder : Find a sparse representation of data from a fixed precomputed
340
+ dictionary.
341
+
342
+ Examples
343
+ --------
344
+ >>> import numpy as np
345
+ >>> from sklearn.decomposition import sparse_encode
346
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
347
+ >>> dictionary = np.array(
348
+ ... [[0, 1, 0],
349
+ ... [-1, -1, 2],
350
+ ... [1, 1, 1],
351
+ ... [0, 1, 1],
352
+ ... [0, 2, 1]],
353
+ ... dtype=np.float64
354
+ ... )
355
+ >>> sparse_encode(X, dictionary, alpha=1e-10)
356
+ array([[ 0., 0., -1., 0., 0.],
357
+ [ 0., 1., 1., 0., 0.]])
358
+ """
359
+ if check_input:
360
+ if algorithm == "lasso_cd":
361
+ dictionary = check_array(
362
+ dictionary, order="C", dtype=[np.float64, np.float32]
363
+ )
364
+ X = check_array(X, order="C", dtype=[np.float64, np.float32])
365
+ else:
366
+ dictionary = check_array(dictionary)
367
+ X = check_array(X)
368
+
369
+ if dictionary.shape[1] != X.shape[1]:
370
+ raise ValueError(
371
+ "Dictionary and X have different numbers of features:"
372
+ "dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape)
373
+ )
374
+
375
+ _check_positive_coding(algorithm, positive)
376
+
377
+ return _sparse_encode(
378
+ X,
379
+ dictionary,
380
+ gram=gram,
381
+ cov=cov,
382
+ algorithm=algorithm,
383
+ n_nonzero_coefs=n_nonzero_coefs,
384
+ alpha=alpha,
385
+ copy_cov=copy_cov,
386
+ init=init,
387
+ max_iter=max_iter,
388
+ n_jobs=n_jobs,
389
+ verbose=verbose,
390
+ positive=positive,
391
+ )
392
+
393
+
394
+ def _sparse_encode(
395
+ X,
396
+ dictionary,
397
+ *,
398
+ gram=None,
399
+ cov=None,
400
+ algorithm="lasso_lars",
401
+ n_nonzero_coefs=None,
402
+ alpha=None,
403
+ copy_cov=True,
404
+ init=None,
405
+ max_iter=1000,
406
+ n_jobs=None,
407
+ verbose=0,
408
+ positive=False,
409
+ ):
410
+ """Sparse coding without input/parameter validation."""
411
+
412
+ n_samples, n_features = X.shape
413
+ n_components = dictionary.shape[0]
414
+
415
+ if algorithm in ("lars", "omp"):
416
+ regularization = n_nonzero_coefs
417
+ if regularization is None:
418
+ regularization = min(max(n_features / 10, 1), n_components)
419
+ else:
420
+ regularization = alpha
421
+ if regularization is None:
422
+ regularization = 1.0
423
+
424
+ if gram is None and algorithm != "threshold":
425
+ gram = np.dot(dictionary, dictionary.T)
426
+
427
+ if cov is None and algorithm != "lasso_cd":
428
+ copy_cov = False
429
+ cov = np.dot(dictionary, X.T)
430
+
431
+ if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold":
432
+ code = _sparse_encode_precomputed(
433
+ X,
434
+ dictionary,
435
+ gram=gram,
436
+ cov=cov,
437
+ algorithm=algorithm,
438
+ regularization=regularization,
439
+ copy_cov=copy_cov,
440
+ init=init,
441
+ max_iter=max_iter,
442
+ verbose=verbose,
443
+ positive=positive,
444
+ )
445
+ return code
446
+
447
+ # Enter parallel code block
448
+ n_samples = X.shape[0]
449
+ n_components = dictionary.shape[0]
450
+ code = np.empty((n_samples, n_components))
451
+ slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
452
+
453
+ code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
454
+ delayed(_sparse_encode_precomputed)(
455
+ X[this_slice],
456
+ dictionary,
457
+ gram=gram,
458
+ cov=cov[:, this_slice] if cov is not None else None,
459
+ algorithm=algorithm,
460
+ regularization=regularization,
461
+ copy_cov=copy_cov,
462
+ init=init[this_slice] if init is not None else None,
463
+ max_iter=max_iter,
464
+ verbose=verbose,
465
+ positive=positive,
466
+ )
467
+ for this_slice in slices
468
+ )
469
+ for this_slice, this_view in zip(slices, code_views):
470
+ code[this_slice] = this_view
471
+ return code
472
+
473
+
474
+ def _update_dict(
475
+ dictionary,
476
+ Y,
477
+ code,
478
+ A=None,
479
+ B=None,
480
+ verbose=False,
481
+ random_state=None,
482
+ positive=False,
483
+ ):
484
+ """Update the dense dictionary factor in place.
485
+
486
+ Parameters
487
+ ----------
488
+ dictionary : ndarray of shape (n_components, n_features)
489
+ Value of the dictionary at the previous iteration.
490
+
491
+ Y : ndarray of shape (n_samples, n_features)
492
+ Data matrix.
493
+
494
+ code : ndarray of shape (n_samples, n_components)
495
+ Sparse coding of the data against which to optimize the dictionary.
496
+
497
+ A : ndarray of shape (n_components, n_components), default=None
498
+ Together with `B`, sufficient stats of the online model to update the
499
+ dictionary.
500
+
501
+ B : ndarray of shape (n_features, n_components), default=None
502
+ Together with `A`, sufficient stats of the online model to update the
503
+ dictionary.
504
+
505
+ verbose: bool, default=False
506
+ Degree of output the procedure will print.
507
+
508
+ random_state : int, RandomState instance or None, default=None
509
+ Used for randomly initializing the dictionary. Pass an int for
510
+ reproducible results across multiple function calls.
511
+ See :term:`Glossary <random_state>`.
512
+
513
+ positive : bool, default=False
514
+ Whether to enforce positivity when finding the dictionary.
515
+
516
+ .. versionadded:: 0.20
517
+ """
518
+ n_samples, n_components = code.shape
519
+ random_state = check_random_state(random_state)
520
+
521
+ if A is None:
522
+ A = code.T @ code
523
+ if B is None:
524
+ B = Y.T @ code
525
+
526
+ n_unused = 0
527
+
528
+ for k in range(n_components):
529
+ if A[k, k] > 1e-6:
530
+ # 1e-6 is arbitrary but consistent with the spams implementation
531
+ dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
532
+ else:
533
+ # kth atom is almost never used -> sample a new one from the data
534
+ newd = Y[random_state.choice(n_samples)]
535
+
536
+ # add small noise to avoid making the sparse coding ill conditioned
537
+ noise_level = 0.01 * (newd.std() or 1) # avoid 0 std
538
+ noise = random_state.normal(0, noise_level, size=len(newd))
539
+
540
+ dictionary[k] = newd + noise
541
+ code[:, k] = 0
542
+ n_unused += 1
543
+
544
+ if positive:
545
+ np.clip(dictionary[k], 0, None, out=dictionary[k])
546
+
547
+ # Projection on the constraint set ||V_k|| <= 1
548
+ dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
549
+
550
+ if verbose and n_unused > 0:
551
+ print(f"{n_unused} unused atoms resampled.")
552
+
553
+
554
+ def _dict_learning(
555
+ X,
556
+ n_components,
557
+ *,
558
+ alpha,
559
+ max_iter,
560
+ tol,
561
+ method,
562
+ n_jobs,
563
+ dict_init,
564
+ code_init,
565
+ callback,
566
+ verbose,
567
+ random_state,
568
+ return_n_iter,
569
+ positive_dict,
570
+ positive_code,
571
+ method_max_iter,
572
+ ):
573
+ """Main dictionary learning algorithm"""
574
+ t0 = time.time()
575
+ # Init the code and the dictionary with SVD of Y
576
+ if code_init is not None and dict_init is not None:
577
+ code = np.array(code_init, order="F")
578
+ # Don't copy V, it will happen below
579
+ dictionary = dict_init
580
+ else:
581
+ code, S, dictionary = linalg.svd(X, full_matrices=False)
582
+ # flip the initial code's sign to enforce deterministic output
583
+ code, dictionary = svd_flip(code, dictionary)
584
+ dictionary = S[:, np.newaxis] * dictionary
585
+ r = len(dictionary)
586
+ if n_components <= r: # True even if n_components=None
587
+ code = code[:, :n_components]
588
+ dictionary = dictionary[:n_components, :]
589
+ else:
590
+ code = np.c_[code, np.zeros((len(code), n_components - r))]
591
+ dictionary = np.r_[
592
+ dictionary, np.zeros((n_components - r, dictionary.shape[1]))
593
+ ]
594
+
595
+ # Fortran-order dict better suited for the sparse coding which is the
596
+ # bottleneck of this algorithm.
597
+ dictionary = np.asfortranarray(dictionary)
598
+
599
+ errors = []
600
+ current_cost = np.nan
601
+
602
+ if verbose == 1:
603
+ print("[dict_learning]", end=" ")
604
+
605
+ # If max_iter is 0, number of iterations returned should be zero
606
+ ii = -1
607
+
608
+ for ii in range(max_iter):
609
+ dt = time.time() - t0
610
+ if verbose == 1:
611
+ sys.stdout.write(".")
612
+ sys.stdout.flush()
613
+ elif verbose:
614
+ print(
615
+ "Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
616
+ % (ii, dt, dt / 60, current_cost)
617
+ )
618
+
619
+ # Update code
620
+ code = sparse_encode(
621
+ X,
622
+ dictionary,
623
+ algorithm=method,
624
+ alpha=alpha,
625
+ init=code,
626
+ n_jobs=n_jobs,
627
+ positive=positive_code,
628
+ max_iter=method_max_iter,
629
+ verbose=verbose,
630
+ )
631
+
632
+ # Update dictionary in place
633
+ _update_dict(
634
+ dictionary,
635
+ X,
636
+ code,
637
+ verbose=verbose,
638
+ random_state=random_state,
639
+ positive=positive_dict,
640
+ )
641
+
642
+ # Cost function
643
+ current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum(
644
+ np.abs(code)
645
+ )
646
+ errors.append(current_cost)
647
+
648
+ if ii > 0:
649
+ dE = errors[-2] - errors[-1]
650
+ # assert(dE >= -tol * errors[-1])
651
+ if dE < tol * errors[-1]:
652
+ if verbose == 1:
653
+ # A line return
654
+ print("")
655
+ elif verbose:
656
+ print("--- Convergence reached after %d iterations" % ii)
657
+ break
658
+ if ii % 5 == 0 and callback is not None:
659
+ callback(locals())
660
+
661
+ if return_n_iter:
662
+ return code, dictionary, errors, ii + 1
663
+ else:
664
+ return code, dictionary, errors
665
+
666
+
667
+ def dict_learning_online(
668
+ X,
669
+ n_components=2,
670
+ *,
671
+ alpha=1,
672
+ max_iter=100,
673
+ return_code=True,
674
+ dict_init=None,
675
+ callback=None,
676
+ batch_size=256,
677
+ verbose=False,
678
+ shuffle=True,
679
+ n_jobs=None,
680
+ method="lars",
681
+ random_state=None,
682
+ positive_dict=False,
683
+ positive_code=False,
684
+ method_max_iter=1000,
685
+ tol=1e-3,
686
+ max_no_improvement=10,
687
+ ):
688
+ """Solve a dictionary learning matrix factorization problem online.
689
+
690
+ Finds the best dictionary and the corresponding sparse code for
691
+ approximating the data matrix X by solving::
692
+
693
+ (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
694
+ (U,V)
695
+ with || V_k ||_2 = 1 for all 0 <= k < n_components
696
+
697
+ where V is the dictionary and U is the sparse code. ||.||_Fro stands for
698
+ the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
699
+ which is the sum of the absolute values of all the entries in the matrix.
700
+ This is accomplished by repeatedly iterating over mini-batches by slicing
701
+ the input data.
702
+
703
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
704
+
705
+ Parameters
706
+ ----------
707
+ X : ndarray of shape (n_samples, n_features)
708
+ Data matrix.
709
+
710
+ n_components : int or None, default=2
711
+ Number of dictionary atoms to extract. If None, then ``n_components``
712
+ is set to ``n_features``.
713
+
714
+ alpha : float, default=1
715
+ Sparsity controlling parameter.
716
+
717
+ max_iter : int, default=100
718
+ Maximum number of iterations over the complete dataset before
719
+ stopping independently of any early stopping criterion heuristics.
720
+
721
+ .. versionadded:: 1.1
722
+
723
+ .. deprecated:: 1.4
724
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
725
+ Use the default value (i.e. `100`) instead.
726
+
727
+ return_code : bool, default=True
728
+ Whether to also return the code U or just the dictionary `V`.
729
+
730
+ dict_init : ndarray of shape (n_components, n_features), default=None
731
+ Initial values for the dictionary for warm restart scenarios.
732
+ If `None`, the initial values for the dictionary are created
733
+ with an SVD decomposition of the data via
734
+ :func:`~sklearn.utils.extmath.randomized_svd`.
735
+
736
+ callback : callable, default=None
737
+ A callable that gets invoked at the end of each iteration.
738
+
739
+ batch_size : int, default=256
740
+ The number of samples to take in each batch.
741
+
742
+ .. versionchanged:: 1.3
743
+ The default value of `batch_size` changed from 3 to 256 in version 1.3.
744
+
745
+ verbose : bool, default=False
746
+ To control the verbosity of the procedure.
747
+
748
+ shuffle : bool, default=True
749
+ Whether to shuffle the data before splitting it in batches.
750
+
751
+ n_jobs : int, default=None
752
+ Number of parallel jobs to run.
753
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
754
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
755
+ for more details.
756
+
757
+ method : {'lars', 'cd'}, default='lars'
758
+ * `'lars'`: uses the least angle regression method to solve the lasso
759
+ problem (`linear_model.lars_path`);
760
+ * `'cd'`: uses the coordinate descent method to compute the
761
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
762
+ the estimated components are sparse.
763
+
764
+ random_state : int, RandomState instance or None, default=None
765
+ Used for initializing the dictionary when ``dict_init`` is not
766
+ specified, randomly shuffling the data when ``shuffle`` is set to
767
+ ``True``, and updating the dictionary. Pass an int for reproducible
768
+ results across multiple function calls.
769
+ See :term:`Glossary <random_state>`.
770
+
771
+ positive_dict : bool, default=False
772
+ Whether to enforce positivity when finding the dictionary.
773
+
774
+ .. versionadded:: 0.20
775
+
776
+ positive_code : bool, default=False
777
+ Whether to enforce positivity when finding the code.
778
+
779
+ .. versionadded:: 0.20
780
+
781
+ method_max_iter : int, default=1000
782
+ Maximum number of iterations to perform when solving the lasso problem.
783
+
784
+ .. versionadded:: 0.22
785
+
786
+ tol : float, default=1e-3
787
+ Control early stopping based on the norm of the differences in the
788
+ dictionary between 2 steps.
789
+
790
+ To disable early stopping based on changes in the dictionary, set
791
+ `tol` to 0.0.
792
+
793
+ .. versionadded:: 1.1
794
+
795
+ max_no_improvement : int, default=10
796
+ Control early stopping based on the consecutive number of mini batches
797
+ that does not yield an improvement on the smoothed cost function.
798
+
799
+ To disable convergence detection based on cost function, set
800
+ `max_no_improvement` to None.
801
+
802
+ .. versionadded:: 1.1
803
+
804
+ Returns
805
+ -------
806
+ code : ndarray of shape (n_samples, n_components),
807
+ The sparse code (only returned if `return_code=True`).
808
+
809
+ dictionary : ndarray of shape (n_components, n_features),
810
+ The solutions to the dictionary learning problem.
811
+
812
+ n_iter : int
813
+ Number of iterations run. Returned only if `return_n_iter` is
814
+ set to `True`.
815
+
816
+ See Also
817
+ --------
818
+ dict_learning : Solve a dictionary learning matrix factorization problem.
819
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
820
+ MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary
821
+ learning algorithm.
822
+ SparsePCA : Sparse Principal Components Analysis.
823
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
824
+
825
+ Examples
826
+ --------
827
+ >>> import numpy as np
828
+ >>> from sklearn.datasets import make_sparse_coded_signal
829
+ >>> from sklearn.decomposition import dict_learning_online
830
+ >>> X, _, _ = make_sparse_coded_signal(
831
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
832
+ ... random_state=42,
833
+ ... )
834
+ >>> U, V = dict_learning_online(
835
+ ... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
836
+ ... )
837
+
838
+ We can check the level of sparsity of `U`:
839
+
840
+ >>> np.mean(U == 0)
841
+ 0.53...
842
+
843
+ We can compare the average squared euclidean norm of the reconstruction
844
+ error of the sparse coded signal relative to the squared euclidean norm of
845
+ the original signal:
846
+
847
+ >>> X_hat = U @ V
848
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
849
+ 0.05...
850
+ """
851
+ # TODO(1.6): remove in 1.6
852
+ if max_iter is None:
853
+ warn(
854
+ (
855
+ "`max_iter=None` is deprecated in version 1.4 and will be removed in "
856
+ "version 1.6. Use the default value (i.e. `100`) instead."
857
+ ),
858
+ FutureWarning,
859
+ )
860
+ max_iter = 100
861
+
862
+ transform_algorithm = "lasso_" + method
863
+
864
+ est = MiniBatchDictionaryLearning(
865
+ n_components=n_components,
866
+ alpha=alpha,
867
+ max_iter=max_iter,
868
+ n_jobs=n_jobs,
869
+ fit_algorithm=method,
870
+ batch_size=batch_size,
871
+ shuffle=shuffle,
872
+ dict_init=dict_init,
873
+ random_state=random_state,
874
+ transform_algorithm=transform_algorithm,
875
+ transform_alpha=alpha,
876
+ positive_code=positive_code,
877
+ positive_dict=positive_dict,
878
+ transform_max_iter=method_max_iter,
879
+ verbose=verbose,
880
+ callback=callback,
881
+ tol=tol,
882
+ max_no_improvement=max_no_improvement,
883
+ ).fit(X)
884
+
885
+ if not return_code:
886
+ return est.components_
887
+ else:
888
+ code = est.transform(X)
889
+ return code, est.components_
890
+
891
+
892
+ @validate_params(
893
+ {
894
+ "X": ["array-like"],
895
+ "method": [StrOptions({"lars", "cd"})],
896
+ "return_n_iter": ["boolean"],
897
+ "method_max_iter": [Interval(Integral, 0, None, closed="left")],
898
+ },
899
+ prefer_skip_nested_validation=False,
900
+ )
901
+ def dict_learning(
902
+ X,
903
+ n_components,
904
+ *,
905
+ alpha,
906
+ max_iter=100,
907
+ tol=1e-8,
908
+ method="lars",
909
+ n_jobs=None,
910
+ dict_init=None,
911
+ code_init=None,
912
+ callback=None,
913
+ verbose=False,
914
+ random_state=None,
915
+ return_n_iter=False,
916
+ positive_dict=False,
917
+ positive_code=False,
918
+ method_max_iter=1000,
919
+ ):
920
+ """Solve a dictionary learning matrix factorization problem.
921
+
922
+ Finds the best dictionary and the corresponding sparse code for
923
+ approximating the data matrix X by solving::
924
+
925
+ (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
926
+ (U,V)
927
+ with || V_k ||_2 = 1 for all 0 <= k < n_components
928
+
929
+ where V is the dictionary and U is the sparse code. ||.||_Fro stands for
930
+ the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
931
+ which is the sum of the absolute values of all the entries in the matrix.
932
+
933
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
934
+
935
+ Parameters
936
+ ----------
937
+ X : array-like of shape (n_samples, n_features)
938
+ Data matrix.
939
+
940
+ n_components : int
941
+ Number of dictionary atoms to extract.
942
+
943
+ alpha : int or float
944
+ Sparsity controlling parameter.
945
+
946
+ max_iter : int, default=100
947
+ Maximum number of iterations to perform.
948
+
949
+ tol : float, default=1e-8
950
+ Tolerance for the stopping condition.
951
+
952
+ method : {'lars', 'cd'}, default='lars'
953
+ The method used:
954
+
955
+ * `'lars'`: uses the least angle regression method to solve the lasso
956
+ problem (`linear_model.lars_path`);
957
+ * `'cd'`: uses the coordinate descent method to compute the
958
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
959
+ the estimated components are sparse.
960
+
961
+ n_jobs : int, default=None
962
+ Number of parallel jobs to run.
963
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
964
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
965
+ for more details.
966
+
967
+ dict_init : ndarray of shape (n_components, n_features), default=None
968
+ Initial value for the dictionary for warm restart scenarios. Only used
969
+ if `code_init` and `dict_init` are not None.
970
+
971
+ code_init : ndarray of shape (n_samples, n_components), default=None
972
+ Initial value for the sparse code for warm restart scenarios. Only used
973
+ if `code_init` and `dict_init` are not None.
974
+
975
+ callback : callable, default=None
976
+ Callable that gets invoked every five iterations.
977
+
978
+ verbose : bool, default=False
979
+ To control the verbosity of the procedure.
980
+
981
+ random_state : int, RandomState instance or None, default=None
982
+ Used for randomly initializing the dictionary. Pass an int for
983
+ reproducible results across multiple function calls.
984
+ See :term:`Glossary <random_state>`.
985
+
986
+ return_n_iter : bool, default=False
987
+ Whether or not to return the number of iterations.
988
+
989
+ positive_dict : bool, default=False
990
+ Whether to enforce positivity when finding the dictionary.
991
+
992
+ .. versionadded:: 0.20
993
+
994
+ positive_code : bool, default=False
995
+ Whether to enforce positivity when finding the code.
996
+
997
+ .. versionadded:: 0.20
998
+
999
+ method_max_iter : int, default=1000
1000
+ Maximum number of iterations to perform.
1001
+
1002
+ .. versionadded:: 0.22
1003
+
1004
+ Returns
1005
+ -------
1006
+ code : ndarray of shape (n_samples, n_components)
1007
+ The sparse code factor in the matrix factorization.
1008
+
1009
+ dictionary : ndarray of shape (n_components, n_features),
1010
+ The dictionary factor in the matrix factorization.
1011
+
1012
+ errors : array
1013
+ Vector of errors at each iteration.
1014
+
1015
+ n_iter : int
1016
+ Number of iterations run. Returned only if `return_n_iter` is
1017
+ set to True.
1018
+
1019
+ See Also
1020
+ --------
1021
+ dict_learning_online : Solve a dictionary learning matrix factorization
1022
+ problem online.
1023
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1024
+ MiniBatchDictionaryLearning : A faster, less accurate version
1025
+ of the dictionary learning algorithm.
1026
+ SparsePCA : Sparse Principal Components Analysis.
1027
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1028
+
1029
+ Examples
1030
+ --------
1031
+ >>> import numpy as np
1032
+ >>> from sklearn.datasets import make_sparse_coded_signal
1033
+ >>> from sklearn.decomposition import dict_learning
1034
+ >>> X, _, _ = make_sparse_coded_signal(
1035
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1036
+ ... random_state=42,
1037
+ ... )
1038
+ >>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)
1039
+
1040
+ We can check the level of sparsity of `U`:
1041
+
1042
+ >>> np.mean(U == 0)
1043
+ 0.6...
1044
+
1045
+ We can compare the average squared euclidean norm of the reconstruction
1046
+ error of the sparse coded signal relative to the squared euclidean norm of
1047
+ the original signal:
1048
+
1049
+ >>> X_hat = U @ V
1050
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1051
+ 0.01...
1052
+ """
1053
+ estimator = DictionaryLearning(
1054
+ n_components=n_components,
1055
+ alpha=alpha,
1056
+ max_iter=max_iter,
1057
+ tol=tol,
1058
+ fit_algorithm=method,
1059
+ n_jobs=n_jobs,
1060
+ dict_init=dict_init,
1061
+ callback=callback,
1062
+ code_init=code_init,
1063
+ verbose=verbose,
1064
+ random_state=random_state,
1065
+ positive_code=positive_code,
1066
+ positive_dict=positive_dict,
1067
+ transform_max_iter=method_max_iter,
1068
+ ).set_output(transform="default")
1069
+ code = estimator.fit_transform(X)
1070
+ if return_n_iter:
1071
+ return (
1072
+ code,
1073
+ estimator.components_,
1074
+ estimator.error_,
1075
+ estimator.n_iter_,
1076
+ )
1077
+ return code, estimator.components_, estimator.error_
1078
+
1079
+
1080
+ class _BaseSparseCoding(ClassNamePrefixFeaturesOutMixin, TransformerMixin):
1081
+ """Base class from SparseCoder and DictionaryLearning algorithms."""
1082
+
1083
+ def __init__(
1084
+ self,
1085
+ transform_algorithm,
1086
+ transform_n_nonzero_coefs,
1087
+ transform_alpha,
1088
+ split_sign,
1089
+ n_jobs,
1090
+ positive_code,
1091
+ transform_max_iter,
1092
+ ):
1093
+ self.transform_algorithm = transform_algorithm
1094
+ self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
1095
+ self.transform_alpha = transform_alpha
1096
+ self.transform_max_iter = transform_max_iter
1097
+ self.split_sign = split_sign
1098
+ self.n_jobs = n_jobs
1099
+ self.positive_code = positive_code
1100
+
1101
+ def _transform(self, X, dictionary):
1102
+ """Private method allowing to accommodate both DictionaryLearning and
1103
+ SparseCoder."""
1104
+ X = self._validate_data(X, reset=False)
1105
+
1106
+ if hasattr(self, "alpha") and self.transform_alpha is None:
1107
+ transform_alpha = self.alpha
1108
+ else:
1109
+ transform_alpha = self.transform_alpha
1110
+
1111
+ code = sparse_encode(
1112
+ X,
1113
+ dictionary,
1114
+ algorithm=self.transform_algorithm,
1115
+ n_nonzero_coefs=self.transform_n_nonzero_coefs,
1116
+ alpha=transform_alpha,
1117
+ max_iter=self.transform_max_iter,
1118
+ n_jobs=self.n_jobs,
1119
+ positive=self.positive_code,
1120
+ )
1121
+
1122
+ if self.split_sign:
1123
+ # feature vector is split into a positive and negative side
1124
+ n_samples, n_features = code.shape
1125
+ split_code = np.empty((n_samples, 2 * n_features))
1126
+ split_code[:, :n_features] = np.maximum(code, 0)
1127
+ split_code[:, n_features:] = -np.minimum(code, 0)
1128
+ code = split_code
1129
+
1130
+ return code
1131
+
1132
+ def transform(self, X):
1133
+ """Encode the data as a sparse combination of the dictionary atoms.
1134
+
1135
+ Coding method is determined by the object parameter
1136
+ `transform_algorithm`.
1137
+
1138
+ Parameters
1139
+ ----------
1140
+ X : ndarray of shape (n_samples, n_features)
1141
+ Test data to be transformed, must have the same number of
1142
+ features as the data used to train the model.
1143
+
1144
+ Returns
1145
+ -------
1146
+ X_new : ndarray of shape (n_samples, n_components)
1147
+ Transformed data.
1148
+ """
1149
+ check_is_fitted(self)
1150
+ return self._transform(X, self.components_)
1151
+
1152
+
1153
+ class SparseCoder(_BaseSparseCoding, BaseEstimator):
1154
+ """Sparse coding.
1155
+
1156
+ Finds a sparse representation of data against a fixed, precomputed
1157
+ dictionary.
1158
+
1159
+ Each row of the result is the solution to a sparse coding problem.
1160
+ The goal is to find a sparse array `code` such that::
1161
+
1162
+ X ~= code * dictionary
1163
+
1164
+ Read more in the :ref:`User Guide <SparseCoder>`.
1165
+
1166
+ Parameters
1167
+ ----------
1168
+ dictionary : ndarray of shape (n_components, n_features)
1169
+ The dictionary atoms used for sparse coding. Lines are assumed to be
1170
+ normalized to unit norm.
1171
+
1172
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1173
+ 'threshold'}, default='omp'
1174
+ Algorithm used to transform the data:
1175
+
1176
+ - `'lars'`: uses the least angle regression method
1177
+ (`linear_model.lars_path`);
1178
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution;
1179
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1180
+ Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if
1181
+ the estimated components are sparse;
1182
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1183
+ solution;
1184
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1185
+ the projection ``dictionary * X'``.
1186
+
1187
+ transform_n_nonzero_coefs : int, default=None
1188
+ Number of nonzero coefficients to target in each column of the
1189
+ solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
1190
+ and is overridden by `alpha` in the `omp` case. If `None`, then
1191
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1192
+
1193
+ transform_alpha : float, default=None
1194
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1195
+ penalty applied to the L1 norm.
1196
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1197
+ threshold below which coefficients will be squashed to zero.
1198
+ If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
1199
+ the reconstruction error targeted. In this case, it overrides
1200
+ `n_nonzero_coefs`.
1201
+ If `None`, default to 1.
1202
+
1203
+ split_sign : bool, default=False
1204
+ Whether to split the sparse feature vector into the concatenation of
1205
+ its negative part and its positive part. This can improve the
1206
+ performance of downstream classifiers.
1207
+
1208
+ n_jobs : int, default=None
1209
+ Number of parallel jobs to run.
1210
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1211
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1212
+ for more details.
1213
+
1214
+ positive_code : bool, default=False
1215
+ Whether to enforce positivity when finding the code.
1216
+
1217
+ .. versionadded:: 0.20
1218
+
1219
+ transform_max_iter : int, default=1000
1220
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1221
+ `lasso_lars`.
1222
+
1223
+ .. versionadded:: 0.22
1224
+
1225
+ Attributes
1226
+ ----------
1227
+ n_components_ : int
1228
+ Number of atoms.
1229
+
1230
+ n_features_in_ : int
1231
+ Number of features seen during :term:`fit`.
1232
+
1233
+ .. versionadded:: 0.24
1234
+
1235
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1236
+ Names of features seen during :term:`fit`. Defined only when `X`
1237
+ has feature names that are all strings.
1238
+
1239
+ .. versionadded:: 1.0
1240
+
1241
+ See Also
1242
+ --------
1243
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1244
+ MiniBatchDictionaryLearning : A faster, less accurate, version of the
1245
+ dictionary learning algorithm.
1246
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1247
+ SparsePCA : Sparse Principal Components Analysis.
1248
+ sparse_encode : Sparse coding where each row of the result is the solution
1249
+ to a sparse coding problem.
1250
+
1251
+ Examples
1252
+ --------
1253
+ >>> import numpy as np
1254
+ >>> from sklearn.decomposition import SparseCoder
1255
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
1256
+ >>> dictionary = np.array(
1257
+ ... [[0, 1, 0],
1258
+ ... [-1, -1, 2],
1259
+ ... [1, 1, 1],
1260
+ ... [0, 1, 1],
1261
+ ... [0, 2, 1]],
1262
+ ... dtype=np.float64
1263
+ ... )
1264
+ >>> coder = SparseCoder(
1265
+ ... dictionary=dictionary, transform_algorithm='lasso_lars',
1266
+ ... transform_alpha=1e-10,
1267
+ ... )
1268
+ >>> coder.transform(X)
1269
+ array([[ 0., 0., -1., 0., 0.],
1270
+ [ 0., 1., 1., 0., 0.]])
1271
+ """
1272
+
1273
+ _required_parameters = ["dictionary"]
1274
+
1275
+ def __init__(
1276
+ self,
1277
+ dictionary,
1278
+ *,
1279
+ transform_algorithm="omp",
1280
+ transform_n_nonzero_coefs=None,
1281
+ transform_alpha=None,
1282
+ split_sign=False,
1283
+ n_jobs=None,
1284
+ positive_code=False,
1285
+ transform_max_iter=1000,
1286
+ ):
1287
+ super().__init__(
1288
+ transform_algorithm,
1289
+ transform_n_nonzero_coefs,
1290
+ transform_alpha,
1291
+ split_sign,
1292
+ n_jobs,
1293
+ positive_code,
1294
+ transform_max_iter,
1295
+ )
1296
+ self.dictionary = dictionary
1297
+
1298
+ def fit(self, X, y=None):
1299
+ """Do nothing and return the estimator unchanged.
1300
+
1301
+ This method is just there to implement the usual API and hence
1302
+ work in pipelines.
1303
+
1304
+ Parameters
1305
+ ----------
1306
+ X : Ignored
1307
+ Not used, present for API consistency by convention.
1308
+
1309
+ y : Ignored
1310
+ Not used, present for API consistency by convention.
1311
+
1312
+ Returns
1313
+ -------
1314
+ self : object
1315
+ Returns the instance itself.
1316
+ """
1317
+ return self
1318
+
1319
+ def transform(self, X, y=None):
1320
+ """Encode the data as a sparse combination of the dictionary atoms.
1321
+
1322
+ Coding method is determined by the object parameter
1323
+ `transform_algorithm`.
1324
+
1325
+ Parameters
1326
+ ----------
1327
+ X : ndarray of shape (n_samples, n_features)
1328
+ Training vector, where `n_samples` is the number of samples
1329
+ and `n_features` is the number of features.
1330
+
1331
+ y : Ignored
1332
+ Not used, present for API consistency by convention.
1333
+
1334
+ Returns
1335
+ -------
1336
+ X_new : ndarray of shape (n_samples, n_components)
1337
+ Transformed data.
1338
+ """
1339
+ return super()._transform(X, self.dictionary)
1340
+
1341
+ def _more_tags(self):
1342
+ return {
1343
+ "requires_fit": False,
1344
+ "preserves_dtype": [np.float64, np.float32],
1345
+ }
1346
+
1347
+ @property
1348
+ def n_components_(self):
1349
+ """Number of atoms."""
1350
+ return self.dictionary.shape[0]
1351
+
1352
+ @property
1353
+ def n_features_in_(self):
1354
+ """Number of features seen during `fit`."""
1355
+ return self.dictionary.shape[1]
1356
+
1357
+ @property
1358
+ def _n_features_out(self):
1359
+ """Number of transformed output features."""
1360
+ return self.n_components_
1361
+
1362
+
1363
+ class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
1364
+ """Dictionary learning.
1365
+
1366
+ Finds a dictionary (a set of atoms) that performs well at sparsely
1367
+ encoding the fitted data.
1368
+
1369
+ Solves the optimization problem::
1370
+
1371
+ (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
1372
+ (U,V)
1373
+ with || V_k ||_2 <= 1 for all 0 <= k < n_components
1374
+
1375
+ ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
1376
+ the entry-wise matrix norm which is the sum of the absolute values
1377
+ of all the entries in the matrix.
1378
+
1379
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
1380
+
1381
+ Parameters
1382
+ ----------
1383
+ n_components : int, default=None
1384
+ Number of dictionary elements to extract. If None, then ``n_components``
1385
+ is set to ``n_features``.
1386
+
1387
+ alpha : float, default=1.0
1388
+ Sparsity controlling parameter.
1389
+
1390
+ max_iter : int, default=1000
1391
+ Maximum number of iterations to perform.
1392
+
1393
+ tol : float, default=1e-8
1394
+ Tolerance for numerical error.
1395
+
1396
+ fit_algorithm : {'lars', 'cd'}, default='lars'
1397
+ * `'lars'`: uses the least angle regression method to solve the lasso
1398
+ problem (:func:`~sklearn.linear_model.lars_path`);
1399
+ * `'cd'`: uses the coordinate descent method to compute the
1400
+ Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be
1401
+ faster if the estimated components are sparse.
1402
+
1403
+ .. versionadded:: 0.17
1404
+ *cd* coordinate descent method to improve speed.
1405
+
1406
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1407
+ 'threshold'}, default='omp'
1408
+ Algorithm used to transform the data:
1409
+
1410
+ - `'lars'`: uses the least angle regression method
1411
+ (:func:`~sklearn.linear_model.lars_path`);
1412
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution.
1413
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1414
+ Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'`
1415
+ will be faster if the estimated components are sparse.
1416
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1417
+ solution.
1418
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1419
+ the projection ``dictionary * X'``.
1420
+
1421
+ .. versionadded:: 0.17
1422
+ *lasso_cd* coordinate descent method to improve speed.
1423
+
1424
+ transform_n_nonzero_coefs : int, default=None
1425
+ Number of nonzero coefficients to target in each column of the
1426
+ solution. This is only used by `algorithm='lars'` and
1427
+ `algorithm='omp'`. If `None`, then
1428
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1429
+
1430
+ transform_alpha : float, default=None
1431
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1432
+ penalty applied to the L1 norm.
1433
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1434
+ threshold below which coefficients will be squashed to zero.
1435
+ If `None`, defaults to `alpha`.
1436
+
1437
+ .. versionchanged:: 1.2
1438
+ When None, default value changed from 1.0 to `alpha`.
1439
+
1440
+ n_jobs : int or None, default=None
1441
+ Number of parallel jobs to run.
1442
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1443
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1444
+ for more details.
1445
+
1446
+ code_init : ndarray of shape (n_samples, n_components), default=None
1447
+ Initial value for the code, for warm restart. Only used if `code_init`
1448
+ and `dict_init` are not None.
1449
+
1450
+ dict_init : ndarray of shape (n_components, n_features), default=None
1451
+ Initial values for the dictionary, for warm restart. Only used if
1452
+ `code_init` and `dict_init` are not None.
1453
+
1454
+ callback : callable, default=None
1455
+ Callable that gets invoked every five iterations.
1456
+
1457
+ .. versionadded:: 1.3
1458
+
1459
+ verbose : bool, default=False
1460
+ To control the verbosity of the procedure.
1461
+
1462
+ split_sign : bool, default=False
1463
+ Whether to split the sparse feature vector into the concatenation of
1464
+ its negative part and its positive part. This can improve the
1465
+ performance of downstream classifiers.
1466
+
1467
+ random_state : int, RandomState instance or None, default=None
1468
+ Used for initializing the dictionary when ``dict_init`` is not
1469
+ specified, randomly shuffling the data when ``shuffle`` is set to
1470
+ ``True``, and updating the dictionary. Pass an int for reproducible
1471
+ results across multiple function calls.
1472
+ See :term:`Glossary <random_state>`.
1473
+
1474
+ positive_code : bool, default=False
1475
+ Whether to enforce positivity when finding the code.
1476
+
1477
+ .. versionadded:: 0.20
1478
+
1479
+ positive_dict : bool, default=False
1480
+ Whether to enforce positivity when finding the dictionary.
1481
+
1482
+ .. versionadded:: 0.20
1483
+
1484
+ transform_max_iter : int, default=1000
1485
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1486
+ `'lasso_lars'`.
1487
+
1488
+ .. versionadded:: 0.22
1489
+
1490
+ Attributes
1491
+ ----------
1492
+ components_ : ndarray of shape (n_components, n_features)
1493
+ Dictionary atoms extracted from the data.
1494
+
1495
+ error_ : array
1496
+ Vector of errors at each iteration.
1497
+
1498
+ n_features_in_ : int
1499
+ Number of features seen during :term:`fit`.
1500
+
1501
+ .. versionadded:: 0.24
1502
+
1503
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1504
+ Names of features seen during :term:`fit`. Defined only when `X`
1505
+ has feature names that are all strings.
1506
+
1507
+ .. versionadded:: 1.0
1508
+
1509
+ n_iter_ : int
1510
+ Number of iterations run.
1511
+
1512
+ See Also
1513
+ --------
1514
+ MiniBatchDictionaryLearning: A faster, less accurate, version of the
1515
+ dictionary learning algorithm.
1516
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1517
+ SparseCoder : Find a sparse representation of data from a fixed,
1518
+ precomputed dictionary.
1519
+ SparsePCA : Sparse Principal Components Analysis.
1520
+
1521
+ References
1522
+ ----------
1523
+
1524
+ J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
1525
+ for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
1526
+
1527
+ Examples
1528
+ --------
1529
+ >>> import numpy as np
1530
+ >>> from sklearn.datasets import make_sparse_coded_signal
1531
+ >>> from sklearn.decomposition import DictionaryLearning
1532
+ >>> X, dictionary, code = make_sparse_coded_signal(
1533
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1534
+ ... random_state=42,
1535
+ ... )
1536
+ >>> dict_learner = DictionaryLearning(
1537
+ ... n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.1,
1538
+ ... random_state=42,
1539
+ ... )
1540
+ >>> X_transformed = dict_learner.fit(X).transform(X)
1541
+
1542
+ We can check the level of sparsity of `X_transformed`:
1543
+
1544
+ >>> np.mean(X_transformed == 0)
1545
+ 0.52...
1546
+
1547
+ We can compare the average squared euclidean norm of the reconstruction
1548
+ error of the sparse coded signal relative to the squared euclidean norm of
1549
+ the original signal:
1550
+
1551
+ >>> X_hat = X_transformed @ dict_learner.components_
1552
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1553
+ 0.05...
1554
+ """
1555
+
1556
+ _parameter_constraints: dict = {
1557
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
1558
+ "alpha": [Interval(Real, 0, None, closed="left")],
1559
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
1560
+ "tol": [Interval(Real, 0, None, closed="left")],
1561
+ "fit_algorithm": [StrOptions({"lars", "cd"})],
1562
+ "transform_algorithm": [
1563
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
1564
+ ],
1565
+ "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
1566
+ "transform_alpha": [Interval(Real, 0, None, closed="left"), None],
1567
+ "n_jobs": [Integral, None],
1568
+ "code_init": [np.ndarray, None],
1569
+ "dict_init": [np.ndarray, None],
1570
+ "callback": [callable, None],
1571
+ "verbose": ["verbose"],
1572
+ "split_sign": ["boolean"],
1573
+ "random_state": ["random_state"],
1574
+ "positive_code": ["boolean"],
1575
+ "positive_dict": ["boolean"],
1576
+ "transform_max_iter": [Interval(Integral, 0, None, closed="left")],
1577
+ }
1578
+
1579
+ def __init__(
1580
+ self,
1581
+ n_components=None,
1582
+ *,
1583
+ alpha=1,
1584
+ max_iter=1000,
1585
+ tol=1e-8,
1586
+ fit_algorithm="lars",
1587
+ transform_algorithm="omp",
1588
+ transform_n_nonzero_coefs=None,
1589
+ transform_alpha=None,
1590
+ n_jobs=None,
1591
+ code_init=None,
1592
+ dict_init=None,
1593
+ callback=None,
1594
+ verbose=False,
1595
+ split_sign=False,
1596
+ random_state=None,
1597
+ positive_code=False,
1598
+ positive_dict=False,
1599
+ transform_max_iter=1000,
1600
+ ):
1601
+ super().__init__(
1602
+ transform_algorithm,
1603
+ transform_n_nonzero_coefs,
1604
+ transform_alpha,
1605
+ split_sign,
1606
+ n_jobs,
1607
+ positive_code,
1608
+ transform_max_iter,
1609
+ )
1610
+ self.n_components = n_components
1611
+ self.alpha = alpha
1612
+ self.max_iter = max_iter
1613
+ self.tol = tol
1614
+ self.fit_algorithm = fit_algorithm
1615
+ self.code_init = code_init
1616
+ self.dict_init = dict_init
1617
+ self.callback = callback
1618
+ self.verbose = verbose
1619
+ self.random_state = random_state
1620
+ self.positive_dict = positive_dict
1621
+
1622
+ def fit(self, X, y=None):
1623
+ """Fit the model from data in X.
1624
+
1625
+ Parameters
1626
+ ----------
1627
+ X : array-like of shape (n_samples, n_features)
1628
+ Training vector, where `n_samples` is the number of samples
1629
+ and `n_features` is the number of features.
1630
+
1631
+ y : Ignored
1632
+ Not used, present for API consistency by convention.
1633
+
1634
+ Returns
1635
+ -------
1636
+ self : object
1637
+ Returns the instance itself.
1638
+ """
1639
+ self.fit_transform(X)
1640
+ return self
1641
+
1642
+ @_fit_context(prefer_skip_nested_validation=True)
1643
+ def fit_transform(self, X, y=None):
1644
+ """Fit the model from data in X and return the transformed data.
1645
+
1646
+ Parameters
1647
+ ----------
1648
+ X : array-like of shape (n_samples, n_features)
1649
+ Training vector, where `n_samples` is the number of samples
1650
+ and `n_features` is the number of features.
1651
+
1652
+ y : Ignored
1653
+ Not used, present for API consistency by convention.
1654
+
1655
+ Returns
1656
+ -------
1657
+ V : ndarray of shape (n_samples, n_components)
1658
+ Transformed data.
1659
+ """
1660
+ _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code)
1661
+
1662
+ method = "lasso_" + self.fit_algorithm
1663
+
1664
+ random_state = check_random_state(self.random_state)
1665
+ X = self._validate_data(X)
1666
+
1667
+ if self.n_components is None:
1668
+ n_components = X.shape[1]
1669
+ else:
1670
+ n_components = self.n_components
1671
+
1672
+ V, U, E, self.n_iter_ = _dict_learning(
1673
+ X,
1674
+ n_components,
1675
+ alpha=self.alpha,
1676
+ tol=self.tol,
1677
+ max_iter=self.max_iter,
1678
+ method=method,
1679
+ method_max_iter=self.transform_max_iter,
1680
+ n_jobs=self.n_jobs,
1681
+ code_init=self.code_init,
1682
+ dict_init=self.dict_init,
1683
+ callback=self.callback,
1684
+ verbose=self.verbose,
1685
+ random_state=random_state,
1686
+ return_n_iter=True,
1687
+ positive_dict=self.positive_dict,
1688
+ positive_code=self.positive_code,
1689
+ )
1690
+ self.components_ = U
1691
+ self.error_ = E
1692
+
1693
+ return V
1694
+
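The objective minimized by `fit_transform` above, 0.5 * ||X - U V||_Fro^2 + alpha * ||U||_1,1, can be recomputed from the returned code and the fitted dictionary as a sanity check. The following is a minimal sketch (not part of the library source); it only relies on the public `fit_transform` and `components_` used above, with illustrative parameter values.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import DictionaryLearning

# Illustrative sketch: evaluate the dictionary-learning objective by hand.
X, _, _ = make_sparse_coded_signal(
    n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
    random_state=0,
)
alpha = 1.0
dl = DictionaryLearning(n_components=15, alpha=alpha, max_iter=100, random_state=0)
U = dl.fit_transform(X)          # code, shape (n_samples, n_components)
V = dl.components_               # dictionary, shape (n_components, n_features)

reconstruction = 0.5 * np.sum((X - U @ V) ** 2)   # 0.5 * ||X - U V||_Fro^2
sparsity = alpha * np.sum(np.abs(U))              # alpha * ||U||_1,1
print(f"objective value: {reconstruction + sparsity:.3f}")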
1695
+ @property
1696
+ def _n_features_out(self):
1697
+ """Number of transformed output features."""
1698
+ return self.components_.shape[0]
1699
+
1700
+ def _more_tags(self):
1701
+ return {
1702
+ "preserves_dtype": [np.float64, np.float32],
1703
+ }
1704
+
1705
+
1706
+ class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
1707
+ """Mini-batch dictionary learning.
1708
+
1709
+ Finds a dictionary (a set of atoms) that performs well at sparsely
1710
+ encoding the fitted data.
1711
+
1712
+ Solves the optimization problem::
1713
+
1714
+ (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
1715
+ (U,V)
1716
+ with || V_k ||_2 <= 1 for all 0 <= k < n_components
1717
+
1718
+ ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
1719
+ the entry-wise matrix norm which is the sum of the absolute values
1720
+ of all the entries in the matrix.
1721
+
1722
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
1723
+
1724
+ Parameters
1725
+ ----------
1726
+ n_components : int, default=None
1727
+ Number of dictionary elements to extract.
1728
+
1729
+ alpha : float, default=1
1730
+ Sparsity controlling parameter.
1731
+
1732
+ max_iter : int, default=1_000
1733
+ Maximum number of iterations over the complete dataset before
1734
+ stopping independently of any early stopping criterion heuristics.
1735
+
1736
+ .. versionadded:: 1.1
1737
+
1738
+ .. deprecated:: 1.4
1739
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
1740
+ Use the default value (i.e. `1_000`) instead.
1741
+
1742
+ fit_algorithm : {'lars', 'cd'}, default='lars'
1743
+ The algorithm used:
1744
+
1745
+ - `'lars'`: uses the least angle regression method to solve the lasso
1746
+ problem (`linear_model.lars_path`)
1747
+ - `'cd'`: uses the coordinate descent method to compute the
1748
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
1749
+ the estimated components are sparse.
1750
+
1751
+ n_jobs : int, default=None
1752
+ Number of parallel jobs to run.
1753
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1754
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1755
+ for more details.
1756
+
1757
+ batch_size : int, default=256
1758
+ Number of samples in each mini-batch.
1759
+
1760
+ .. versionchanged:: 1.3
1761
+ The default value of `batch_size` changed from 3 to 256 in version 1.3.
1762
+
1763
+ shuffle : bool, default=True
1764
+ Whether to shuffle the samples before forming batches.
1765
+
1766
+ dict_init : ndarray of shape (n_components, n_features), default=None
1767
+ Initial value of the dictionary for warm restart scenarios.
1768
+
1769
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1770
+ 'threshold'}, default='omp'
1771
+ Algorithm used to transform the data:
1772
+
1773
+ - `'lars'`: uses the least angle regression method
1774
+ (`linear_model.lars_path`);
1775
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution.
1776
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1777
+ Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
1778
+ if the estimated components are sparse.
1779
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1780
+ solution.
1781
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1782
+ the projection ``dictionary * X'``.
1783
+
1784
+ transform_n_nonzero_coefs : int, default=None
1785
+ Number of nonzero coefficients to target in each column of the
1786
+ solution. This is only used by `algorithm='lars'` and
1787
+ `algorithm='omp'`. If `None`, then
1788
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1789
+
1790
+ transform_alpha : float, default=None
1791
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1792
+ penalty applied to the L1 norm.
1793
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1794
+ threshold below which coefficients will be squashed to zero.
1795
+ If `None`, defaults to `alpha`.
1796
+
1797
+ .. versionchanged:: 1.2
1798
+ When None, default value changed from 1.0 to `alpha`.
1799
+
1800
+ verbose : bool or int, default=False
1801
+ To control the verbosity of the procedure.
1802
+
1803
+ split_sign : bool, default=False
1804
+ Whether to split the sparse feature vector into the concatenation of
1805
+ its negative part and its positive part. This can improve the
1806
+ performance of downstream classifiers.
1807
+
1808
+ random_state : int, RandomState instance or None, default=None
1809
+ Used for initializing the dictionary when ``dict_init`` is not
1810
+ specified, randomly shuffling the data when ``shuffle`` is set to
1811
+ ``True``, and updating the dictionary. Pass an int for reproducible
1812
+ results across multiple function calls.
1813
+ See :term:`Glossary <random_state>`.
1814
+
1815
+ positive_code : bool, default=False
1816
+ Whether to enforce positivity when finding the code.
1817
+
1818
+ .. versionadded:: 0.20
1819
+
1820
+ positive_dict : bool, default=False
1821
+ Whether to enforce positivity when finding the dictionary.
1822
+
1823
+ .. versionadded:: 0.20
1824
+
1825
+ transform_max_iter : int, default=1000
1826
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1827
+ `'lasso_lars'`.
1828
+
1829
+ .. versionadded:: 0.22
1830
+
1831
+ callback : callable, default=None
1832
+ A callable that gets invoked at the end of each iteration.
1833
+
1834
+ .. versionadded:: 1.1
1835
+
1836
+ tol : float, default=1e-3
1837
+ Control early stopping based on the norm of the differences in the
1838
+ dictionary between 2 steps.
1839
+
1840
+ To disable early stopping based on changes in the dictionary, set
1841
+ `tol` to 0.0.
1842
+
1843
+ .. versionadded:: 1.1
1844
+
1845
+ max_no_improvement : int, default=10
1846
+ Control early stopping based on the number of consecutive mini-batches
1847
+ that do not yield an improvement on the smoothed cost function.
1848
+
1849
+ To disable convergence detection based on cost function, set
1850
+ `max_no_improvement` to None.
1851
+
1852
+ .. versionadded:: 1.1
1853
+
1854
+ Attributes
1855
+ ----------
1856
+ components_ : ndarray of shape (n_components, n_features)
1857
+ Components extracted from the data.
1858
+
1859
+ n_features_in_ : int
1860
+ Number of features seen during :term:`fit`.
1861
+
1862
+ .. versionadded:: 0.24
1863
+
1864
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1865
+ Names of features seen during :term:`fit`. Defined only when `X`
1866
+ has feature names that are all strings.
1867
+
1868
+ .. versionadded:: 1.0
1869
+
1870
+ n_iter_ : int
1871
+ Number of iterations over the full dataset.
1872
+
1873
+ n_steps_ : int
1874
+ Number of mini-batches processed.
1875
+
1876
+ .. versionadded:: 1.1
1877
+
1878
+ See Also
1879
+ --------
1880
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1881
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1882
+ SparseCoder : Find a sparse representation of data from a fixed,
1883
+ precomputed dictionary.
1884
+ SparsePCA : Sparse Principal Components Analysis.
1885
+
1886
+ References
1887
+ ----------
1888
+
1889
+ J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
1890
+ for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
1891
+
1892
+ Examples
1893
+ --------
1894
+ >>> import numpy as np
1895
+ >>> from sklearn.datasets import make_sparse_coded_signal
1896
+ >>> from sklearn.decomposition import MiniBatchDictionaryLearning
1897
+ >>> X, dictionary, code = make_sparse_coded_signal(
1898
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1899
+ ... random_state=42)
1900
+ >>> dict_learner = MiniBatchDictionaryLearning(
1901
+ ... n_components=15, batch_size=3, transform_algorithm='lasso_lars',
1902
+ ... transform_alpha=0.1, max_iter=20, random_state=42)
1903
+ >>> X_transformed = dict_learner.fit_transform(X)
1904
+
1905
+ We can check the level of sparsity of `X_transformed`:
1906
+
1907
+ >>> np.mean(X_transformed == 0) > 0.5
1908
+ True
1909
+
1910
+ We can compare the average squared euclidean norm of the reconstruction
1911
+ error of the sparse coded signal relative to the squared euclidean norm of
1912
+ the original signal:
1913
+
1914
+ >>> X_hat = X_transformed @ dict_learner.components_
1915
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1916
+ 0.052...
1917
+ """
1918
+
1919
+ _parameter_constraints: dict = {
1920
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
1921
+ "alpha": [Interval(Real, 0, None, closed="left")],
1922
+ "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)],
1923
+ "fit_algorithm": [StrOptions({"cd", "lars"})],
1924
+ "n_jobs": [None, Integral],
1925
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
1926
+ "shuffle": ["boolean"],
1927
+ "dict_init": [None, np.ndarray],
1928
+ "transform_algorithm": [
1929
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
1930
+ ],
1931
+ "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
1932
+ "transform_alpha": [Interval(Real, 0, None, closed="left"), None],
1933
+ "verbose": ["verbose"],
1934
+ "split_sign": ["boolean"],
1935
+ "random_state": ["random_state"],
1936
+ "positive_code": ["boolean"],
1937
+ "positive_dict": ["boolean"],
1938
+ "transform_max_iter": [Interval(Integral, 0, None, closed="left")],
1939
+ "callback": [None, callable],
1940
+ "tol": [Interval(Real, 0, None, closed="left")],
1941
+ "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
1942
+ }
1943
+
1944
+ def __init__(
1945
+ self,
1946
+ n_components=None,
1947
+ *,
1948
+ alpha=1,
1949
+ max_iter=1_000,
1950
+ fit_algorithm="lars",
1951
+ n_jobs=None,
1952
+ batch_size=256,
1953
+ shuffle=True,
1954
+ dict_init=None,
1955
+ transform_algorithm="omp",
1956
+ transform_n_nonzero_coefs=None,
1957
+ transform_alpha=None,
1958
+ verbose=False,
1959
+ split_sign=False,
1960
+ random_state=None,
1961
+ positive_code=False,
1962
+ positive_dict=False,
1963
+ transform_max_iter=1000,
1964
+ callback=None,
1965
+ tol=1e-3,
1966
+ max_no_improvement=10,
1967
+ ):
1968
+ super().__init__(
1969
+ transform_algorithm,
1970
+ transform_n_nonzero_coefs,
1971
+ transform_alpha,
1972
+ split_sign,
1973
+ n_jobs,
1974
+ positive_code,
1975
+ transform_max_iter,
1976
+ )
1977
+ self.n_components = n_components
1978
+ self.alpha = alpha
1979
+ self.max_iter = max_iter
1980
+ self.fit_algorithm = fit_algorithm
1981
+ self.dict_init = dict_init
1982
+ self.verbose = verbose
1983
+ self.shuffle = shuffle
1984
+ self.batch_size = batch_size
1985
+ self.split_sign = split_sign
1986
+ self.random_state = random_state
1987
+ self.positive_dict = positive_dict
1988
+ self.callback = callback
1989
+ self.max_no_improvement = max_no_improvement
1990
+ self.tol = tol
1991
+
1992
+ def _check_params(self, X):
1993
+ # n_components
1994
+ self._n_components = self.n_components
1995
+ if self._n_components is None:
1996
+ self._n_components = X.shape[1]
1997
+
1998
+ # fit_algorithm
1999
+ _check_positive_coding(self.fit_algorithm, self.positive_code)
2000
+ self._fit_algorithm = "lasso_" + self.fit_algorithm
2001
+
2002
+ # batch_size
2003
+ self._batch_size = min(self.batch_size, X.shape[0])
2004
+
2005
+ def _initialize_dict(self, X, random_state):
2006
+ """Initialization of the dictionary."""
2007
+ if self.dict_init is not None:
2008
+ dictionary = self.dict_init
2009
+ else:
2010
+ # Init V with SVD of X
2011
+ _, S, dictionary = randomized_svd(
2012
+ X, self._n_components, random_state=random_state
2013
+ )
2014
+ dictionary = S[:, np.newaxis] * dictionary
2015
+
2016
+ if self._n_components <= len(dictionary):
2017
+ dictionary = dictionary[: self._n_components, :]
2018
+ else:
2019
+ dictionary = np.concatenate(
2020
+ (
2021
+ dictionary,
2022
+ np.zeros(
2023
+ (self._n_components - len(dictionary), dictionary.shape[1]),
2024
+ dtype=dictionary.dtype,
2025
+ ),
2026
+ )
2027
+ )
2028
+
2029
+ dictionary = check_array(dictionary, order="F", dtype=X.dtype, copy=False)
2030
+ dictionary = np.require(dictionary, requirements="W")
2031
+
2032
+ return dictionary
2033
+
2034
+ def _update_inner_stats(self, X, code, batch_size, step):
2035
+ """Update the inner stats inplace."""
2036
+ if step < batch_size - 1:
2037
+ theta = (step + 1) * batch_size
2038
+ else:
2039
+ theta = batch_size**2 + step + 1 - batch_size
2040
+ beta = (theta + 1 - batch_size) / (theta + 1)
2041
+
2042
+ self._A *= beta
2043
+ self._A += code.T @ code / batch_size
2044
+ self._B *= beta
2045
+ self._B += X.T @ code / batch_size
2046
+
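The recurrence above maintains the running statistics A ≈ E[code.T code] and B ≈ E[X.T code] with a forgetting factor `beta`, following the online dictionary learning scheme of Mairal et al. cited in the class docstring above. A standalone sketch of the same recurrence on made-up toy arrays (hypothetical data, purely illustrative):

import numpy as np

rng = np.random.default_rng(0)
n_features, n_components, batch_size = 8, 5, 4

# Toy sketch of the inner-statistics recurrence used by the minibatch update.
A = np.zeros((n_components, n_components))   # accumulates code.T @ code
B = np.zeros((n_features, n_components))     # accumulates X.T @ code

for step in range(10):
    X_batch = rng.normal(size=(batch_size, n_features))
    code = rng.normal(size=(batch_size, n_components))  # stand-in for sparse codes

    # Forgetting factor that down-weights old batches, as in the method above.
    if step < batch_size - 1:
        theta = (step + 1) * batch_size
    else:
        theta = batch_size**2 + step + 1 - batch_size
    beta = (theta + 1 - batch_size) / (theta + 1)

    A = beta * A + code.T @ code / batch_size
    B = beta * B + X_batch.T @ code / batch_size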
2047
+ def _minibatch_step(self, X, dictionary, random_state, step):
2048
+ """Perform the update on the dictionary for one minibatch."""
2049
+ batch_size = X.shape[0]
2050
+
2051
+ # Compute code for this batch
2052
+ code = _sparse_encode(
2053
+ X,
2054
+ dictionary,
2055
+ algorithm=self._fit_algorithm,
2056
+ alpha=self.alpha,
2057
+ n_jobs=self.n_jobs,
2058
+ positive=self.positive_code,
2059
+ max_iter=self.transform_max_iter,
2060
+ verbose=self.verbose,
2061
+ )
2062
+
2063
+ batch_cost = (
2064
+ 0.5 * ((X - code @ dictionary) ** 2).sum()
2065
+ + self.alpha * np.sum(np.abs(code))
2066
+ ) / batch_size
2067
+
2068
+ # Update inner stats
2069
+ self._update_inner_stats(X, code, batch_size, step)
2070
+
2071
+ # Update dictionary
2072
+ _update_dict(
2073
+ dictionary,
2074
+ X,
2075
+ code,
2076
+ self._A,
2077
+ self._B,
2078
+ verbose=self.verbose,
2079
+ random_state=random_state,
2080
+ positive=self.positive_dict,
2081
+ )
2082
+
2083
+ return batch_cost
2084
+
2085
+ def _check_convergence(
2086
+ self, X, batch_cost, new_dict, old_dict, n_samples, step, n_steps
2087
+ ):
2088
+ """Helper function to encapsulate the early stopping logic.
2089
+
2090
+ Early stopping is based on two factors:
2091
+ - A small change of the dictionary between two minibatch updates. This is
2092
+ controlled by the tol parameter.
2093
+ - No more improvement on a smoothed estimate of the objective function for a
2094
+ certain number of consecutive minibatch updates. This is controlled by
2095
+ the max_no_improvement parameter.
2096
+ """
2097
+ batch_size = X.shape[0]
2098
+
2099
+ # counts steps starting from 1 for user friendly verbose mode.
2100
+ step = step + 1
2101
+
2102
+ # Ignore the first 100 steps or 1 epoch to avoid initializing the ewa_cost
2103
+ # with an overly bad value
2104
+ if step <= min(100, n_samples / batch_size):
2105
+ if self.verbose:
2106
+ print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
2107
+ return False
2108
+
2109
+ # Compute an Exponentially Weighted Average of the cost function to
2110
+ # monitor the convergence while discarding minibatch-local stochastic
2111
+ # variability: https://en.wikipedia.org/wiki/Moving_average
2112
+ if self._ewa_cost is None:
2113
+ self._ewa_cost = batch_cost
2114
+ else:
2115
+ alpha = batch_size / (n_samples + 1)
2116
+ alpha = min(alpha, 1)
2117
+ self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
2118
+
2119
+ if self.verbose:
2120
+ print(
2121
+ f"Minibatch step {step}/{n_steps}: mean batch cost: "
2122
+ f"{batch_cost}, ewa cost: {self._ewa_cost}"
2123
+ )
2124
+
2125
+ # Early stopping based on change of dictionary
2126
+ dict_diff = linalg.norm(new_dict - old_dict) / self._n_components
2127
+ if self.tol > 0 and dict_diff <= self.tol:
2128
+ if self.verbose:
2129
+ print(f"Converged (small dictionary change) at step {step}/{n_steps}")
2130
+ return True
2131
+
2132
+ # Early stopping heuristic due to lack of improvement on smoothed
2133
+ # cost function
2134
+ if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
2135
+ self._no_improvement = 0
2136
+ self._ewa_cost_min = self._ewa_cost
2137
+ else:
2138
+ self._no_improvement += 1
2139
+
2140
+ if (
2141
+ self.max_no_improvement is not None
2142
+ and self._no_improvement >= self.max_no_improvement
2143
+ ):
2144
+ if self.verbose:
2145
+ print(
2146
+ "Converged (lack of improvement in objective function) "
2147
+ f"at step {step}/{n_steps}"
2148
+ )
2149
+ return True
2150
+
2151
+ return False
2152
+
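The `ewa_cost` used in this early-stopping check is a plain exponentially weighted average of the per-batch costs. A rough standalone illustration, assuming a made-up sequence of batch costs:

import numpy as np

# Illustrative EWA smoothing of noisy per-batch costs, as used above:
# ewa <- (1 - alpha) * ewa + alpha * batch_cost, with
# alpha = batch_size / (n_samples + 1), capped at 1.
rng = np.random.default_rng(0)
n_samples, batch_size = 1000, 100
alpha = min(batch_size / (n_samples + 1), 1)

ewa_cost = None
for batch_cost in 1.0 + 0.1 * rng.normal(size=50):  # made-up noisy costs
    if ewa_cost is None:
        ewa_cost = batch_cost
    else:
        ewa_cost = ewa_cost * (1 - alpha) + batch_cost * alpha

print(f"smoothed cost after 50 batches: {ewa_cost:.3f}")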
2153
+ @_fit_context(prefer_skip_nested_validation=True)
2154
+ def fit(self, X, y=None):
2155
+ """Fit the model from data in X.
2156
+
2157
+ Parameters
2158
+ ----------
2159
+ X : array-like of shape (n_samples, n_features)
2160
+ Training vector, where `n_samples` is the number of samples
2161
+ and `n_features` is the number of features.
2162
+
2163
+ y : Ignored
2164
+ Not used, present for API consistency by convention.
2165
+
2166
+ Returns
2167
+ -------
2168
+ self : object
2169
+ Returns the instance itself.
2170
+ """
2171
+ X = self._validate_data(
2172
+ X, dtype=[np.float64, np.float32], order="C", copy=False
2173
+ )
2174
+
2175
+ self._check_params(X)
2176
+ self._random_state = check_random_state(self.random_state)
2177
+
2178
+ dictionary = self._initialize_dict(X, self._random_state)
2179
+ old_dict = dictionary.copy()
2180
+
2181
+ if self.shuffle:
2182
+ X_train = X.copy()
2183
+ self._random_state.shuffle(X_train)
2184
+ else:
2185
+ X_train = X
2186
+
2187
+ n_samples, n_features = X_train.shape
2188
+
2189
+ if self.verbose:
2190
+ print("[dict_learning]")
2191
+
2192
+ # Inner stats
2193
+ self._A = np.zeros(
2194
+ (self._n_components, self._n_components), dtype=X_train.dtype
2195
+ )
2196
+ self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype)
2197
+
2198
+ # TODO(1.6): remove in 1.6
2199
+ if self.max_iter is None:
2200
+ warn(
2201
+ (
2202
+ "`max_iter=None` is deprecated in version 1.4 and will be removed"
2203
+ " in version 1.6. Use the default value (i.e. `1_000`) instead."
2204
+ ),
2205
+ FutureWarning,
2206
+ )
2207
+ max_iter = 1_000
2208
+ else:
2209
+ max_iter = self.max_iter
2210
+
2211
+ # Attributes to monitor the convergence
2212
+ self._ewa_cost = None
2213
+ self._ewa_cost_min = None
2214
+ self._no_improvement = 0
2215
+
2216
+ batches = gen_batches(n_samples, self._batch_size)
2217
+ batches = itertools.cycle(batches)
2218
+ n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
2219
+ n_steps = max_iter * n_steps_per_iter
2220
+
2221
+ i = -1 # to allow max_iter = 0
2222
+
2223
+ for i, batch in zip(range(n_steps), batches):
2224
+ X_batch = X_train[batch]
2225
+
2226
+ batch_cost = self._minibatch_step(
2227
+ X_batch, dictionary, self._random_state, i
2228
+ )
2229
+
2230
+ if self._check_convergence(
2231
+ X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps
2232
+ ):
2233
+ break
2234
+
2235
+ # XXX callback param added for backward compat in #18975 but a common
2236
+ # unified callback API should be preferred
2237
+ if self.callback is not None:
2238
+ self.callback(locals())
2239
+
2240
+ old_dict[:] = dictionary
2241
+
2242
+ self.n_steps_ = i + 1
2243
+ self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter)
2244
+ self.components_ = dictionary
2245
+
2246
+ return self
2247
+
2248
+ @_fit_context(prefer_skip_nested_validation=True)
2249
+ def partial_fit(self, X, y=None):
2250
+ """Update the model using the data in X as a mini-batch.
2251
+
2252
+ Parameters
2253
+ ----------
2254
+ X : array-like of shape (n_samples, n_features)
2255
+ Training vector, where `n_samples` is the number of samples
2256
+ and `n_features` is the number of features.
2257
+
2258
+ y : Ignored
2259
+ Not used, present for API consistency by convention.
2260
+
2261
+ Returns
2262
+ -------
2263
+ self : object
2264
+ Return the instance itself.
2265
+ """
2266
+ has_components = hasattr(self, "components_")
2267
+
2268
+ X = self._validate_data(
2269
+ X, dtype=[np.float64, np.float32], order="C", reset=not has_components
2270
+ )
2271
+
2272
+ if not has_components:
2273
+ # This instance has not been fitted yet (fit or partial_fit)
2274
+ self._check_params(X)
2275
+ self._random_state = check_random_state(self.random_state)
2276
+
2277
+ dictionary = self._initialize_dict(X, self._random_state)
2278
+
2279
+ self.n_steps_ = 0
2280
+
2281
+ self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)
2282
+ self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)
2283
+ else:
2284
+ dictionary = self.components_
2285
+
2286
+ self._minibatch_step(X, dictionary, self._random_state, self.n_steps_)
2287
+
2288
+ self.components_ = dictionary
2289
+ self.n_steps_ += 1
2290
+
2291
+ return self
2292
+
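A short sketch of the streaming usage that `partial_fit` above enables, feeding one mini-batch at a time; the dataset and parameter values are purely illustrative.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.utils import gen_batches

# Sketch: incremental updates with partial_fit, one mini-batch at a time.
X, _, _ = make_sparse_coded_signal(
    n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
    random_state=0,
)
mbdl = MiniBatchDictionaryLearning(n_components=15, random_state=0)
for batch in gen_batches(X.shape[0], batch_size=10):
    mbdl.partial_fit(X[batch])

print(mbdl.components_.shape)   # (15, 20)
print(mbdl.n_steps_)            # number of mini-batches processed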
2293
+ @property
2294
+ def _n_features_out(self):
2295
+ """Number of transformed output features."""
2296
+ return self.components_.shape[0]
2297
+
2298
+ def _more_tags(self):
2299
+ return {
2300
+ "preserves_dtype": [np.float64, np.float32],
2301
+ }
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py ADDED
@@ -0,0 +1,795 @@
1
+ """
2
+ Python implementation of the fast ICA algorithms.
3
+
4
+ Reference: Tables 8.3 and 8.4 page 196 in the book:
5
+ Independent Component Analysis, by Hyvarinen et al.
6
+ """
7
+
8
+ # Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
9
+ # Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
10
+ # License: BSD 3 clause
11
+
12
+ import warnings
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ from scipy import linalg
17
+
18
+ from ..base import (
19
+ BaseEstimator,
20
+ ClassNamePrefixFeaturesOutMixin,
21
+ TransformerMixin,
22
+ _fit_context,
23
+ )
24
+ from ..exceptions import ConvergenceWarning
25
+ from ..utils import as_float_array, check_array, check_random_state
26
+ from ..utils._param_validation import Interval, Options, StrOptions, validate_params
27
+ from ..utils.validation import check_is_fitted
28
+
29
+ __all__ = ["fastica", "FastICA"]
30
+
31
+
32
+ def _gs_decorrelation(w, W, j):
33
+ """
34
+ Orthonormalize w wrt the first j rows of W.
35
+
36
+ Parameters
37
+ ----------
38
+ w : ndarray of shape (n,)
39
+ Array to be orthogonalized
40
+
41
+ W : ndarray of shape (p, n)
42
+ Null space definition
43
+
44
+ j : int < p
45
+ The number of (leading) rows of the null space W wrt which w is
46
+ orthogonalized.
47
+
48
+ Notes
49
+ -----
50
+ Assumes that W is orthogonal
51
+ w is changed in place
52
+ """
53
+ w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
54
+ return w
55
+
56
+
57
+ def _sym_decorrelation(W):
58
+ """Symmetric decorrelation
59
+ i.e. W <- (W * W.T) ^{-1/2} * W
60
+ """
61
+ s, u = linalg.eigh(np.dot(W, W.T))
62
+ # Avoid sqrt of negative values because of rounding errors. Note that
63
+ # np.sqrt(tiny) is larger than tiny and therefore this clipping also
64
+ # prevents division by zero in the next step.
65
+ s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None)
66
+
67
+ # u (resp. s) contains the eigenvectors (resp. square roots of
68
+ # the eigenvalues) of W * W.T
69
+ return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])
70
+
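The effect of `_sym_decorrelation` is to make the rows of W orthonormal, i.e. W W.T becomes the identity. The sketch below re-implements the same two lines on a random matrix purely as an illustration:

import numpy as np
from scipy import linalg

# Sketch: symmetric decorrelation W <- (W W.T)^{-1/2} W yields W W.T = I.
rng = np.random.default_rng(0)
W = rng.normal(size=(4, 4))

s, u = linalg.eigh(W @ W.T)
W_dec = (u * (1.0 / np.sqrt(s))) @ u.T @ W

print(np.allclose(W_dec @ W_dec.T, np.eye(4)))  # True (up to rounding)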
71
+
72
+ def _ica_def(X, tol, g, fun_args, max_iter, w_init):
73
+ """Deflationary FastICA using fun approx to neg-entropy function
74
+
75
+ Used internally by FastICA.
76
+ """
77
+
78
+ n_components = w_init.shape[0]
79
+ W = np.zeros((n_components, n_components), dtype=X.dtype)
80
+ n_iter = []
81
+
82
+ # j is the index of the extracted component
83
+ for j in range(n_components):
84
+ w = w_init[j, :].copy()
85
+ w /= np.sqrt((w**2).sum())
86
+
87
+ for i in range(max_iter):
88
+ gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
89
+
90
+ w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
91
+
92
+ _gs_decorrelation(w1, W, j)
93
+
94
+ w1 /= np.sqrt((w1**2).sum())
95
+
96
+ lim = np.abs(np.abs((w1 * w).sum()) - 1)
97
+ w = w1
98
+ if lim < tol:
99
+ break
100
+
101
+ n_iter.append(i + 1)
102
+ W[j, :] = w
103
+
104
+ return W, max(n_iter)
105
+
106
+
107
+ def _ica_par(X, tol, g, fun_args, max_iter, w_init):
108
+ """Parallel FastICA.
109
+
110
+ Used internally by FastICA --main loop
111
+
112
+ """
113
+ W = _sym_decorrelation(w_init)
114
+ del w_init
115
+ p_ = float(X.shape[1])
116
+ for ii in range(max_iter):
117
+ gwtx, g_wtx = g(np.dot(W, X), fun_args)
118
+ W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)
119
+ del gwtx, g_wtx
120
+ # builtin max, abs are faster than numpy counterparts.
121
+ # np.einsum allows having the lowest memory footprint.
122
+ # It is faster than np.diag(np.dot(W1, W.T)).
123
+ lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1))
124
+ W = W1
125
+ if lim < tol:
126
+ break
127
+ else:
128
+ warnings.warn(
129
+ (
130
+ "FastICA did not converge. Consider increasing "
131
+ "tolerance or the maximum number of iterations."
132
+ ),
133
+ ConvergenceWarning,
134
+ )
135
+
136
+ return W, ii + 1
137
+
138
+
139
+ # Some standard non-linear functions.
140
+ # XXX: these should be optimized, as they can be a bottleneck.
141
+ def _logcosh(x, fun_args=None):
142
+ alpha = fun_args.get("alpha", 1.0) # comment it out?
143
+
144
+ x *= alpha
145
+ gx = np.tanh(x, x) # apply the tanh inplace
146
+ g_x = np.empty(x.shape[0], dtype=x.dtype)
147
+ # XXX compute in chunks to avoid extra allocation
148
+ for i, gx_i in enumerate(gx): # please don't vectorize.
149
+ g_x[i] = (alpha * (1 - gx_i**2)).mean()
150
+ return gx, g_x
151
+
152
+
153
+ def _exp(x, fun_args):
154
+ exp = np.exp(-(x**2) / 2)
155
+ gx = x * exp
156
+ g_x = (1 - x**2) * exp
157
+ return gx, g_x.mean(axis=-1)
158
+
159
+
160
+ def _cube(x, fun_args):
161
+ return x**3, (3 * x**2).mean(axis=-1)
162
+
163
+
164
+ @validate_params(
165
+ {
166
+ "X": ["array-like"],
167
+ "return_X_mean": ["boolean"],
168
+ "compute_sources": ["boolean"],
169
+ "return_n_iter": ["boolean"],
170
+ },
171
+ prefer_skip_nested_validation=False,
172
+ )
173
+ def fastica(
174
+ X,
175
+ n_components=None,
176
+ *,
177
+ algorithm="parallel",
178
+ whiten="unit-variance",
179
+ fun="logcosh",
180
+ fun_args=None,
181
+ max_iter=200,
182
+ tol=1e-04,
183
+ w_init=None,
184
+ whiten_solver="svd",
185
+ random_state=None,
186
+ return_X_mean=False,
187
+ compute_sources=True,
188
+ return_n_iter=False,
189
+ ):
190
+ """Perform Fast Independent Component Analysis.
191
+
192
+ The implementation is based on [1]_.
193
+
194
+ Read more in the :ref:`User Guide <ICA>`.
195
+
196
+ Parameters
197
+ ----------
198
+ X : array-like of shape (n_samples, n_features)
199
+ Training vector, where `n_samples` is the number of samples and
200
+ `n_features` is the number of features.
201
+
202
+ n_components : int, default=None
203
+ Number of components to use. If None is passed, all are used.
204
+
205
+ algorithm : {'parallel', 'deflation'}, default='parallel'
206
+ Specify which algorithm to use for FastICA.
207
+
208
+ whiten : str or bool, default='unit-variance'
209
+ Specify the whitening strategy to use.
210
+
211
+ - If 'arbitrary-variance', a whitening with arbitrary
212
+ variance is used.
213
+ - If 'unit-variance', the whitening matrix is rescaled to ensure that
214
+ each recovered source has unit variance.
215
+ - If False, the data is already considered to be whitened, and no
216
+ whitening is performed.
217
+
218
+ .. versionchanged:: 1.3
219
+ The default value of `whiten` changed to 'unit-variance' in 1.3.
220
+
221
+ fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
222
+ The functional form of the G function used in the
223
+ approximation to neg-entropy. Could be either 'logcosh', 'exp',
224
+ or 'cube'.
225
+ You can also provide your own function. It should return a tuple
226
+ containing the value of the function, and of its derivative, in the
227
+ point. The derivative should be averaged along its last dimension.
228
+ Example::
229
+
230
+ def my_g(x):
231
+ return x ** 3, (3 * x ** 2).mean(axis=-1)
232
+
233
+ fun_args : dict, default=None
234
+ Arguments to send to the functional form.
235
+ If empty or None and if fun='logcosh', fun_args will take value
236
+ {'alpha' : 1.0}.
237
+
238
+ max_iter : int, default=200
239
+ Maximum number of iterations to perform.
240
+
241
+ tol : float, default=1e-4
242
+ A positive scalar giving the tolerance at which the
243
+ un-mixing matrix is considered to have converged.
244
+
245
+ w_init : ndarray of shape (n_components, n_components), default=None
246
+ Initial un-mixing array. If `w_init=None`, then an array of values
247
+ drawn from a normal distribution is used.
248
+
249
+ whiten_solver : {"eigh", "svd"}, default="svd"
250
+ The solver to use for whitening.
251
+
252
+ - "svd" is more stable numerically if the problem is degenerate, and
253
+ often faster when `n_samples <= n_features`.
254
+
255
+ - "eigh" is generally more memory efficient when
256
+ `n_samples >= n_features`, and can be faster when
257
+ `n_samples >= 50 * n_features`.
258
+
259
+ .. versionadded:: 1.2
260
+
261
+ random_state : int, RandomState instance or None, default=None
262
+ Used to initialize ``w_init`` when not specified, with a
263
+ normal distribution. Pass an int, for reproducible results
264
+ across multiple function calls.
265
+ See :term:`Glossary <random_state>`.
266
+
267
+ return_X_mean : bool, default=False
268
+ If True, X_mean is returned too.
269
+
270
+ compute_sources : bool, default=True
271
+ If False, sources are not computed, but only the rotation matrix.
272
+ This can save memory when working with big data. Defaults to True.
273
+
274
+ return_n_iter : bool, default=False
275
+ Whether or not to return the number of iterations.
276
+
277
+ Returns
278
+ -------
279
+ K : ndarray of shape (n_components, n_features) or None
280
+ If whiten is 'True', K is the pre-whitening matrix that projects data
281
+ onto the first n_components principal components. If whiten is 'False',
282
+ K is 'None'.
283
+
284
+ W : ndarray of shape (n_components, n_components)
285
+ The square matrix that unmixes the data after whitening.
286
+ The mixing matrix is the pseudo-inverse of matrix ``W K``
287
+ if K is not None, else it is the inverse of W.
288
+
289
+ S : ndarray of shape (n_samples, n_components) or None
290
+ Estimated source matrix.
291
+
292
+ X_mean : ndarray of shape (n_features,)
293
+ The mean over features. Returned only if return_X_mean is True.
294
+
295
+ n_iter : int
296
+ If the algorithm is "deflation", n_iter is the
297
+ maximum number of iterations run across all components. Else
298
+ they are just the number of iterations taken to converge. This is
299
+ returned only when return_n_iter is set to `True`.
300
+
301
+ Notes
302
+ -----
303
+ The data matrix X is considered to be a linear combination of
304
+ non-Gaussian (independent) components i.e. X = AS where columns of S
305
+ contain the independent components and A is a linear mixing
306
+ matrix. In short ICA attempts to 'un-mix' the data by estimating an
307
+ un-mixing matrix W where ``S = W K X.``
308
+ While FastICA was proposed to estimate as many sources
309
+ as features, it is possible to estimate less by setting
310
+ n_components < n_features. In this case K is not a square matrix
311
+ and the estimated A is the pseudo-inverse of ``W K``.
312
+
313
+ This implementation was originally made for data of shape
314
+ [n_features, n_samples]. Now the input is transposed
315
+ before the algorithm is applied. This makes it slightly
316
+ faster for Fortran-ordered input.
317
+
318
+ References
319
+ ----------
320
+ .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
321
+ Algorithms and Applications, Neural Networks, 13(4-5), 2000,
322
+ pp. 411-430.
323
+
324
+ Examples
325
+ --------
326
+ >>> from sklearn.datasets import load_digits
327
+ >>> from sklearn.decomposition import fastica
328
+ >>> X, _ = load_digits(return_X_y=True)
329
+ >>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance')
330
+ >>> K.shape
331
+ (7, 64)
332
+ >>> W.shape
333
+ (7, 7)
334
+ >>> S.shape
335
+ (1797, 7)
336
+ """
337
+ est = FastICA(
338
+ n_components=n_components,
339
+ algorithm=algorithm,
340
+ whiten=whiten,
341
+ fun=fun,
342
+ fun_args=fun_args,
343
+ max_iter=max_iter,
344
+ tol=tol,
345
+ w_init=w_init,
346
+ whiten_solver=whiten_solver,
347
+ random_state=random_state,
348
+ )
349
+ est._validate_params()
350
+ S = est._fit_transform(X, compute_sources=compute_sources)
351
+
352
+ if est.whiten in ["unit-variance", "arbitrary-variance"]:
353
+ K = est.whitening_
354
+ X_mean = est.mean_
355
+ else:
356
+ K = None
357
+ X_mean = None
358
+
359
+ returned_values = [K, est._unmixing, S]
360
+ if return_X_mean:
361
+ returned_values.append(X_mean)
362
+ if return_n_iter:
363
+ returned_values.append(est.n_iter_)
364
+
365
+ return returned_values
366
+
367
+
368
+ class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
369
+ """FastICA: a fast algorithm for Independent Component Analysis.
370
+
371
+ The implementation is based on [1]_.
372
+
373
+ Read more in the :ref:`User Guide <ICA>`.
374
+
375
+ Parameters
376
+ ----------
377
+ n_components : int, default=None
378
+ Number of components to use. If None is passed, all are used.
379
+
380
+ algorithm : {'parallel', 'deflation'}, default='parallel'
381
+ Specify which algorithm to use for FastICA.
382
+
383
+ whiten : str or bool, default='unit-variance'
384
+ Specify the whitening strategy to use.
385
+
386
+ - If 'arbitrary-variance', a whitening with arbitrary
387
+ variance is used.
388
+ - If 'unit-variance', the whitening matrix is rescaled to ensure that
389
+ each recovered source has unit variance.
390
+ - If False, the data is already considered to be whitened, and no
391
+ whitening is performed.
392
+
393
+ .. versionchanged:: 1.3
394
+ The default value of `whiten` changed to 'unit-variance' in 1.3.
395
+
396
+ fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
397
+ The functional form of the G function used in the
398
+ approximation to neg-entropy. Could be either 'logcosh', 'exp',
399
+ or 'cube'.
400
+ You can also provide your own function. It should return a tuple
401
+ containing the value of the function, and of its derivative, in the
402
+ point. The derivative should be averaged along its last dimension.
403
+ Example::
404
+
405
+ def my_g(x):
406
+ return x ** 3, (3 * x ** 2).mean(axis=-1)
407
+
408
+ fun_args : dict, default=None
409
+ Arguments to send to the functional form.
410
+ If empty or None and if fun='logcosh', fun_args will take value
411
+ {'alpha' : 1.0}.
412
+
413
+ max_iter : int, default=200
414
+ Maximum number of iterations during fit.
415
+
416
+ tol : float, default=1e-4
417
+ A positive scalar giving the tolerance at which the
418
+ un-mixing matrix is considered to have converged.
419
+
420
+ w_init : array-like of shape (n_components, n_components), default=None
421
+ Initial un-mixing array. If `w_init=None`, then an array of values
422
+ drawn from a normal distribution is used.
423
+
424
+ whiten_solver : {"eigh", "svd"}, default="svd"
425
+ The solver to use for whitening.
426
+
427
+ - "svd" is more stable numerically if the problem is degenerate, and
428
+ often faster when `n_samples <= n_features`.
429
+
430
+ - "eigh" is generally more memory efficient when
431
+ `n_samples >= n_features`, and can be faster when
432
+ `n_samples >= 50 * n_features`.
433
+
434
+ .. versionadded:: 1.2
435
+
436
+ random_state : int, RandomState instance or None, default=None
437
+ Used to initialize ``w_init`` when not specified, with a
438
+ normal distribution. Pass an int, for reproducible results
439
+ across multiple function calls.
440
+ See :term:`Glossary <random_state>`.
441
+
442
+ Attributes
443
+ ----------
444
+ components_ : ndarray of shape (n_components, n_features)
445
+ The linear operator to apply to the data to get the independent
446
+ sources. This is equal to the unmixing matrix when ``whiten`` is
447
+ False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
448
+ ``whiten`` is True.
449
+
450
+ mixing_ : ndarray of shape (n_features, n_components)
451
+ The pseudo-inverse of ``components_``. It is the linear operator
452
+ that maps independent sources to the data.
453
+
454
+ mean_ : ndarray of shape(n_features,)
455
+ The mean over features. Only set if `self.whiten` is True.
456
+
457
+ n_features_in_ : int
458
+ Number of features seen during :term:`fit`.
459
+
460
+ .. versionadded:: 0.24
461
+
462
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
463
+ Names of features seen during :term:`fit`. Defined only when `X`
464
+ has feature names that are all strings.
465
+
466
+ .. versionadded:: 1.0
467
+
468
+ n_iter_ : int
469
+ If the algorithm is "deflation", n_iter is the
470
+ maximum number of iterations run across all components. Else
471
+ they are just the number of iterations taken to converge.
472
+
473
+ whitening_ : ndarray of shape (n_components, n_features)
474
+ Only set if whiten is 'True'. This is the pre-whitening matrix
475
+ that projects data onto the first `n_components` principal components.
476
+
477
+ See Also
478
+ --------
479
+ PCA : Principal component analysis (PCA).
480
+ IncrementalPCA : Incremental principal components analysis (IPCA).
481
+ KernelPCA : Kernel Principal component analysis (KPCA).
482
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
483
+ SparsePCA : Sparse Principal Components Analysis (SparsePCA).
484
+
485
+ References
486
+ ----------
487
+ .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:
488
+ Algorithms and Applications, Neural Networks, 13(4-5), 2000,
489
+ pp. 411-430.
490
+
491
+ Examples
492
+ --------
493
+ >>> from sklearn.datasets import load_digits
494
+ >>> from sklearn.decomposition import FastICA
495
+ >>> X, _ = load_digits(return_X_y=True)
496
+ >>> transformer = FastICA(n_components=7,
497
+ ... random_state=0,
498
+ ... whiten='unit-variance')
499
+ >>> X_transformed = transformer.fit_transform(X)
500
+ >>> X_transformed.shape
501
+ (1797, 7)
502
+ """
503
+
504
+ _parameter_constraints: dict = {
505
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
506
+ "algorithm": [StrOptions({"parallel", "deflation"})],
507
+ "whiten": [
508
+ StrOptions({"arbitrary-variance", "unit-variance"}),
509
+ Options(bool, {False}),
510
+ ],
511
+ "fun": [StrOptions({"logcosh", "exp", "cube"}), callable],
512
+ "fun_args": [dict, None],
513
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
514
+ "tol": [Interval(Real, 0.0, None, closed="left")],
515
+ "w_init": ["array-like", None],
516
+ "whiten_solver": [StrOptions({"eigh", "svd"})],
517
+ "random_state": ["random_state"],
518
+ }
519
+
520
+ def __init__(
521
+ self,
522
+ n_components=None,
523
+ *,
524
+ algorithm="parallel",
525
+ whiten="unit-variance",
526
+ fun="logcosh",
527
+ fun_args=None,
528
+ max_iter=200,
529
+ tol=1e-4,
530
+ w_init=None,
531
+ whiten_solver="svd",
532
+ random_state=None,
533
+ ):
534
+ super().__init__()
535
+ self.n_components = n_components
536
+ self.algorithm = algorithm
537
+ self.whiten = whiten
538
+ self.fun = fun
539
+ self.fun_args = fun_args
540
+ self.max_iter = max_iter
541
+ self.tol = tol
542
+ self.w_init = w_init
543
+ self.whiten_solver = whiten_solver
544
+ self.random_state = random_state
545
+
546
+ def _fit_transform(self, X, compute_sources=False):
547
+ """Fit the model.
548
+
549
+ Parameters
550
+ ----------
551
+ X : array-like of shape (n_samples, n_features)
552
+ Training data, where `n_samples` is the number of samples
553
+ and `n_features` is the number of features.
554
+
555
+ compute_sources : bool, default=False
556
+ If False, sources are not computed but only the rotation matrix.
557
+ This can save memory when working with big data. Defaults to False.
558
+
559
+ Returns
560
+ -------
561
+ S : ndarray of shape (n_samples, n_components) or None
562
+ Sources matrix. `None` if `compute_sources` is `False`.
563
+ """
564
+ XT = self._validate_data(
565
+ X, copy=self.whiten, dtype=[np.float64, np.float32], ensure_min_samples=2
566
+ ).T
567
+ fun_args = {} if self.fun_args is None else self.fun_args
568
+ random_state = check_random_state(self.random_state)
569
+
570
+ alpha = fun_args.get("alpha", 1.0)
571
+ if not 1 <= alpha <= 2:
572
+ raise ValueError("alpha must be in [1,2]")
573
+
574
+ if self.fun == "logcosh":
575
+ g = _logcosh
576
+ elif self.fun == "exp":
577
+ g = _exp
578
+ elif self.fun == "cube":
579
+ g = _cube
580
+ elif callable(self.fun):
581
+
582
+ def g(x, fun_args):
583
+ return self.fun(x, **fun_args)
584
+
585
+ n_features, n_samples = XT.shape
586
+ n_components = self.n_components
587
+ if not self.whiten and n_components is not None:
588
+ n_components = None
589
+ warnings.warn("Ignoring n_components with whiten=False.")
590
+
591
+ if n_components is None:
592
+ n_components = min(n_samples, n_features)
593
+ if n_components > min(n_samples, n_features):
594
+ n_components = min(n_samples, n_features)
595
+ warnings.warn(
596
+ "n_components is too large: it will be set to %s" % n_components
597
+ )
598
+
599
+ if self.whiten:
600
+ # Centering the features of X
601
+ X_mean = XT.mean(axis=-1)
602
+ XT -= X_mean[:, np.newaxis]
603
+
604
+ # Whitening and preprocessing by PCA
605
+ if self.whiten_solver == "eigh":
606
+ # Faster when num_samples >> n_features
607
+ d, u = linalg.eigh(XT.dot(X))
608
+ sort_indices = np.argsort(d)[::-1]
609
+ eps = np.finfo(d.dtype).eps
610
+ degenerate_idx = d < eps
611
+ if np.any(degenerate_idx):
612
+ warnings.warn(
613
+ "There are some small singular values, using "
614
+ "whiten_solver = 'svd' might lead to more "
615
+ "accurate results."
616
+ )
617
+ d[degenerate_idx] = eps # For numerical issues
618
+ np.sqrt(d, out=d)
619
+ d, u = d[sort_indices], u[:, sort_indices]
620
+ elif self.whiten_solver == "svd":
621
+ u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
622
+
623
+ # Give consistent eigenvectors for both svd solvers
624
+ u *= np.sign(u[0])
625
+
626
+ K = (u / d).T[:n_components] # see (6.33) p.140
627
+ del u, d
628
+ X1 = np.dot(K, XT)
629
+ # see (13.6) p.267 Here X1 is white and data
630
+ # in X has been projected onto a subspace by PCA
631
+ X1 *= np.sqrt(n_samples)
632
+ else:
633
+ # X must be casted to floats to avoid typing issues with numpy
634
+ # 2.0 and the line below
635
+ X1 = as_float_array(XT, copy=False) # copy has been taken care of
636
+
637
+ w_init = self.w_init
638
+ if w_init is None:
639
+ w_init = np.asarray(
640
+ random_state.normal(size=(n_components, n_components)), dtype=X1.dtype
641
+ )
642
+
643
+ else:
644
+ w_init = np.asarray(w_init)
645
+ if w_init.shape != (n_components, n_components):
646
+ raise ValueError(
647
+ "w_init has invalid shape -- should be %(shape)s"
648
+ % {"shape": (n_components, n_components)}
649
+ )
650
+
651
+ kwargs = {
652
+ "tol": self.tol,
653
+ "g": g,
654
+ "fun_args": fun_args,
655
+ "max_iter": self.max_iter,
656
+ "w_init": w_init,
657
+ }
658
+
659
+ if self.algorithm == "parallel":
660
+ W, n_iter = _ica_par(X1, **kwargs)
661
+ elif self.algorithm == "deflation":
662
+ W, n_iter = _ica_def(X1, **kwargs)
663
+ del X1
664
+
665
+ self.n_iter_ = n_iter
666
+
667
+ if compute_sources:
668
+ if self.whiten:
669
+ S = np.linalg.multi_dot([W, K, XT]).T
670
+ else:
671
+ S = np.dot(W, XT).T
672
+ else:
673
+ S = None
674
+
675
+ if self.whiten:
676
+ if self.whiten == "unit-variance":
677
+ if not compute_sources:
678
+ S = np.linalg.multi_dot([W, K, XT]).T
679
+ S_std = np.std(S, axis=0, keepdims=True)
680
+ S /= S_std
681
+ W /= S_std.T
682
+
683
+ self.components_ = np.dot(W, K)
684
+ self.mean_ = X_mean
685
+ self.whitening_ = K
686
+ else:
687
+ self.components_ = W
688
+
689
+ self.mixing_ = linalg.pinv(self.components_, check_finite=False)
690
+ self._unmixing = W
691
+
692
+ return S
693
+
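The whitening branch above projects the centered data with K and rescales by sqrt(n_samples), so the whitened representation has (approximately) identity covariance. A rough check of that property, assuming only the documented `whitening_` and `mean_` attributes and synthetic toy data:

import numpy as np
from sklearn.decomposition import FastICA

# Sketch: the whitened data (X - mean_) @ whitening_.T, rescaled by
# sqrt(n_samples) as in _fit_transform above, has ~identity covariance.
rng = np.random.default_rng(0)
S = rng.laplace(size=(2000, 3))      # non-Gaussian sources
A = rng.normal(size=(3, 3))          # mixing matrix
X = S @ A.T                          # observed mixtures

ica = FastICA(n_components=3, whiten="unit-variance", random_state=0).fit(X)

Z = np.sqrt(X.shape[0]) * (X - ica.mean_) @ ica.whitening_.T
print(np.allclose(Z.T @ Z / X.shape[0], np.eye(3), atol=1e-6))  # True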
694
+ @_fit_context(prefer_skip_nested_validation=True)
695
+ def fit_transform(self, X, y=None):
696
+ """Fit the model and recover the sources from X.
697
+
698
+ Parameters
699
+ ----------
700
+ X : array-like of shape (n_samples, n_features)
701
+ Training data, where `n_samples` is the number of samples
702
+ and `n_features` is the number of features.
703
+
704
+ y : Ignored
705
+ Not used, present for API consistency by convention.
706
+
707
+ Returns
708
+ -------
709
+ X_new : ndarray of shape (n_samples, n_components)
710
+ Estimated sources obtained by transforming the data with the
711
+ estimated unmixing matrix.
712
+ """
713
+ return self._fit_transform(X, compute_sources=True)
714
+
715
+ @_fit_context(prefer_skip_nested_validation=True)
716
+ def fit(self, X, y=None):
717
+ """Fit the model to X.
718
+
719
+ Parameters
720
+ ----------
721
+ X : array-like of shape (n_samples, n_features)
722
+ Training data, where `n_samples` is the number of samples
723
+ and `n_features` is the number of features.
724
+
725
+ y : Ignored
726
+ Not used, present for API consistency by convention.
727
+
728
+ Returns
729
+ -------
730
+ self : object
731
+ Returns the instance itself.
732
+ """
733
+ self._fit_transform(X, compute_sources=False)
734
+ return self
735
+
736
+ def transform(self, X, copy=True):
737
+ """Recover the sources from X (apply the unmixing matrix).
738
+
739
+ Parameters
740
+ ----------
741
+ X : array-like of shape (n_samples, n_features)
742
+ Data to transform, where `n_samples` is the number of samples
743
+ and `n_features` is the number of features.
744
+
745
+ copy : bool, default=True
746
+ If False, data passed to fit can be overwritten. Defaults to True.
747
+
748
+ Returns
749
+ -------
750
+ X_new : ndarray of shape (n_samples, n_components)
751
+ Estimated sources obtained by transforming the data with the
752
+ estimated unmixing matrix.
753
+ """
754
+ check_is_fitted(self)
755
+
756
+ X = self._validate_data(
757
+ X, copy=(copy and self.whiten), dtype=[np.float64, np.float32], reset=False
758
+ )
759
+ if self.whiten:
760
+ X -= self.mean_
761
+
762
+ return np.dot(X, self.components_.T)
763
+
764
+ def inverse_transform(self, X, copy=True):
765
+ """Transform the sources back to the mixed data (apply mixing matrix).
766
+
767
+ Parameters
768
+ ----------
769
+ X : array-like of shape (n_samples, n_components)
770
+ Sources, where `n_samples` is the number of samples
771
+ and `n_components` is the number of components.
772
+ copy : bool, default=True
773
+ If False, data passed to fit are overwritten. Defaults to True.
774
+
775
+ Returns
776
+ -------
777
+ X_new : ndarray of shape (n_samples, n_features)
778
+ Reconstructed data obtained with the mixing matrix.
779
+ """
780
+ check_is_fitted(self)
781
+
782
+ X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32])
783
+ X = np.dot(X, self.mixing_.T)
784
+ if self.whiten:
785
+ X += self.mean_
786
+
787
+ return X
788
+
789
+ @property
790
+ def _n_features_out(self):
791
+ """Number of transformed output features."""
792
+ return self.components_.shape[0]
793
+
794
+ def _more_tags(self):
795
+ return {"preserves_dtype": [np.float32, np.float64]}
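A minimal usage sketch of the FastICA round trip defined above (fit_transform to recover sources, inverse_transform to map back to the mixed signals); the signal sizes and mixing matrix below are arbitrary illustration values:

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
# Two independent, non-Gaussian sources mixed into three observed channels.
S = rng.laplace(size=(2000, 2))
A = np.array([[1.0, 0.5], [0.5, 1.0], [0.2, 0.8]])  # illustrative mixing matrix
X = S @ A.T

ica = FastICA(n_components=2, whiten="unit-variance", random_state=0)
S_est = ica.fit_transform(X)           # estimated sources, shape (2000, 2)
X_back = ica.inverse_transform(S_est)  # applies mixing_ and adds back mean_

print(S_est.shape, X_back.shape)
print(np.abs(X - X_back).max())        # near zero: X has rank 2, so the round trip is essentially exact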
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py ADDED
@@ -0,0 +1,572 @@
1
+ """Kernel Principal Components Analysis."""
2
+
3
+ # Author: Mathieu Blondel <[email protected]>
4
+ # Sylvain Marie <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral, Real
8
+
9
+ import numpy as np
10
+ from scipy import linalg
11
+ from scipy.linalg import eigh
12
+ from scipy.sparse.linalg import eigsh
13
+
14
+ from ..base import (
15
+ BaseEstimator,
16
+ ClassNamePrefixFeaturesOutMixin,
17
+ TransformerMixin,
18
+ _fit_context,
19
+ )
20
+ from ..exceptions import NotFittedError
21
+ from ..metrics.pairwise import pairwise_kernels
22
+ from ..preprocessing import KernelCenterer
23
+ from ..utils._arpack import _init_arpack_v0
24
+ from ..utils._param_validation import Interval, StrOptions
25
+ from ..utils.extmath import _randomized_eigsh, svd_flip
26
+ from ..utils.validation import (
27
+ _check_psd_eigenvalues,
28
+ check_is_fitted,
29
+ )
30
+
31
+
32
+ class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
33
+ """Kernel Principal component analysis (KPCA) [1]_.
34
+
35
+ Non-linear dimensionality reduction through the use of kernels (see
36
+ :ref:`metrics`).
37
+
38
+ It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD
39
+ or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the
40
+ truncated SVD, depending on the shape of the input data and the number of
41
+ components to extract. It can also use a randomized truncated SVD by the
42
+ method proposed in [3]_, see `eigen_solver`.
43
+
44
+ For a usage example, see
45
+ :ref:`sphx_glr_auto_examples_decomposition_plot_kernel_pca.py`.
46
+
47
+ Read more in the :ref:`User Guide <kernel_PCA>`.
48
+
49
+ Parameters
50
+ ----------
51
+ n_components : int, default=None
52
+ Number of components. If None, all non-zero components are kept.
53
+
54
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \
55
+ or callable, default='linear'
56
+ Kernel used for PCA.
57
+
58
+ gamma : float, default=None
59
+ Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
60
+ kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``.
61
+
62
+ degree : float, default=3
63
+ Degree for poly kernels. Ignored by other kernels.
64
+
65
+ coef0 : float, default=1
66
+ Independent term in poly and sigmoid kernels.
67
+ Ignored by other kernels.
68
+
69
+ kernel_params : dict, default=None
70
+ Parameters (keyword arguments) and
71
+ values for kernel passed as callable object.
72
+ Ignored by other kernels.
73
+
74
+ alpha : float, default=1.0
75
+ Hyperparameter of the ridge regression that learns the
76
+ inverse transform (when fit_inverse_transform=True).
77
+
78
+ fit_inverse_transform : bool, default=False
79
+ Learn the inverse transform for non-precomputed kernels
80
+ (i.e. learn to find the pre-image of a point). This method is based
81
+ on [2]_.
82
+
83
+ eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \
84
+ default='auto'
85
+ Select eigensolver to use. If `n_components` is much
86
+ less than the number of training samples, randomized (or arpack to a
87
+ smaller extent) may be more efficient than the dense eigensolver.
88
+ Randomized SVD is performed according to the method of Halko et al
89
+ [3]_.
90
+
91
+ auto :
92
+ the solver is selected by a default policy based on n_samples
93
+ (the number of training samples) and `n_components`:
94
+ if the number of components to extract is less than 10 (strict) and
95
+ the number of samples is more than 200 (strict), the 'arpack'
96
+ method is enabled. Otherwise the exact full eigenvalue
97
+ decomposition is computed and optionally truncated afterwards
98
+ ('dense' method).
99
+ dense :
100
+ run exact full eigenvalue decomposition calling the standard
101
+ LAPACK solver via `scipy.linalg.eigh`, and select the components
102
+ by postprocessing
103
+ arpack :
104
+ run SVD truncated to n_components calling ARPACK solver using
105
+ `scipy.sparse.linalg.eigsh`. It requires strictly
106
+ 0 < n_components < n_samples
107
+ randomized :
108
+ run randomized SVD by the method of Halko et al. [3]_. The current
109
+ implementation selects eigenvalues based on their modulus; therefore
110
+ using this method can lead to unexpected results if the kernel is
111
+ not positive semi-definite. See also [4]_.
112
+
113
+ .. versionchanged:: 1.0
114
+ `'randomized'` was added.
115
+
116
+ tol : float, default=0
117
+ Convergence tolerance for arpack.
118
+ If 0, optimal value will be chosen by arpack.
119
+
120
+ max_iter : int, default=None
121
+ Maximum number of iterations for arpack.
122
+ If None, optimal value will be chosen by arpack.
123
+
124
+ iterated_power : int >= 0, or 'auto', default='auto'
125
+ Number of iterations for the power method computed by
126
+ svd_solver == 'randomized'. When 'auto', it is set to 7 when
127
+ `n_components < 0.1 * min(X.shape)`, otherwise it is set to 4.
128
+
129
+ .. versionadded:: 1.0
130
+
131
+ remove_zero_eig : bool, default=False
132
+ If True, then all components with zero eigenvalues are removed, so
133
+ that the number of components in the output may be < n_components
134
+ (and sometimes even zero due to numerical instability).
135
+ When n_components is None, this parameter is ignored and components
136
+ with zero eigenvalues are removed regardless.
137
+
138
+ random_state : int, RandomState instance or None, default=None
139
+ Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int
140
+ for reproducible results across multiple function calls.
141
+ See :term:`Glossary <random_state>`.
142
+
143
+ .. versionadded:: 0.18
144
+
145
+ copy_X : bool, default=True
146
+ If True, input X is copied and stored by the model in the `X_fit_`
147
+ attribute. If no further changes will be done to X, setting
148
+ `copy_X=False` saves memory by storing a reference.
149
+
150
+ .. versionadded:: 0.18
151
+
152
+ n_jobs : int, default=None
153
+ The number of parallel jobs to run.
154
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
155
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
156
+ for more details.
157
+
158
+ .. versionadded:: 0.18
159
+
160
+ Attributes
161
+ ----------
162
+ eigenvalues_ : ndarray of shape (n_components,)
163
+ Eigenvalues of the centered kernel matrix in decreasing order.
164
+ If `n_components` and `remove_zero_eig` are not set,
165
+ then all values are stored.
166
+
167
+ eigenvectors_ : ndarray of shape (n_samples, n_components)
168
+ Eigenvectors of the centered kernel matrix. If `n_components` and
169
+ `remove_zero_eig` are not set, then all components are stored.
170
+
171
+ dual_coef_ : ndarray of shape (n_samples, n_features)
172
+ Inverse transform matrix. Only available when
173
+ ``fit_inverse_transform`` is True.
174
+
175
+ X_transformed_fit_ : ndarray of shape (n_samples, n_components)
176
+ Projection of the fitted data on the kernel principal components.
177
+ Only available when ``fit_inverse_transform`` is True.
178
+
179
+ X_fit_ : ndarray of shape (n_samples, n_features)
180
+ The data used to fit the model. If `copy_X=False`, then `X_fit_` is
181
+ a reference. This attribute is used for the calls to transform.
182
+
183
+ n_features_in_ : int
184
+ Number of features seen during :term:`fit`.
185
+
186
+ .. versionadded:: 0.24
187
+
188
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
189
+ Names of features seen during :term:`fit`. Defined only when `X`
190
+ has feature names that are all strings.
191
+
192
+ .. versionadded:: 1.0
193
+
194
+ gamma_ : float
195
+ Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma`
196
+ is explicitly provided, this is just the same as `gamma`. When `gamma`
197
+ is `None`, this is the actual value of kernel coefficient.
198
+
199
+ .. versionadded:: 1.3
200
+
201
+ See Also
202
+ --------
203
+ FastICA : A fast algorithm for Independent Component Analysis.
204
+ IncrementalPCA : Incremental Principal Component Analysis.
205
+ NMF : Non-Negative Matrix Factorization.
206
+ PCA : Principal Component Analysis.
207
+ SparsePCA : Sparse Principal Component Analysis.
208
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
209
+
210
+ References
211
+ ----------
212
+ .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
213
+ "Kernel principal component analysis."
214
+ International conference on artificial neural networks.
215
+ Springer, Berlin, Heidelberg, 1997.
216
+ <https://people.eecs.berkeley.edu/~wainwrig/stat241b/scholkopf_kernel.pdf>`_
217
+
218
+ .. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
219
+ "Learning to find pre-images."
220
+ Advances in neural information processing systems 16 (2004): 449-456.
221
+ <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
222
+
223
+ .. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp.
224
+ "Finding structure with randomness: Probabilistic algorithms for
225
+ constructing approximate matrix decompositions."
226
+ SIAM review 53.2 (2011): 217-288. <0909.4061>`
227
+
228
+ .. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert.
229
+ "A randomized algorithm for the decomposition of matrices."
230
+ Applied and Computational Harmonic Analysis 30.1 (2011): 47-68.
231
+ <https://www.sciencedirect.com/science/article/pii/S1063520310000242>`_
232
+
233
+ Examples
234
+ --------
235
+ >>> from sklearn.datasets import load_digits
236
+ >>> from sklearn.decomposition import KernelPCA
237
+ >>> X, _ = load_digits(return_X_y=True)
238
+ >>> transformer = KernelPCA(n_components=7, kernel='linear')
239
+ >>> X_transformed = transformer.fit_transform(X)
240
+ >>> X_transformed.shape
241
+ (1797, 7)
242
+ """
243
+
244
+ _parameter_constraints: dict = {
245
+ "n_components": [
246
+ Interval(Integral, 1, None, closed="left"),
247
+ None,
248
+ ],
249
+ "kernel": [
250
+ StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}),
251
+ callable,
252
+ ],
253
+ "gamma": [
254
+ Interval(Real, 0, None, closed="left"),
255
+ None,
256
+ ],
257
+ "degree": [Interval(Real, 0, None, closed="left")],
258
+ "coef0": [Interval(Real, None, None, closed="neither")],
259
+ "kernel_params": [dict, None],
260
+ "alpha": [Interval(Real, 0, None, closed="left")],
261
+ "fit_inverse_transform": ["boolean"],
262
+ "eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})],
263
+ "tol": [Interval(Real, 0, None, closed="left")],
264
+ "max_iter": [
265
+ Interval(Integral, 1, None, closed="left"),
266
+ None,
267
+ ],
268
+ "iterated_power": [
269
+ Interval(Integral, 0, None, closed="left"),
270
+ StrOptions({"auto"}),
271
+ ],
272
+ "remove_zero_eig": ["boolean"],
273
+ "random_state": ["random_state"],
274
+ "copy_X": ["boolean"],
275
+ "n_jobs": [None, Integral],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ n_components=None,
281
+ *,
282
+ kernel="linear",
283
+ gamma=None,
284
+ degree=3,
285
+ coef0=1,
286
+ kernel_params=None,
287
+ alpha=1.0,
288
+ fit_inverse_transform=False,
289
+ eigen_solver="auto",
290
+ tol=0,
291
+ max_iter=None,
292
+ iterated_power="auto",
293
+ remove_zero_eig=False,
294
+ random_state=None,
295
+ copy_X=True,
296
+ n_jobs=None,
297
+ ):
298
+ self.n_components = n_components
299
+ self.kernel = kernel
300
+ self.kernel_params = kernel_params
301
+ self.gamma = gamma
302
+ self.degree = degree
303
+ self.coef0 = coef0
304
+ self.alpha = alpha
305
+ self.fit_inverse_transform = fit_inverse_transform
306
+ self.eigen_solver = eigen_solver
307
+ self.tol = tol
308
+ self.max_iter = max_iter
309
+ self.iterated_power = iterated_power
310
+ self.remove_zero_eig = remove_zero_eig
311
+ self.random_state = random_state
312
+ self.n_jobs = n_jobs
313
+ self.copy_X = copy_X
314
+
315
+ def _get_kernel(self, X, Y=None):
316
+ if callable(self.kernel):
317
+ params = self.kernel_params or {}
318
+ else:
319
+ params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0}
320
+ return pairwise_kernels(
321
+ X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params
322
+ )
323
+
324
+ def _fit_transform(self, K):
325
+ """Fit the model using the kernel matrix K."""
326
+ # center kernel
327
+ K = self._centerer.fit_transform(K)
328
+
329
+ # adjust n_components according to user inputs
330
+ if self.n_components is None:
331
+ n_components = K.shape[0] # use all dimensions
332
+ else:
333
+ n_components = min(K.shape[0], self.n_components)
334
+
335
+ # compute eigenvectors
336
+ if self.eigen_solver == "auto":
337
+ if K.shape[0] > 200 and n_components < 10:
338
+ eigen_solver = "arpack"
339
+ else:
340
+ eigen_solver = "dense"
341
+ else:
342
+ eigen_solver = self.eigen_solver
343
+
344
+ if eigen_solver == "dense":
345
+ # Note: subset_by_index specifies the indices of smallest/largest to return
346
+ self.eigenvalues_, self.eigenvectors_ = eigh(
347
+ K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1)
348
+ )
349
+ elif eigen_solver == "arpack":
350
+ v0 = _init_arpack_v0(K.shape[0], self.random_state)
351
+ self.eigenvalues_, self.eigenvectors_ = eigsh(
352
+ K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0
353
+ )
354
+ elif eigen_solver == "randomized":
355
+ self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh(
356
+ K,
357
+ n_components=n_components,
358
+ n_iter=self.iterated_power,
359
+ random_state=self.random_state,
360
+ selection="module",
361
+ )
362
+
363
+ # make sure that the eigenvalues are ok and fix numerical issues
364
+ self.eigenvalues_ = _check_psd_eigenvalues(
365
+ self.eigenvalues_, enable_warnings=False
366
+ )
367
+
368
+ # flip eigenvectors' sign to enforce deterministic output
369
+ self.eigenvectors_, _ = svd_flip(
370
+ self.eigenvectors_, np.zeros_like(self.eigenvectors_).T
371
+ )
372
+
373
+ # sort eigenvectors in descending order
374
+ indices = self.eigenvalues_.argsort()[::-1]
375
+ self.eigenvalues_ = self.eigenvalues_[indices]
376
+ self.eigenvectors_ = self.eigenvectors_[:, indices]
377
+
378
+ # remove eigenvectors with a zero eigenvalue (null space) if required
379
+ if self.remove_zero_eig or self.n_components is None:
380
+ self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0]
381
+ self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0]
382
+
383
+ # Maintenance note on Eigenvectors normalization
384
+ # ----------------------------------------------
385
+ # there is a link between
386
+ # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)'
387
+ # if v is an eigenvector of K
388
+ # then Phi(X)v is an eigenvector of Phi(X)Phi(X)'
389
+ # if u is an eigenvector of Phi(X)Phi(X)'
390
+ # then Phi(X)'u is an eigenvector of Phi(X)'Phi(X)
391
+ #
392
+ # At this stage our self.eigenvectors_ (the v) have norm 1, we need to scale
393
+ # them so that eigenvectors in kernel feature space (the u) have norm=1
394
+ # instead
395
+ #
396
+ # We COULD scale them here:
397
+ # self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_)
398
+ #
399
+ # But choose to perform that LATER when needed, in `fit()` and in
400
+ # `transform()`.
401
+
402
+ return K
403
+
404
+ def _fit_inverse_transform(self, X_transformed, X):
405
+ if hasattr(X, "tocsr"):
406
+ raise NotImplementedError(
407
+ "Inverse transform not implemented for sparse matrices!"
408
+ )
409
+
410
+ n_samples = X_transformed.shape[0]
411
+ K = self._get_kernel(X_transformed)
412
+ K.flat[:: n_samples + 1] += self.alpha
413
+ self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True)
414
+ self.X_transformed_fit_ = X_transformed
415
+
416
+ @_fit_context(prefer_skip_nested_validation=True)
417
+ def fit(self, X, y=None):
418
+ """Fit the model from data in X.
419
+
420
+ Parameters
421
+ ----------
422
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
423
+ Training vector, where `n_samples` is the number of samples
424
+ and `n_features` is the number of features.
425
+
426
+ y : Ignored
427
+ Not used, present for API consistency by convention.
428
+
429
+ Returns
430
+ -------
431
+ self : object
432
+ Returns the instance itself.
433
+ """
434
+ if self.fit_inverse_transform and self.kernel == "precomputed":
435
+ raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
436
+ X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X)
437
+ self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma
438
+ self._centerer = KernelCenterer().set_output(transform="default")
439
+ K = self._get_kernel(X)
440
+ self._fit_transform(K)
441
+
442
+ if self.fit_inverse_transform:
443
+ # no need to use the kernel to transform X, use shortcut expression
444
+ X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
445
+
446
+ self._fit_inverse_transform(X_transformed, X)
447
+
448
+ self.X_fit_ = X
449
+ return self
450
+
451
+ def fit_transform(self, X, y=None, **params):
452
+ """Fit the model from data in X and transform X.
453
+
454
+ Parameters
455
+ ----------
456
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
457
+ Training vector, where `n_samples` is the number of samples
458
+ and `n_features` is the number of features.
459
+
460
+ y : Ignored
461
+ Not used, present for API consistency by convention.
462
+
463
+ **params : kwargs
464
+ Parameters (keyword arguments) and values passed to
465
+ the fit_transform instance.
466
+
467
+ Returns
468
+ -------
469
+ X_new : ndarray of shape (n_samples, n_components)
470
+ Transformed values.
471
+ """
472
+ self.fit(X, **params)
473
+
474
+ # no need to use the kernel to transform X, use shortcut expression
475
+ X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
476
+
477
+ if self.fit_inverse_transform:
478
+ self._fit_inverse_transform(X_transformed, X)
479
+
480
+ return X_transformed
481
+
482
+ def transform(self, X):
483
+ """Transform X.
484
+
485
+ Parameters
486
+ ----------
487
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
488
+ Training vector, where `n_samples` is the number of samples
489
+ and `n_features` is the number of features.
490
+
491
+ Returns
492
+ -------
493
+ X_new : ndarray of shape (n_samples, n_components)
494
+ Transformed values.
495
+ """
496
+ check_is_fitted(self)
497
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
498
+
499
+ # Compute centered gram matrix between X and training data X_fit_
500
+ K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
501
+
502
+ # scale eigenvectors (properly account for null-space for dot product)
503
+ non_zeros = np.flatnonzero(self.eigenvalues_)
504
+ scaled_alphas = np.zeros_like(self.eigenvectors_)
505
+ scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt(
506
+ self.eigenvalues_[non_zeros]
507
+ )
508
+
509
+ # Project with a scalar product between K and the scaled eigenvectors
510
+ return np.dot(K, scaled_alphas)
511
+
512
+ def inverse_transform(self, X):
513
+ """Transform X back to original space.
514
+
515
+ ``inverse_transform`` approximates the inverse transformation using
516
+ a learned pre-image. The pre-image is learned by kernel ridge
517
+ regression of the original data on their low-dimensional representation
518
+ vectors.
519
+
520
+ .. note:
521
+ :meth:`~sklearn.decomposition.fit` internally uses a centered
522
+ kernel. As the centered kernel no longer contains the information
523
+ of the mean of kernel features, such information is not taken into
524
+ account in reconstruction.
525
+
526
+ .. note::
527
+ When users want to compute inverse transformation for 'linear'
528
+ kernel, it is recommended that they use
529
+ :class:`~sklearn.decomposition.PCA` instead. Unlike
530
+ :class:`~sklearn.decomposition.PCA`,
531
+ :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform``
532
+ does not reconstruct the mean of data when 'linear' kernel is used
533
+ due to the use of centered kernel.
534
+
535
+ Parameters
536
+ ----------
537
+ X : {array-like, sparse matrix} of shape (n_samples, n_components)
538
+ Training vector, where `n_samples` is the number of samples
539
+ and `n_features` is the number of features.
540
+
541
+ Returns
542
+ -------
543
+ X_new : ndarray of shape (n_samples, n_features)
544
+ Reconstruction of X in the original space.
545
+
546
+ References
547
+ ----------
548
+ `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
549
+ "Learning to find pre-images."
550
+ Advances in neural information processing systems 16 (2004): 449-456.
551
+ <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
552
+ """
553
+ if not self.fit_inverse_transform:
554
+ raise NotFittedError(
555
+ "The fit_inverse_transform parameter was not"
556
+ " set to True when instantiating and hence "
557
+ "the inverse transform is not available."
558
+ )
559
+
560
+ K = self._get_kernel(X, self.X_transformed_fit_)
561
+ return np.dot(K, self.dual_coef_)
562
+
563
+ def _more_tags(self):
564
+ return {
565
+ "preserves_dtype": [np.float64, np.float32],
566
+ "pairwise": self.kernel == "precomputed",
567
+ }
568
+
569
+ @property
570
+ def _n_features_out(self):
571
+ """Number of transformed output features."""
572
+ return self.eigenvalues_.shape[0]
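A minimal sketch of the pre-image workflow implemented above: with fit_inverse_transform=True, fit() also learns the ridge-regression pre-image model (dual_coef_), so inverse_transform becomes available. The dataset and kernel parameters below are arbitrary illustration values:

import numpy as np
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA

X, _ = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)

kpca = KernelPCA(n_components=2, kernel="rbf", gamma=10,
                 fit_inverse_transform=True, alpha=0.1)
X_kpca = kpca.fit_transform(X)           # (400, 2) projection onto kernel PCs
X_back = kpca.inverse_transform(X_kpca)  # approximate pre-images in input space

print(X_kpca.shape, X_back.shape)
print(np.mean((X - X_back) ** 2))        # small but non-zero reconstruction error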
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_lda.py ADDED
@@ -0,0 +1,929 @@
1
+ """
2
+
3
+ =============================================================
4
+ Online Latent Dirichlet Allocation with variational inference
5
+ =============================================================
6
+
7
+ This implementation is modified from Matthew D. Hoffman's onlineldavb code
8
+ Link: https://github.com/blei-lab/onlineldavb
9
+ """
10
+
11
+ # Author: Chyi-Kwei Yau
12
+ # Author: Matthew D. Hoffman (original onlineldavb implementation)
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ import scipy.sparse as sp
17
+ from joblib import effective_n_jobs
18
+ from scipy.special import gammaln, logsumexp
19
+
20
+ from ..base import (
21
+ BaseEstimator,
22
+ ClassNamePrefixFeaturesOutMixin,
23
+ TransformerMixin,
24
+ _fit_context,
25
+ )
26
+ from ..utils import check_random_state, gen_batches, gen_even_slices
27
+ from ..utils._param_validation import Interval, StrOptions
28
+ from ..utils.parallel import Parallel, delayed
29
+ from ..utils.validation import check_is_fitted, check_non_negative
30
+ from ._online_lda_fast import (
31
+ _dirichlet_expectation_1d as cy_dirichlet_expectation_1d,
32
+ )
33
+ from ._online_lda_fast import (
34
+ _dirichlet_expectation_2d,
35
+ )
36
+ from ._online_lda_fast import (
37
+ mean_change as cy_mean_change,
38
+ )
39
+
40
+ EPS = np.finfo(float).eps
41
+
42
+
43
+ def _update_doc_distribution(
44
+ X,
45
+ exp_topic_word_distr,
46
+ doc_topic_prior,
47
+ max_doc_update_iter,
48
+ mean_change_tol,
49
+ cal_sstats,
50
+ random_state,
51
+ ):
52
+ """E-step: update document-topic distribution.
53
+
54
+ Parameters
55
+ ----------
56
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
57
+ Document word matrix.
58
+
59
+ exp_topic_word_distr : ndarray of shape (n_topics, n_features)
60
+ Exponential value of expectation of log topic word distribution.
61
+ In the literature, this is `exp(E[log(beta)])`.
62
+
63
+ doc_topic_prior : float
64
+ Prior of document topic distribution `theta`.
65
+
66
+ max_doc_update_iter : int
67
+ Max number of iterations for updating document topic distribution in
68
+ the E-step.
69
+
70
+ mean_change_tol : float
71
+ Stopping tolerance for updating document topic distribution in E-step.
72
+
73
+ cal_sstats : bool
74
+ Parameter that indicates whether to calculate sufficient statistics or not.
75
+ Set `cal_sstats` to `True` when we need to run M-step.
76
+
77
+ random_state : RandomState instance or None
78
+ Parameter that indicates how to initialize the document topic distribution.
79
+ Setting `random_state` to None will initialize the document topic
80
+ distribution to a constant number.
81
+
82
+ Returns
83
+ -------
84
+ (doc_topic_distr, suff_stats) :
85
+ `doc_topic_distr` is unnormalized topic distribution for each document.
86
+ In the literature, this is `gamma`. We can calculate `E[log(theta)]`
87
+ from it.
88
+ `suff_stats` is expected sufficient statistics for the M-step.
89
+ When `cal_sstats == False`, this will be None.
90
+
91
+ """
92
+ is_sparse_x = sp.issparse(X)
93
+ n_samples, n_features = X.shape
94
+ n_topics = exp_topic_word_distr.shape[0]
95
+
96
+ if random_state:
97
+ doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype(
98
+ X.dtype, copy=False
99
+ )
100
+ else:
101
+ doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype)
102
+
103
+ # In the literature, this is `exp(E[log(theta)])`
104
+ exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
105
+
106
+ # diff on `component_` (only calculate it when `cal_diff` is True)
107
+ suff_stats = (
108
+ np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None
109
+ )
110
+
111
+ if is_sparse_x:
112
+ X_data = X.data
113
+ X_indices = X.indices
114
+ X_indptr = X.indptr
115
+
116
+ # These cython functions are called in a nested loop on usually very small arrays
117
+ # (length=n_topics). In that case, finding the appropriate signature of the
118
+ # fused-typed function can be more costly than its execution, hence the dispatch
119
+ # is done outside of the loop.
120
+ ctype = "float" if X.dtype == np.float32 else "double"
121
+ mean_change = cy_mean_change[ctype]
122
+ dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype]
123
+ eps = np.finfo(X.dtype).eps
124
+
125
+ for idx_d in range(n_samples):
126
+ if is_sparse_x:
127
+ ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
128
+ cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
129
+ else:
130
+ ids = np.nonzero(X[idx_d, :])[0]
131
+ cnts = X[idx_d, ids]
132
+
133
+ doc_topic_d = doc_topic_distr[idx_d, :]
134
+ # The next one is a copy, since the inner loop overwrites it.
135
+ exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
136
+ exp_topic_word_d = exp_topic_word_distr[:, ids]
137
+
138
+ # Iterate between `doc_topic_d` and `norm_phi` until convergence
139
+ for _ in range(0, max_doc_update_iter):
140
+ last_d = doc_topic_d
141
+
142
+ # The optimal phi_{dwk} is proportional to
143
+ # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
144
+ norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
145
+
146
+ doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T)
147
+ # Note: adds doc_topic_prior to doc_topic_d, in-place.
148
+ dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d)
149
+
150
+ if mean_change(last_d, doc_topic_d) < mean_change_tol:
151
+ break
152
+ doc_topic_distr[idx_d, :] = doc_topic_d
153
+
154
+ # Contribution of document d to the expected sufficient
155
+ # statistics for the M step.
156
+ if cal_sstats:
157
+ norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
158
+ suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
159
+
160
+ return (doc_topic_distr, suff_stats)
161
+
162
+
163
+ class LatentDirichletAllocation(
164
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
165
+ ):
166
+ """Latent Dirichlet Allocation with online variational Bayes algorithm.
167
+
168
+ The implementation is based on [1]_ and [2]_.
169
+
170
+ .. versionadded:: 0.17
171
+
172
+ Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
173
+
174
+ Parameters
175
+ ----------
176
+ n_components : int, default=10
177
+ Number of topics.
178
+
179
+ .. versionchanged:: 0.19
180
+ ``n_topics`` was renamed to ``n_components``
181
+
182
+ doc_topic_prior : float, default=None
183
+ Prior of document topic distribution `theta`. If the value is None,
184
+ defaults to `1 / n_components`.
185
+ In [1]_, this is called `alpha`.
186
+
187
+ topic_word_prior : float, default=None
188
+ Prior of topic word distribution `beta`. If the value is None, defaults
189
+ to `1 / n_components`.
190
+ In [1]_, this is called `eta`.
191
+
192
+ learning_method : {'batch', 'online'}, default='batch'
193
+ Method used to update `_component`. Only used in :meth:`fit` method.
194
+ In general, if the data size is large, the online update will be much
195
+ faster than the batch update.
196
+
197
+ Valid options::
198
+
199
+ 'batch': Batch variational Bayes method. Use all training data in
200
+ each EM update.
201
+ Old `components_` will be overwritten in each iteration.
202
+ 'online': Online variational Bayes method. In each EM update, use
203
+ mini-batch of training data to update the ``components_``
204
+ variable incrementally. The learning rate is controlled by the
205
+ ``learning_decay`` and the ``learning_offset`` parameters.
206
+
207
+ .. versionchanged:: 0.20
208
+ The default learning method is now ``"batch"``.
209
+
210
+ learning_decay : float, default=0.7
211
+ It is a parameter that controls the learning rate in the online learning
212
+ method. The value should be set between (0.5, 1.0] to guarantee
213
+ asymptotic convergence. When the value is 0.0 and batch_size is
214
+ ``n_samples``, the update method is the same as batch learning. In the
215
+ literature, this is called kappa.
216
+
217
+ learning_offset : float, default=10.0
218
+ A (positive) parameter that downweights early iterations in online
219
+ learning. It should be greater than 1.0. In the literature, this is
220
+ called tau_0.
221
+
222
+ max_iter : int, default=10
223
+ The maximum number of passes over the training data (aka epochs).
224
+ It only impacts the behavior in the :meth:`fit` method, and not the
225
+ :meth:`partial_fit` method.
226
+
227
+ batch_size : int, default=128
228
+ Number of documents to use in each EM iteration. Only used in online
229
+ learning.
230
+
231
+ evaluate_every : int, default=-1
232
+ How often to evaluate perplexity. Only used in `fit` method.
233
+ Set it to 0 or a negative number to not evaluate perplexity in
234
+ training at all. Evaluating perplexity can help you check convergence
235
+ in the training process, but it will also increase total training time.
236
+ Evaluating perplexity in every iteration might increase training time
237
+ up to two-fold.
238
+
239
+ total_samples : int, default=1e6
240
+ Total number of documents. Only used in the :meth:`partial_fit` method.
241
+
242
+ perp_tol : float, default=1e-1
243
+ Perplexity tolerance in batch learning. Only used when
244
+ ``evaluate_every`` is greater than 0.
245
+
246
+ mean_change_tol : float, default=1e-3
247
+ Stopping tolerance for updating document topic distribution in E-step.
248
+
249
+ max_doc_update_iter : int, default=100
250
+ Max number of iterations for updating document topic distribution in
251
+ the E-step.
252
+
253
+ n_jobs : int, default=None
254
+ The number of jobs to use in the E-step.
255
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
256
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
257
+ for more details.
258
+
259
+ verbose : int, default=0
260
+ Verbosity level.
261
+
262
+ random_state : int, RandomState instance or None, default=None
263
+ Pass an int for reproducible results across multiple function calls.
264
+ See :term:`Glossary <random_state>`.
265
+
266
+ Attributes
267
+ ----------
268
+ components_ : ndarray of shape (n_components, n_features)
269
+ Variational parameters for topic word distribution. Since the complete
270
+ conditional for topic word distribution is a Dirichlet,
271
+ ``components_[i, j]`` can be viewed as pseudocount that represents the
272
+ number of times word `j` was assigned to topic `i`.
273
+ It can also be viewed as distribution over the words for each topic
274
+ after normalization:
275
+ ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
276
+
277
+ exp_dirichlet_component_ : ndarray of shape (n_components, n_features)
278
+ Exponential value of expectation of log topic word distribution.
279
+ In the literature, this is `exp(E[log(beta)])`.
280
+
281
+ n_batch_iter_ : int
282
+ Number of iterations of the EM step.
283
+
284
+ n_features_in_ : int
285
+ Number of features seen during :term:`fit`.
286
+
287
+ .. versionadded:: 0.24
288
+
289
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
290
+ Names of features seen during :term:`fit`. Defined only when `X`
291
+ has feature names that are all strings.
292
+
293
+ .. versionadded:: 1.0
294
+
295
+ n_iter_ : int
296
+ Number of passes over the dataset.
297
+
298
+ bound_ : float
299
+ Final perplexity score on training set.
300
+
301
+ doc_topic_prior_ : float
302
+ Prior of document topic distribution `theta`. If the value is None,
303
+ it is `1 / n_components`.
304
+
305
+ random_state_ : RandomState instance
306
+ RandomState instance that is generated either from a seed, the random
307
+ number generator or by `np.random`.
308
+
309
+ topic_word_prior_ : float
310
+ Prior of topic word distribution `beta`. If the value is None, it is
311
+ `1 / n_components`.
312
+
313
+ See Also
314
+ --------
315
+ sklearn.discriminant_analysis.LinearDiscriminantAnalysis:
316
+ A classifier with a linear decision boundary, generated by fitting
317
+ class conditional densities to the data and using Bayes' rule.
318
+
319
+ References
320
+ ----------
321
+ .. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D.
322
+ Hoffman, David M. Blei, Francis Bach, 2010
323
+ https://github.com/blei-lab/onlineldavb
324
+
325
+ .. [2] "Stochastic Variational Inference", Matthew D. Hoffman,
326
+ David M. Blei, Chong Wang, John Paisley, 2013
327
+
328
+ Examples
329
+ --------
330
+ >>> from sklearn.decomposition import LatentDirichletAllocation
331
+ >>> from sklearn.datasets import make_multilabel_classification
332
+ >>> # This produces a feature matrix of token counts, similar to what
333
+ >>> # CountVectorizer would produce on text.
334
+ >>> X, _ = make_multilabel_classification(random_state=0)
335
+ >>> lda = LatentDirichletAllocation(n_components=5,
336
+ ... random_state=0)
337
+ >>> lda.fit(X)
338
+ LatentDirichletAllocation(...)
339
+ >>> # get topics for some given samples:
340
+ >>> lda.transform(X[-2:])
341
+ array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846],
342
+ [0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]])
343
+ """
344
+
345
+ _parameter_constraints: dict = {
346
+ "n_components": [Interval(Integral, 0, None, closed="neither")],
347
+ "doc_topic_prior": [None, Interval(Real, 0, 1, closed="both")],
348
+ "topic_word_prior": [None, Interval(Real, 0, 1, closed="both")],
349
+ "learning_method": [StrOptions({"batch", "online"})],
350
+ "learning_decay": [Interval(Real, 0, 1, closed="both")],
351
+ "learning_offset": [Interval(Real, 1.0, None, closed="left")],
352
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
353
+ "batch_size": [Interval(Integral, 0, None, closed="neither")],
354
+ "evaluate_every": [Interval(Integral, None, None, closed="neither")],
355
+ "total_samples": [Interval(Real, 0, None, closed="neither")],
356
+ "perp_tol": [Interval(Real, 0, None, closed="left")],
357
+ "mean_change_tol": [Interval(Real, 0, None, closed="left")],
358
+ "max_doc_update_iter": [Interval(Integral, 0, None, closed="left")],
359
+ "n_jobs": [None, Integral],
360
+ "verbose": ["verbose"],
361
+ "random_state": ["random_state"],
362
+ }
363
+
364
+ def __init__(
365
+ self,
366
+ n_components=10,
367
+ *,
368
+ doc_topic_prior=None,
369
+ topic_word_prior=None,
370
+ learning_method="batch",
371
+ learning_decay=0.7,
372
+ learning_offset=10.0,
373
+ max_iter=10,
374
+ batch_size=128,
375
+ evaluate_every=-1,
376
+ total_samples=1e6,
377
+ perp_tol=1e-1,
378
+ mean_change_tol=1e-3,
379
+ max_doc_update_iter=100,
380
+ n_jobs=None,
381
+ verbose=0,
382
+ random_state=None,
383
+ ):
384
+ self.n_components = n_components
385
+ self.doc_topic_prior = doc_topic_prior
386
+ self.topic_word_prior = topic_word_prior
387
+ self.learning_method = learning_method
388
+ self.learning_decay = learning_decay
389
+ self.learning_offset = learning_offset
390
+ self.max_iter = max_iter
391
+ self.batch_size = batch_size
392
+ self.evaluate_every = evaluate_every
393
+ self.total_samples = total_samples
394
+ self.perp_tol = perp_tol
395
+ self.mean_change_tol = mean_change_tol
396
+ self.max_doc_update_iter = max_doc_update_iter
397
+ self.n_jobs = n_jobs
398
+ self.verbose = verbose
399
+ self.random_state = random_state
400
+
401
+ def _init_latent_vars(self, n_features, dtype=np.float64):
402
+ """Initialize latent variables."""
403
+
404
+ self.random_state_ = check_random_state(self.random_state)
405
+ self.n_batch_iter_ = 1
406
+ self.n_iter_ = 0
407
+
408
+ if self.doc_topic_prior is None:
409
+ self.doc_topic_prior_ = 1.0 / self.n_components
410
+ else:
411
+ self.doc_topic_prior_ = self.doc_topic_prior
412
+
413
+ if self.topic_word_prior is None:
414
+ self.topic_word_prior_ = 1.0 / self.n_components
415
+ else:
416
+ self.topic_word_prior_ = self.topic_word_prior
417
+
418
+ init_gamma = 100.0
419
+ init_var = 1.0 / init_gamma
420
+ # In the literature, this is called `lambda`
421
+ self.components_ = self.random_state_.gamma(
422
+ init_gamma, init_var, (self.n_components, n_features)
423
+ ).astype(dtype, copy=False)
424
+
425
+ # In the literature, this is `exp(E[log(beta)])`
426
+ self.exp_dirichlet_component_ = np.exp(
427
+ _dirichlet_expectation_2d(self.components_)
428
+ )
429
+
430
+ def _e_step(self, X, cal_sstats, random_init, parallel=None):
431
+ """E-step in EM update.
432
+
433
+ Parameters
434
+ ----------
435
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
436
+ Document word matrix.
437
+
438
+ cal_sstats : bool
439
+ Parameter that indicates whether to calculate sufficient statistics
440
+ or not. Set ``cal_sstats`` to True when we need to run M-step.
441
+
442
+ random_init : bool
443
+ Parameter that indicates whether to initialize the document topic
444
+ distribution randomly in the E-step. Set it to True in training
445
+ steps.
446
+
447
+ parallel : joblib.Parallel, default=None
448
+ Pre-initialized instance of joblib.Parallel.
449
+
450
+ Returns
451
+ -------
452
+ (doc_topic_distr, suff_stats) :
453
+ `doc_topic_distr` is unnormalized topic distribution for each
454
+ document. In the literature, this is called `gamma`.
455
+ `suff_stats` is expected sufficient statistics for the M-step.
456
+ When `cal_sstats == False`, it will be None.
457
+
458
+ """
459
+
460
+ # Run e-step in parallel
461
+ random_state = self.random_state_ if random_init else None
462
+
463
+ # TODO: make Parallel._effective_n_jobs public instead?
464
+ n_jobs = effective_n_jobs(self.n_jobs)
465
+ if parallel is None:
466
+ parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
467
+ results = parallel(
468
+ delayed(_update_doc_distribution)(
469
+ X[idx_slice, :],
470
+ self.exp_dirichlet_component_,
471
+ self.doc_topic_prior_,
472
+ self.max_doc_update_iter,
473
+ self.mean_change_tol,
474
+ cal_sstats,
475
+ random_state,
476
+ )
477
+ for idx_slice in gen_even_slices(X.shape[0], n_jobs)
478
+ )
479
+
480
+ # merge result
481
+ doc_topics, sstats_list = zip(*results)
482
+ doc_topic_distr = np.vstack(doc_topics)
483
+
484
+ if cal_sstats:
485
+ # This step finishes computing the sufficient statistics for the
486
+ # M-step.
487
+ suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
488
+ for sstats in sstats_list:
489
+ suff_stats += sstats
490
+ suff_stats *= self.exp_dirichlet_component_
491
+ else:
492
+ suff_stats = None
493
+
494
+ return (doc_topic_distr, suff_stats)
495
+
496
+ def _em_step(self, X, total_samples, batch_update, parallel=None):
497
+ """EM update for 1 iteration.
498
+
499
+ update `_component` by batch VB or online VB.
500
+
501
+ Parameters
502
+ ----------
503
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
504
+ Document word matrix.
505
+
506
+ total_samples : int
507
+ Total number of documents. It is only used when
508
+ batch_update is `False`.
509
+
510
+ batch_update : bool
511
+ Parameter that controls updating method.
512
+ `True` for batch learning, `False` for online learning.
513
+
514
+ parallel : joblib.Parallel, default=None
515
+ Pre-initialized instance of joblib.Parallel
516
+
517
+ Returns
518
+ -------
519
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
520
+ Unnormalized document topic distribution.
521
+ """
522
+
523
+ # E-step
524
+ _, suff_stats = self._e_step(
525
+ X, cal_sstats=True, random_init=True, parallel=parallel
526
+ )
527
+
528
+ # M-step
529
+ if batch_update:
530
+ self.components_ = self.topic_word_prior_ + suff_stats
531
+ else:
532
+ # online update
533
+ # In the literature, the weight is `rho`
534
+ weight = np.power(
535
+ self.learning_offset + self.n_batch_iter_, -self.learning_decay
536
+ )
537
+ doc_ratio = float(total_samples) / X.shape[0]
538
+ self.components_ *= 1 - weight
539
+ self.components_ += weight * (
540
+ self.topic_word_prior_ + doc_ratio * suff_stats
541
+ )
542
+
543
+ # update `component_` related variables
544
+ self.exp_dirichlet_component_ = np.exp(
545
+ _dirichlet_expectation_2d(self.components_)
546
+ )
547
+ self.n_batch_iter_ += 1
548
+ return
549
+
550
+ def _more_tags(self):
551
+ return {
552
+ "preserves_dtype": [np.float64, np.float32],
553
+ "requires_positive_X": True,
554
+ }
555
+
556
+ def _check_non_neg_array(self, X, reset_n_features, whom):
557
+ """check X format
558
+
559
+ check X format and make sure no negative value in X.
560
+
561
+ Parameters
562
+ ----------
563
+ X : array-like or sparse matrix
564
+
565
+ """
566
+ dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype
567
+
568
+ X = self._validate_data(
569
+ X,
570
+ reset=reset_n_features,
571
+ accept_sparse="csr",
572
+ dtype=dtype,
573
+ )
574
+ check_non_negative(X, whom)
575
+
576
+ return X
577
+
578
+ @_fit_context(prefer_skip_nested_validation=True)
579
+ def partial_fit(self, X, y=None):
580
+ """Online VB with Mini-Batch update.
581
+
582
+ Parameters
583
+ ----------
584
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
585
+ Document word matrix.
586
+
587
+ y : Ignored
588
+ Not used, present here for API consistency by convention.
589
+
590
+ Returns
591
+ -------
592
+ self
593
+ Partially fitted estimator.
594
+ """
595
+ first_time = not hasattr(self, "components_")
596
+
597
+ X = self._check_non_neg_array(
598
+ X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit"
599
+ )
600
+ n_samples, n_features = X.shape
601
+ batch_size = self.batch_size
602
+
603
+ # initialize parameters or check
604
+ if first_time:
605
+ self._init_latent_vars(n_features, dtype=X.dtype)
606
+
607
+ if n_features != self.components_.shape[1]:
608
+ raise ValueError(
609
+ "The provided data has %d dimensions while "
610
+ "the model was trained with feature size %d."
611
+ % (n_features, self.components_.shape[1])
612
+ )
613
+
614
+ n_jobs = effective_n_jobs(self.n_jobs)
615
+ with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
616
+ for idx_slice in gen_batches(n_samples, batch_size):
617
+ self._em_step(
618
+ X[idx_slice, :],
619
+ total_samples=self.total_samples,
620
+ batch_update=False,
621
+ parallel=parallel,
622
+ )
623
+
624
+ return self
625
+
626
+ @_fit_context(prefer_skip_nested_validation=True)
627
+ def fit(self, X, y=None):
628
+ """Learn model for the data X with variational Bayes method.
629
+
630
+ When `learning_method` is 'online', use mini-batch update.
631
+ Otherwise, use batch update.
632
+
633
+ Parameters
634
+ ----------
635
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
636
+ Document word matrix.
637
+
638
+ y : Ignored
639
+ Not used, present here for API consistency by convention.
640
+
641
+ Returns
642
+ -------
643
+ self
644
+ Fitted estimator.
645
+ """
646
+ X = self._check_non_neg_array(
647
+ X, reset_n_features=True, whom="LatentDirichletAllocation.fit"
648
+ )
649
+ n_samples, n_features = X.shape
650
+ max_iter = self.max_iter
651
+ evaluate_every = self.evaluate_every
652
+ learning_method = self.learning_method
653
+
654
+ batch_size = self.batch_size
655
+
656
+ # initialize parameters
657
+ self._init_latent_vars(n_features, dtype=X.dtype)
658
+ # change to perplexity later
659
+ last_bound = None
660
+ n_jobs = effective_n_jobs(self.n_jobs)
661
+ with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
662
+ for i in range(max_iter):
663
+ if learning_method == "online":
664
+ for idx_slice in gen_batches(n_samples, batch_size):
665
+ self._em_step(
666
+ X[idx_slice, :],
667
+ total_samples=n_samples,
668
+ batch_update=False,
669
+ parallel=parallel,
670
+ )
671
+ else:
672
+ # batch update
673
+ self._em_step(
674
+ X, total_samples=n_samples, batch_update=True, parallel=parallel
675
+ )
676
+
677
+ # check perplexity
678
+ if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
679
+ doc_topics_distr, _ = self._e_step(
680
+ X, cal_sstats=False, random_init=False, parallel=parallel
681
+ )
682
+ bound = self._perplexity_precomp_distr(
683
+ X, doc_topics_distr, sub_sampling=False
684
+ )
685
+ if self.verbose:
686
+ print(
687
+ "iteration: %d of max_iter: %d, perplexity: %.4f"
688
+ % (i + 1, max_iter, bound)
689
+ )
690
+
691
+ if last_bound and abs(last_bound - bound) < self.perp_tol:
692
+ break
693
+ last_bound = bound
694
+
695
+ elif self.verbose:
696
+ print("iteration: %d of max_iter: %d" % (i + 1, max_iter))
697
+ self.n_iter_ += 1
698
+
699
+ # calculate final perplexity value on train set
700
+ doc_topics_distr, _ = self._e_step(
701
+ X, cal_sstats=False, random_init=False, parallel=parallel
702
+ )
703
+ self.bound_ = self._perplexity_precomp_distr(
704
+ X, doc_topics_distr, sub_sampling=False
705
+ )
706
+
707
+ return self
708
+
709
+ def _unnormalized_transform(self, X):
710
+ """Transform data X according to fitted model.
711
+
712
+ Parameters
713
+ ----------
714
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
715
+ Document word matrix.
716
+
717
+ Returns
718
+ -------
719
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
720
+ Document topic distribution for X.
721
+ """
722
+ doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False)
723
+
724
+ return doc_topic_distr
725
+
726
+ def transform(self, X):
727
+ """Transform data X according to the fitted model.
728
+
729
+ .. versionchanged:: 0.18
730
+ *doc_topic_distr* is now normalized
731
+
732
+ Parameters
733
+ ----------
734
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
735
+ Document word matrix.
736
+
737
+ Returns
738
+ -------
739
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
740
+ Document topic distribution for X.
741
+ """
742
+ check_is_fitted(self)
743
+ X = self._check_non_neg_array(
744
+ X, reset_n_features=False, whom="LatentDirichletAllocation.transform"
745
+ )
746
+ doc_topic_distr = self._unnormalized_transform(X)
747
+ doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
748
+ return doc_topic_distr
749
+
750
+ def _approx_bound(self, X, doc_topic_distr, sub_sampling):
751
+ """Estimate the variational bound.
752
+
753
+ Estimate the variational bound over "all documents" using only the
754
+ documents passed in as X. Since log-likelihood of each word cannot
755
+ be computed directly, we use this bound to estimate it.
756
+
757
+ Parameters
758
+ ----------
759
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
760
+ Document word matrix.
761
+
762
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
763
+ Document topic distribution. In the literature, this is called
764
+ gamma.
765
+
766
+ sub_sampling : bool, default=False
767
+ Compensate for subsampling of documents.
768
+ It is used when calculating the bound in online learning.
769
+
770
+ Returns
771
+ -------
772
+ score : float
773
+
774
+ """
775
+
776
+ def _loglikelihood(prior, distr, dirichlet_distr, size):
777
+ # calculate log-likelihood
778
+ score = np.sum((prior - distr) * dirichlet_distr)
779
+ score += np.sum(gammaln(distr) - gammaln(prior))
780
+ score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
781
+ return score
782
+
783
+ is_sparse_x = sp.issparse(X)
784
+ n_samples, n_components = doc_topic_distr.shape
785
+ n_features = self.components_.shape[1]
786
+ score = 0
787
+
788
+ dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
789
+ dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
790
+ doc_topic_prior = self.doc_topic_prior_
791
+ topic_word_prior = self.topic_word_prior_
792
+
793
+ if is_sparse_x:
794
+ X_data = X.data
795
+ X_indices = X.indices
796
+ X_indptr = X.indptr
797
+
798
+ # E[log p(docs | theta, beta)]
799
+ for idx_d in range(0, n_samples):
800
+ if is_sparse_x:
801
+ ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
802
+ cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
803
+ else:
804
+ ids = np.nonzero(X[idx_d, :])[0]
805
+ cnts = X[idx_d, ids]
806
+ temp = (
807
+ dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
808
+ )
809
+ norm_phi = logsumexp(temp, axis=0)
810
+ score += np.dot(cnts, norm_phi)
811
+
812
+ # compute E[log p(theta | alpha) - log q(theta | gamma)]
813
+ score += _loglikelihood(
814
+ doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components
815
+ )
816
+
817
+ # Compensate for the subsampling of the population of documents
818
+ if sub_sampling:
819
+ doc_ratio = float(self.total_samples) / n_samples
820
+ score *= doc_ratio
821
+
822
+ # E[log p(beta | eta) - log q (beta | lambda)]
823
+ score += _loglikelihood(
824
+ topic_word_prior, self.components_, dirichlet_component_, n_features
825
+ )
826
+
827
+ return score
828
+
829
+ def score(self, X, y=None):
830
+ """Calculate approximate log-likelihood as score.
831
+
832
+ Parameters
833
+ ----------
834
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
835
+ Document word matrix.
836
+
837
+ y : Ignored
838
+ Not used, present here for API consistency by convention.
839
+
840
+ Returns
841
+ -------
842
+ score : float
843
+ Use approximate bound as score.
844
+ """
845
+ check_is_fitted(self)
846
+ X = self._check_non_neg_array(
847
+ X, reset_n_features=False, whom="LatentDirichletAllocation.score"
848
+ )
849
+
850
+ doc_topic_distr = self._unnormalized_transform(X)
851
+ score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
852
+ return score
853
+
854
+ def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
855
+ """Calculate approximate perplexity for data X with ability to accept
856
+ precomputed doc_topic_distr
857
+
858
+ Perplexity is defined as exp(-1. * log-likelihood per word)
859
+
860
+ Parameters
861
+ ----------
862
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
863
+ Document word matrix.
864
+
865
+ doc_topic_distr : ndarray of shape (n_samples, n_components), \
866
+ default=None
867
+ Document topic distribution.
868
+ If it is None, it will be generated by applying transform on X.
869
+
870
+ Returns
871
+ -------
872
+ score : float
873
+ Perplexity score.
874
+ """
875
+ if doc_topic_distr is None:
876
+ doc_topic_distr = self._unnormalized_transform(X)
877
+ else:
878
+ n_samples, n_components = doc_topic_distr.shape
879
+ if n_samples != X.shape[0]:
880
+ raise ValueError(
881
+ "Number of samples in X and doc_topic_distr do not match."
882
+ )
883
+
884
+ if n_components != self.n_components:
885
+ raise ValueError("Number of topics does not match.")
886
+
887
+ current_samples = X.shape[0]
888
+ bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
889
+
890
+ if sub_sampling:
891
+ word_cnt = X.sum() * (float(self.total_samples) / current_samples)
892
+ else:
893
+ word_cnt = X.sum()
894
+ perword_bound = bound / word_cnt
895
+
896
+ return np.exp(-1.0 * perword_bound)
897
+
898
+ def perplexity(self, X, sub_sampling=False):
899
+ """Calculate approximate perplexity for data X.
900
+
901
+ Perplexity is defined as exp(-1. * log-likelihood per word)
902
+
903
+ .. versionchanged:: 0.19
904
+ *doc_topic_distr* argument has been deprecated and is ignored
905
+ because the user no longer has access to the unnormalized distribution
906
+
907
+ Parameters
908
+ ----------
909
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
910
+ Document word matrix.
911
+
912
+ sub_sampling : bool
913
+ Do sub-sampling or not.
914
+
915
+ Returns
916
+ -------
917
+ score : float
918
+ Perplexity score.
919
+ """
920
+ check_is_fitted(self)
921
+ X = self._check_non_neg_array(
922
+ X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity"
923
+ )
924
+ return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
925
+
926
+ @property
927
+ def _n_features_out(self):
928
+ """Number of transformed output features."""
929
+ return self.components_.shape[0]
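
The bound computed in `_approx_bound` above is what `score` and `perplexity` ultimately expose. As a minimal usage sketch, separate from the diff and using a made-up toy count matrix, showing how the two public methods relate:

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

# Toy document-word count matrix (20 documents, 10 vocabulary terms); assumed data.
X = np.random.RandomState(0).randint(0, 5, size=(20, 10))
lda = LatentDirichletAllocation(n_components=3, random_state=0).fit(X)
print(lda.score(X))       # approximate variational bound on the log-likelihood
print(lda.perplexity(X))  # exp(-bound / total word count), as computed above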
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (307 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py ADDED
@@ -0,0 +1,551 @@
1
+ """Matrix factorization with Sparse PCA."""
2
+ # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Integral, Real
6
+
7
+ import numpy as np
8
+
9
+ from ..base import (
10
+ BaseEstimator,
11
+ ClassNamePrefixFeaturesOutMixin,
12
+ TransformerMixin,
13
+ _fit_context,
14
+ )
15
+ from ..linear_model import ridge_regression
16
+ from ..utils import check_random_state
17
+ from ..utils._param_validation import Hidden, Interval, StrOptions
18
+ from ..utils.extmath import svd_flip
19
+ from ..utils.validation import check_array, check_is_fitted
20
+ from ._dict_learning import MiniBatchDictionaryLearning, dict_learning
21
+
22
+
23
+ class _BaseSparsePCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
24
+ """Base class for SparsePCA and MiniBatchSparsePCA"""
25
+
26
+ _parameter_constraints: dict = {
27
+ "n_components": [None, Interval(Integral, 1, None, closed="left")],
28
+ "alpha": [Interval(Real, 0.0, None, closed="left")],
29
+ "ridge_alpha": [Interval(Real, 0.0, None, closed="left")],
30
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
31
+ "tol": [Interval(Real, 0.0, None, closed="left")],
32
+ "method": [StrOptions({"lars", "cd"})],
33
+ "n_jobs": [Integral, None],
34
+ "verbose": ["verbose"],
35
+ "random_state": ["random_state"],
36
+ }
37
+
38
+ def __init__(
39
+ self,
40
+ n_components=None,
41
+ *,
42
+ alpha=1,
43
+ ridge_alpha=0.01,
44
+ max_iter=1000,
45
+ tol=1e-8,
46
+ method="lars",
47
+ n_jobs=None,
48
+ verbose=False,
49
+ random_state=None,
50
+ ):
51
+ self.n_components = n_components
52
+ self.alpha = alpha
53
+ self.ridge_alpha = ridge_alpha
54
+ self.max_iter = max_iter
55
+ self.tol = tol
56
+ self.method = method
57
+ self.n_jobs = n_jobs
58
+ self.verbose = verbose
59
+ self.random_state = random_state
60
+
61
+ @_fit_context(prefer_skip_nested_validation=True)
62
+ def fit(self, X, y=None):
63
+ """Fit the model from data in X.
64
+
65
+ Parameters
66
+ ----------
67
+ X : array-like of shape (n_samples, n_features)
68
+ Training vector, where `n_samples` is the number of samples
69
+ and `n_features` is the number of features.
70
+
71
+ y : Ignored
72
+ Not used, present here for API consistency by convention.
73
+
74
+ Returns
75
+ -------
76
+ self : object
77
+ Returns the instance itself.
78
+ """
79
+ random_state = check_random_state(self.random_state)
80
+ X = self._validate_data(X)
81
+
82
+ self.mean_ = X.mean(axis=0)
83
+ X = X - self.mean_
84
+
85
+ if self.n_components is None:
86
+ n_components = X.shape[1]
87
+ else:
88
+ n_components = self.n_components
89
+
90
+ return self._fit(X, n_components, random_state)
91
+
92
+ def transform(self, X):
93
+ """Least Squares projection of the data onto the sparse components.
94
+
95
+ To avoid instability issues in case the system is under-determined,
96
+ regularization can be applied (Ridge regression) via the
97
+ `ridge_alpha` parameter.
98
+
99
+ Note that the orthogonality of the Sparse PCA components is not enforced as in PCA;
100
+ hence one cannot use a simple linear projection.
101
+
102
+ Parameters
103
+ ----------
104
+ X : ndarray of shape (n_samples, n_features)
105
+ Test data to be transformed, must have the same number of
106
+ features as the data used to train the model.
107
+
108
+ Returns
109
+ -------
110
+ X_new : ndarray of shape (n_samples, n_components)
111
+ Transformed data.
112
+ """
113
+ check_is_fitted(self)
114
+
115
+ X = self._validate_data(X, reset=False)
116
+ X = X - self.mean_
117
+
118
+ U = ridge_regression(
119
+ self.components_.T, X.T, self.ridge_alpha, solver="cholesky"
120
+ )
121
+
122
+ return U
123
+
124
+ def inverse_transform(self, X):
125
+ """Transform data from the latent space to the original space.
126
+
127
+ This inversion is an approximation due to the loss of information
128
+ induced by the forward decomposition.
129
+
130
+ .. versionadded:: 1.2
131
+
132
+ Parameters
133
+ ----------
134
+ X : ndarray of shape (n_samples, n_components)
135
+ Data in the latent space.
136
+
137
+ Returns
138
+ -------
139
+ X_original : ndarray of shape (n_samples, n_features)
140
+ Reconstructed data in the original space.
141
+ """
142
+ check_is_fitted(self)
143
+ X = check_array(X)
144
+
145
+ return (X @ self.components_) + self.mean_
146
+
147
+ @property
148
+ def _n_features_out(self):
149
+ """Number of transformed output features."""
150
+ return self.components_.shape[0]
151
+
152
+ def _more_tags(self):
153
+ return {
154
+ "preserves_dtype": [np.float64, np.float32],
155
+ }
156
+
157
+
158
+ class SparsePCA(_BaseSparsePCA):
159
+ """Sparse Principal Components Analysis (SparsePCA).
160
+
161
+ Finds the set of sparse components that can optimally reconstruct
162
+ the data. The amount of sparseness is controllable by the coefficient
163
+ of the L1 penalty, given by the parameter alpha.
164
+
165
+ Read more in the :ref:`User Guide <SparsePCA>`.
166
+
167
+ Parameters
168
+ ----------
169
+ n_components : int, default=None
170
+ Number of sparse atoms to extract. If None, then ``n_components``
171
+ is set to ``n_features``.
172
+
173
+ alpha : float, default=1
174
+ Sparsity controlling parameter. Higher values lead to sparser
175
+ components.
176
+
177
+ ridge_alpha : float, default=0.01
178
+ Amount of ridge shrinkage to apply in order to improve
179
+ conditioning when calling the transform method.
180
+
181
+ max_iter : int, default=1000
182
+ Maximum number of iterations to perform.
183
+
184
+ tol : float, default=1e-8
185
+ Tolerance for the stopping condition.
186
+
187
+ method : {'lars', 'cd'}, default='lars'
188
+ Method to be used for optimization.
189
+ lars: uses the least angle regression method to solve the lasso problem
190
+ (linear_model.lars_path)
191
+ cd: uses the coordinate descent method to compute the
192
+ Lasso solution (linear_model.Lasso). Lars will be faster if
193
+ the estimated components are sparse.
194
+
195
+ n_jobs : int, default=None
196
+ Number of parallel jobs to run.
197
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
198
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
199
+ for more details.
200
+
201
+ U_init : ndarray of shape (n_samples, n_components), default=None
202
+ Initial values for the loadings for warm restart scenarios. Only used
203
+ if `U_init` and `V_init` are not None.
204
+
205
+ V_init : ndarray of shape (n_components, n_features), default=None
206
+ Initial values for the components for warm restart scenarios. Only used
207
+ if `U_init` and `V_init` are not None.
208
+
209
+ verbose : int or bool, default=False
210
+ Controls the verbosity; the higher, the more messages. Defaults to 0.
211
+
212
+ random_state : int, RandomState instance or None, default=None
213
+ Used during dictionary learning. Pass an int for reproducible results
214
+ across multiple function calls.
215
+ See :term:`Glossary <random_state>`.
216
+
217
+ Attributes
218
+ ----------
219
+ components_ : ndarray of shape (n_components, n_features)
220
+ Sparse components extracted from the data.
221
+
222
+ error_ : ndarray
223
+ Vector of errors at each iteration.
224
+
225
+ n_components_ : int
226
+ Estimated number of components.
227
+
228
+ .. versionadded:: 0.23
229
+
230
+ n_iter_ : int
231
+ Number of iterations run.
232
+
233
+ mean_ : ndarray of shape (n_features,)
234
+ Per-feature empirical mean, estimated from the training set.
235
+ Equal to ``X.mean(axis=0)``.
236
+
237
+ n_features_in_ : int
238
+ Number of features seen during :term:`fit`.
239
+
240
+ .. versionadded:: 0.24
241
+
242
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
243
+ Names of features seen during :term:`fit`. Defined only when `X`
244
+ has feature names that are all strings.
245
+
246
+ .. versionadded:: 1.0
247
+
248
+ See Also
249
+ --------
250
+ PCA : Principal Component Analysis implementation.
251
+ MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less
252
+ accurate.
253
+ DictionaryLearning : Generic dictionary learning problem using a sparse code.
254
+
255
+ Examples
256
+ --------
257
+ >>> import numpy as np
258
+ >>> from sklearn.datasets import make_friedman1
259
+ >>> from sklearn.decomposition import SparsePCA
260
+ >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
261
+ >>> transformer = SparsePCA(n_components=5, random_state=0)
262
+ >>> transformer.fit(X)
263
+ SparsePCA(...)
264
+ >>> X_transformed = transformer.transform(X)
265
+ >>> X_transformed.shape
266
+ (200, 5)
267
+ >>> # most values in the components_ are zero (sparsity)
268
+ >>> np.mean(transformer.components_ == 0)
269
+ 0.9666...
270
+ """
271
+
272
+ _parameter_constraints: dict = {
273
+ **_BaseSparsePCA._parameter_constraints,
274
+ "U_init": [None, np.ndarray],
275
+ "V_init": [None, np.ndarray],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ n_components=None,
281
+ *,
282
+ alpha=1,
283
+ ridge_alpha=0.01,
284
+ max_iter=1000,
285
+ tol=1e-8,
286
+ method="lars",
287
+ n_jobs=None,
288
+ U_init=None,
289
+ V_init=None,
290
+ verbose=False,
291
+ random_state=None,
292
+ ):
293
+ super().__init__(
294
+ n_components=n_components,
295
+ alpha=alpha,
296
+ ridge_alpha=ridge_alpha,
297
+ max_iter=max_iter,
298
+ tol=tol,
299
+ method=method,
300
+ n_jobs=n_jobs,
301
+ verbose=verbose,
302
+ random_state=random_state,
303
+ )
304
+ self.U_init = U_init
305
+ self.V_init = V_init
306
+
307
+ def _fit(self, X, n_components, random_state):
308
+ """Specialized `fit` for SparsePCA."""
309
+
310
+ code_init = self.V_init.T if self.V_init is not None else None
311
+ dict_init = self.U_init.T if self.U_init is not None else None
312
+ code, dictionary, E, self.n_iter_ = dict_learning(
313
+ X.T,
314
+ n_components,
315
+ alpha=self.alpha,
316
+ tol=self.tol,
317
+ max_iter=self.max_iter,
318
+ method=self.method,
319
+ n_jobs=self.n_jobs,
320
+ verbose=self.verbose,
321
+ random_state=random_state,
322
+ code_init=code_init,
323
+ dict_init=dict_init,
324
+ return_n_iter=True,
325
+ )
326
+ # flip eigenvectors' sign to enforce deterministic output
327
+ code, dictionary = svd_flip(code, dictionary, u_based_decision=False)
328
+ self.components_ = code.T
329
+ components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
330
+ components_norm[components_norm == 0] = 1
331
+ self.components_ /= components_norm
332
+ self.n_components_ = len(self.components_)
333
+
334
+ self.error_ = E
335
+ return self
336
+
337
+
338
+ class MiniBatchSparsePCA(_BaseSparsePCA):
339
+ """Mini-batch Sparse Principal Components Analysis.
340
+
341
+ Finds the set of sparse components that can optimally reconstruct
342
+ the data. The amount of sparseness is controllable by the coefficient
343
+ of the L1 penalty, given by the parameter alpha.
344
+
345
+ For an example comparing sparse PCA to PCA, see
346
+ :ref:`sphx_glr_auto_examples_decomposition_plot_faces_decomposition.py`
347
+
348
+ Read more in the :ref:`User Guide <SparsePCA>`.
349
+
350
+ Parameters
351
+ ----------
352
+ n_components : int, default=None
353
+ Number of sparse atoms to extract. If None, then ``n_components``
354
+ is set to ``n_features``.
355
+
356
+ alpha : int, default=1
357
+ Sparsity controlling parameter. Higher values lead to sparser
358
+ components.
359
+
360
+ ridge_alpha : float, default=0.01
361
+ Amount of ridge shrinkage to apply in order to improve
362
+ conditioning when calling the transform method.
363
+
364
+ max_iter : int, default=1_000
365
+ Maximum number of iterations over the complete dataset before
366
+ stopping independently of any early stopping criterion heuristics.
367
+
368
+ .. versionadded:: 1.2
369
+
370
+ .. deprecated:: 1.4
371
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
372
+ Use the default value (i.e. `1_000`) instead.
373
+
374
+ callback : callable, default=None
375
+ Callable that gets invoked every five iterations.
376
+
377
+ batch_size : int, default=3
378
+ The number of features to take in each mini batch.
379
+
380
+ verbose : int or bool, default=False
381
+ Controls the verbosity; the higher, the more messages. Defaults to 0.
382
+
383
+ shuffle : bool, default=True
384
+ Whether to shuffle the data before splitting it in batches.
385
+
386
+ n_jobs : int, default=None
387
+ Number of parallel jobs to run.
388
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
389
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
390
+ for more details.
391
+
392
+ method : {'lars', 'cd'}, default='lars'
393
+ Method to be used for optimization.
394
+ lars: uses the least angle regression method to solve the lasso problem
395
+ (linear_model.lars_path)
396
+ cd: uses the coordinate descent method to compute the
397
+ Lasso solution (linear_model.Lasso). Lars will be faster if
398
+ the estimated components are sparse.
399
+
400
+ random_state : int, RandomState instance or None, default=None
401
+ Used for random shuffling when ``shuffle`` is set to ``True``,
402
+ during online dictionary learning. Pass an int for reproducible results
403
+ across multiple function calls.
404
+ See :term:`Glossary <random_state>`.
405
+
406
+ tol : float, default=1e-3
407
+ Control early stopping based on the norm of the differences in the
408
+ dictionary between 2 steps.
409
+
410
+ To disable early stopping based on changes in the dictionary, set
411
+ `tol` to 0.0.
412
+
413
+ .. versionadded:: 1.1
414
+
415
+ max_no_improvement : int or None, default=10
416
+ Control early stopping based on the consecutive number of mini batches
417
+ that does not yield an improvement on the smoothed cost function.
418
+
419
+ To disable convergence detection based on cost function, set
420
+ `max_no_improvement` to `None`.
421
+
422
+ .. versionadded:: 1.1
423
+
424
+ Attributes
425
+ ----------
426
+ components_ : ndarray of shape (n_components, n_features)
427
+ Sparse components extracted from the data.
428
+
429
+ n_components_ : int
430
+ Estimated number of components.
431
+
432
+ .. versionadded:: 0.23
433
+
434
+ n_iter_ : int
435
+ Number of iterations run.
436
+
437
+ mean_ : ndarray of shape (n_features,)
438
+ Per-feature empirical mean, estimated from the training set.
439
+ Equal to ``X.mean(axis=0)``.
440
+
441
+ n_features_in_ : int
442
+ Number of features seen during :term:`fit`.
443
+
444
+ .. versionadded:: 0.24
445
+
446
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
447
+ Names of features seen during :term:`fit`. Defined only when `X`
448
+ has feature names that are all strings.
449
+
450
+ .. versionadded:: 1.0
451
+
452
+ See Also
453
+ --------
454
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
455
+ IncrementalPCA : Incremental principal components analysis.
456
+ PCA : Principal component analysis.
457
+ SparsePCA : Sparse Principal Components Analysis.
458
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
459
+
460
+ Examples
461
+ --------
462
+ >>> import numpy as np
463
+ >>> from sklearn.datasets import make_friedman1
464
+ >>> from sklearn.decomposition import MiniBatchSparsePCA
465
+ >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
466
+ >>> transformer = MiniBatchSparsePCA(n_components=5, batch_size=50,
467
+ ... max_iter=10, random_state=0)
468
+ >>> transformer.fit(X)
469
+ MiniBatchSparsePCA(...)
470
+ >>> X_transformed = transformer.transform(X)
471
+ >>> X_transformed.shape
472
+ (200, 5)
473
+ >>> # most values in the components_ are zero (sparsity)
474
+ >>> np.mean(transformer.components_ == 0)
475
+ 0.9...
476
+ """
477
+
478
+ _parameter_constraints: dict = {
479
+ **_BaseSparsePCA._parameter_constraints,
480
+ "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)],
481
+ "callback": [None, callable],
482
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
483
+ "shuffle": ["boolean"],
484
+ "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
485
+ }
486
+
487
+ def __init__(
488
+ self,
489
+ n_components=None,
490
+ *,
491
+ alpha=1,
492
+ ridge_alpha=0.01,
493
+ max_iter=1_000,
494
+ callback=None,
495
+ batch_size=3,
496
+ verbose=False,
497
+ shuffle=True,
498
+ n_jobs=None,
499
+ method="lars",
500
+ random_state=None,
501
+ tol=1e-3,
502
+ max_no_improvement=10,
503
+ ):
504
+ super().__init__(
505
+ n_components=n_components,
506
+ alpha=alpha,
507
+ ridge_alpha=ridge_alpha,
508
+ max_iter=max_iter,
509
+ tol=tol,
510
+ method=method,
511
+ n_jobs=n_jobs,
512
+ verbose=verbose,
513
+ random_state=random_state,
514
+ )
515
+ self.callback = callback
516
+ self.batch_size = batch_size
517
+ self.shuffle = shuffle
518
+ self.max_no_improvement = max_no_improvement
519
+
520
+ def _fit(self, X, n_components, random_state):
521
+ """Specialized `fit` for MiniBatchSparsePCA."""
522
+
523
+ transform_algorithm = "lasso_" + self.method
524
+ est = MiniBatchDictionaryLearning(
525
+ n_components=n_components,
526
+ alpha=self.alpha,
527
+ max_iter=self.max_iter,
528
+ dict_init=None,
529
+ batch_size=self.batch_size,
530
+ shuffle=self.shuffle,
531
+ n_jobs=self.n_jobs,
532
+ fit_algorithm=self.method,
533
+ random_state=random_state,
534
+ transform_algorithm=transform_algorithm,
535
+ transform_alpha=self.alpha,
536
+ verbose=self.verbose,
537
+ callback=self.callback,
538
+ tol=self.tol,
539
+ max_no_improvement=self.max_no_improvement,
540
+ )
541
+ est.set_output(transform="default")
542
+ est.fit(X.T)
543
+
544
+ self.components_, self.n_iter_ = est.transform(X.T).T, est.n_iter_
545
+
546
+ components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
547
+ components_norm[components_norm == 0] = 1
548
+ self.components_ /= components_norm
549
+ self.n_components_ = len(self.components_)
550
+
551
+ return self
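
The `inverse_transform` defined in `_BaseSparsePCA` above is only an approximate inversion. A small sketch, with assumed toy data and outside the diff, illustrating the round trip:

import numpy as np
from sklearn.decomposition import SparsePCA

rng = np.random.RandomState(0)
X = rng.randn(50, 8)                     # assumed toy data
spca = SparsePCA(n_components=4, random_state=0).fit(X)
X_rec = spca.inverse_transform(spca.transform(X))
# Nonzero reconstruction error: the inversion loses information, as the docstring notes.
print(np.mean((X - X_rec) ** 2))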
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py ADDED
@@ -0,0 +1,319 @@
1
+ """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
2
+ """
3
+
4
+ # Author: Lars Buitinck
5
+ # Olivier Grisel <[email protected]>
6
+ # Michael Becker <[email protected]>
7
+ # License: 3-clause BSD.
8
+
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ import scipy.sparse as sp
13
+ from scipy.sparse.linalg import svds
14
+
15
+ from ..base import (
16
+ BaseEstimator,
17
+ ClassNamePrefixFeaturesOutMixin,
18
+ TransformerMixin,
19
+ _fit_context,
20
+ )
21
+ from ..utils import check_array, check_random_state
22
+ from ..utils._arpack import _init_arpack_v0
23
+ from ..utils._param_validation import Interval, StrOptions
24
+ from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
25
+ from ..utils.sparsefuncs import mean_variance_axis
26
+ from ..utils.validation import check_is_fitted
27
+
28
+ __all__ = ["TruncatedSVD"]
29
+
30
+
31
+ class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
32
+ """Dimensionality reduction using truncated SVD (aka LSA).
33
+
34
+ This transformer performs linear dimensionality reduction by means of
35
+ truncated singular value decomposition (SVD). Contrary to PCA, this
36
+ estimator does not center the data before computing the singular value
37
+ decomposition. This means it can work with sparse matrices
38
+ efficiently.
39
+
40
+ In particular, truncated SVD works on term count/tf-idf matrices as
41
+ returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In
42
+ that context, it is known as latent semantic analysis (LSA).
43
+
44
+ This estimator supports two algorithms: a fast randomized SVD solver, and
45
+ a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or
46
+ `X.T * X`, whichever is more efficient.
47
+
48
+ Read more in the :ref:`User Guide <LSA>`.
49
+
50
+ Parameters
51
+ ----------
52
+ n_components : int, default=2
53
+ Desired dimensionality of output data.
54
+ If algorithm='arpack', must be strictly less than the number of features.
55
+ If algorithm='randomized', must be less than or equal to the number of features.
56
+ The default value is useful for visualisation. For LSA, a value of
57
+ 100 is recommended.
58
+
59
+ algorithm : {'arpack', 'randomized'}, default='randomized'
60
+ SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
61
+ (scipy.sparse.linalg.svds), or "randomized" for the randomized
62
+ algorithm due to Halko (2009).
63
+
64
+ n_iter : int, default=5
65
+ Number of iterations for randomized SVD solver. Not used by ARPACK. The
66
+ default is larger than the default in
67
+ :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse
68
+ matrices that may have large slowly decaying spectrum.
69
+
70
+ n_oversamples : int, default=10
71
+ Number of oversamples for randomized SVD solver. Not used by ARPACK.
72
+ See :func:`~sklearn.utils.extmath.randomized_svd` for a complete
73
+ description.
74
+
75
+ .. versionadded:: 1.1
76
+
77
+ power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
78
+ Power iteration normalizer for randomized SVD solver.
79
+ Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
80
+ for more details.
81
+
82
+ .. versionadded:: 1.1
83
+
84
+ random_state : int, RandomState instance or None, default=None
85
+ Used during randomized svd. Pass an int for reproducible results across
86
+ multiple function calls.
87
+ See :term:`Glossary <random_state>`.
88
+
89
+ tol : float, default=0.0
90
+ Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
91
+ SVD solver.
92
+
93
+ Attributes
94
+ ----------
95
+ components_ : ndarray of shape (n_components, n_features)
96
+ The right singular vectors of the input data.
97
+
98
+ explained_variance_ : ndarray of shape (n_components,)
99
+ The variance of the training samples transformed by a projection to
100
+ each component.
101
+
102
+ explained_variance_ratio_ : ndarray of shape (n_components,)
103
+ Percentage of variance explained by each of the selected components.
104
+
105
+ singular_values_ : ndarray of shape (n_components,)
106
+ The singular values corresponding to each of the selected components.
107
+ The singular values are equal to the 2-norms of the ``n_components``
108
+ variables in the lower-dimensional space.
109
+
110
+ n_features_in_ : int
111
+ Number of features seen during :term:`fit`.
112
+
113
+ .. versionadded:: 0.24
114
+
115
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
116
+ Names of features seen during :term:`fit`. Defined only when `X`
117
+ has feature names that are all strings.
118
+
119
+ .. versionadded:: 1.0
120
+
121
+ See Also
122
+ --------
123
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
124
+ FactorAnalysis : A simple linear generative model with
125
+ Gaussian latent variables.
126
+ IncrementalPCA : Incremental principal components analysis.
127
+ KernelPCA : Kernel Principal component analysis.
128
+ NMF : Non-Negative Matrix Factorization.
129
+ PCA : Principal component analysis.
130
+
131
+ Notes
132
+ -----
133
+ SVD suffers from a problem called "sign indeterminacy", which means the
134
+ sign of the ``components_`` and the output from transform depend on the
135
+ algorithm and random state. To work around this, fit instances of this
136
+ class to data once, then keep the instance around to do transformations.
137
+
138
+ References
139
+ ----------
140
+ :arxiv:`Halko, et al. (2009). "Finding structure with randomness:
141
+ Stochastic algorithms for constructing approximate matrix decompositions"
142
+ <0909.4061>`
143
+
144
+ Examples
145
+ --------
146
+ >>> from sklearn.decomposition import TruncatedSVD
147
+ >>> from scipy.sparse import csr_matrix
148
+ >>> import numpy as np
149
+ >>> np.random.seed(0)
150
+ >>> X_dense = np.random.rand(100, 100)
151
+ >>> X_dense[:, 2 * np.arange(50)] = 0
152
+ >>> X = csr_matrix(X_dense)
153
+ >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
154
+ >>> svd.fit(X)
155
+ TruncatedSVD(n_components=5, n_iter=7, random_state=42)
156
+ >>> print(svd.explained_variance_ratio_)
157
+ [0.0157... 0.0512... 0.0499... 0.0479... 0.0453...]
158
+ >>> print(svd.explained_variance_ratio_.sum())
159
+ 0.2102...
160
+ >>> print(svd.singular_values_)
161
+ [35.2410... 4.5981... 4.5420... 4.4486... 4.3288...]
162
+ """
163
+
164
+ _parameter_constraints: dict = {
165
+ "n_components": [Interval(Integral, 1, None, closed="left")],
166
+ "algorithm": [StrOptions({"arpack", "randomized"})],
167
+ "n_iter": [Interval(Integral, 0, None, closed="left")],
168
+ "n_oversamples": [Interval(Integral, 1, None, closed="left")],
169
+ "power_iteration_normalizer": [StrOptions({"auto", "OR", "LU", "none"})],
170
+ "random_state": ["random_state"],
171
+ "tol": [Interval(Real, 0, None, closed="left")],
172
+ }
173
+
174
+ def __init__(
175
+ self,
176
+ n_components=2,
177
+ *,
178
+ algorithm="randomized",
179
+ n_iter=5,
180
+ n_oversamples=10,
181
+ power_iteration_normalizer="auto",
182
+ random_state=None,
183
+ tol=0.0,
184
+ ):
185
+ self.algorithm = algorithm
186
+ self.n_components = n_components
187
+ self.n_iter = n_iter
188
+ self.n_oversamples = n_oversamples
189
+ self.power_iteration_normalizer = power_iteration_normalizer
190
+ self.random_state = random_state
191
+ self.tol = tol
192
+
193
+ def fit(self, X, y=None):
194
+ """Fit model on training data X.
195
+
196
+ Parameters
197
+ ----------
198
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
199
+ Training data.
200
+
201
+ y : Ignored
202
+ Not used, present here for API consistency by convention.
203
+
204
+ Returns
205
+ -------
206
+ self : object
207
+ Returns the transformer object.
208
+ """
209
+ self.fit_transform(X)
210
+ return self
211
+
212
+ @_fit_context(prefer_skip_nested_validation=True)
213
+ def fit_transform(self, X, y=None):
214
+ """Fit model to X and perform dimensionality reduction on X.
215
+
216
+ Parameters
217
+ ----------
218
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
219
+ Training data.
220
+
221
+ y : Ignored
222
+ Not used, present here for API consistency by convention.
223
+
224
+ Returns
225
+ -------
226
+ X_new : ndarray of shape (n_samples, n_components)
227
+ Reduced version of X. This will always be a dense array.
228
+ """
229
+ X = self._validate_data(X, accept_sparse=["csr", "csc"], ensure_min_features=2)
230
+ random_state = check_random_state(self.random_state)
231
+
232
+ if self.algorithm == "arpack":
233
+ v0 = _init_arpack_v0(min(X.shape), random_state)
234
+ U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0)
235
+ # svds doesn't abide by scipy.linalg.svd/randomized_svd
236
+ # conventions, so reverse its outputs.
237
+ Sigma = Sigma[::-1]
238
+ U, VT = svd_flip(U[:, ::-1], VT[::-1])
239
+
240
+ elif self.algorithm == "randomized":
241
+ if self.n_components > X.shape[1]:
242
+ raise ValueError(
243
+ f"n_components({self.n_components}) must be <="
244
+ f" n_features({X.shape[1]})."
245
+ )
246
+ U, Sigma, VT = randomized_svd(
247
+ X,
248
+ self.n_components,
249
+ n_iter=self.n_iter,
250
+ n_oversamples=self.n_oversamples,
251
+ power_iteration_normalizer=self.power_iteration_normalizer,
252
+ random_state=random_state,
253
+ )
254
+
255
+ self.components_ = VT
256
+
257
+ # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T,
258
+ # X @ V is not the same as U @ Sigma
259
+ if self.algorithm == "randomized" or (
260
+ self.algorithm == "arpack" and self.tol > 0
261
+ ):
262
+ X_transformed = safe_sparse_dot(X, self.components_.T)
263
+ else:
264
+ X_transformed = U * Sigma
265
+
266
+ # Calculate explained variance & explained variance ratio
267
+ self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
268
+ if sp.issparse(X):
269
+ _, full_var = mean_variance_axis(X, axis=0)
270
+ full_var = full_var.sum()
271
+ else:
272
+ full_var = np.var(X, axis=0).sum()
273
+ self.explained_variance_ratio_ = exp_var / full_var
274
+ self.singular_values_ = Sigma # Store the singular values.
275
+
276
+ return X_transformed
277
+
278
+ def transform(self, X):
279
+ """Perform dimensionality reduction on X.
280
+
281
+ Parameters
282
+ ----------
283
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
284
+ New data.
285
+
286
+ Returns
287
+ -------
288
+ X_new : ndarray of shape (n_samples, n_components)
289
+ Reduced version of X. This will always be a dense array.
290
+ """
291
+ check_is_fitted(self)
292
+ X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False)
293
+ return safe_sparse_dot(X, self.components_.T)
294
+
295
+ def inverse_transform(self, X):
296
+ """Transform X back to its original space.
297
+
298
+ Returns an array X_original whose transform would be X.
299
+
300
+ Parameters
301
+ ----------
302
+ X : array-like of shape (n_samples, n_components)
303
+ New data.
304
+
305
+ Returns
306
+ -------
307
+ X_original : ndarray of shape (n_samples, n_features)
308
+ Note that this is always a dense array.
309
+ """
310
+ X = check_array(X)
311
+ return np.dot(X, self.components_)
312
+
313
+ def _more_tags(self):
314
+ return {"preserves_dtype": [np.float64, np.float32]}
315
+
316
+ @property
317
+ def _n_features_out(self):
318
+ """Number of transformed output features."""
319
+ return self.components_.shape[0]
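
Since the docstring above highlights the LSA use case on tf-idf matrices, here is a brief hedged sketch of that workflow (the tiny corpus is an assumption, not part of the diff):

from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

corpus = ["the cat sat", "the dog sat", "cats and dogs sat"]
X_tfidf = TfidfVectorizer().fit_transform(corpus)   # sparse tf-idf matrix, no centering needed
lsa = TruncatedSVD(n_components=2, random_state=0).fit(X_tfidf)
print(lsa.explained_variance_ratio_)
print(lsa.transform(X_tfidf).shape)  # (3, 2)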
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py ADDED
@@ -0,0 +1,47 @@
1
+ """
2
+ The :mod:`sklearn.feature_selection` module implements feature selection
3
+ algorithms. It currently includes univariate filter selection methods and the
4
+ recursive feature elimination algorithm.
5
+ """
6
+
7
+ from ._base import SelectorMixin
8
+ from ._from_model import SelectFromModel
9
+ from ._mutual_info import mutual_info_classif, mutual_info_regression
10
+ from ._rfe import RFE, RFECV
11
+ from ._sequential import SequentialFeatureSelector
12
+ from ._univariate_selection import (
13
+ GenericUnivariateSelect,
14
+ SelectFdr,
15
+ SelectFpr,
16
+ SelectFwe,
17
+ SelectKBest,
18
+ SelectPercentile,
19
+ chi2,
20
+ f_classif,
21
+ f_oneway,
22
+ f_regression,
23
+ r_regression,
24
+ )
25
+ from ._variance_threshold import VarianceThreshold
26
+
27
+ __all__ = [
28
+ "GenericUnivariateSelect",
29
+ "SequentialFeatureSelector",
30
+ "RFE",
31
+ "RFECV",
32
+ "SelectFdr",
33
+ "SelectFpr",
34
+ "SelectFwe",
35
+ "SelectKBest",
36
+ "SelectFromModel",
37
+ "SelectPercentile",
38
+ "VarianceThreshold",
39
+ "chi2",
40
+ "f_classif",
41
+ "f_oneway",
42
+ "f_regression",
43
+ "r_regression",
44
+ "mutual_info_classif",
45
+ "mutual_info_regression",
46
+ "SelectorMixin",
47
+ ]
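
A quick illustrative sketch (toy dataset assumed, not part of the diff) of the univariate filter API re-exported above:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, chi2

X, y = load_iris(return_X_y=True)
X_new = SelectKBest(chi2, k=2).fit_transform(X, y)  # keep the 2 highest-scoring features
print(X_new.shape)  # (150, 2)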
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc ADDED
Binary file (8.55 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc ADDED
Binary file (15.5 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc ADDED
Binary file (16.5 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc ADDED
Binary file (24.2 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc ADDED
Binary file (9.82 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc ADDED
Binary file (38.1 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc ADDED
Binary file (4.66 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_base.py ADDED
@@ -0,0 +1,266 @@
1
+ """Generic feature selection mixin"""
2
+
3
+ # Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+ from abc import ABCMeta, abstractmethod
8
+ from operator import attrgetter
9
+
10
+ import numpy as np
11
+ from scipy.sparse import csc_matrix, issparse
12
+
13
+ from ..base import TransformerMixin
14
+ from ..utils import (
15
+ _is_pandas_df,
16
+ _safe_indexing,
17
+ check_array,
18
+ safe_sqr,
19
+ )
20
+ from ..utils._set_output import _get_output_config
21
+ from ..utils._tags import _safe_tags
22
+ from ..utils.validation import _check_feature_names_in, check_is_fitted
23
+
24
+
25
+ class SelectorMixin(TransformerMixin, metaclass=ABCMeta):
26
+ """
27
+ Transformer mixin that performs feature selection given a support mask
28
+
29
+ This mixin provides a feature selector implementation with `transform` and
30
+ `inverse_transform` functionality given an implementation of
31
+ `_get_support_mask`.
32
+
33
+ Examples
34
+ --------
35
+ >>> import numpy as np
36
+ >>> from sklearn.datasets import load_iris
37
+ >>> from sklearn.base import BaseEstimator
38
+ >>> from sklearn.feature_selection import SelectorMixin
39
+ >>> class FeatureSelector(SelectorMixin, BaseEstimator):
40
+ ... def fit(self, X, y=None):
41
+ ... self.n_features_in_ = X.shape[1]
42
+ ... return self
43
+ ... def _get_support_mask(self):
44
+ ... mask = np.zeros(self.n_features_in_, dtype=bool)
45
+ ... mask[:2] = True # select the first two features
46
+ ... return mask
47
+ >>> X, y = load_iris(return_X_y=True)
48
+ >>> FeatureSelector().fit_transform(X, y).shape
49
+ (150, 2)
50
+ """
51
+
52
+ def get_support(self, indices=False):
53
+ """
54
+ Get a mask, or integer index, of the features selected.
55
+
56
+ Parameters
57
+ ----------
58
+ indices : bool, default=False
59
+ If True, the return value will be an array of integers, rather
60
+ than a boolean mask.
61
+
62
+ Returns
63
+ -------
64
+ support : array
65
+ An index that selects the retained features from a feature vector.
66
+ If `indices` is False, this is a boolean array of shape
67
+ [# input features], in which an element is True iff its
68
+ corresponding feature is selected for retention. If `indices` is
69
+ True, this is an integer array of shape [# output features] whose
70
+ values are indices into the input feature vector.
71
+ """
72
+ mask = self._get_support_mask()
73
+ return mask if not indices else np.where(mask)[0]
74
+
75
+ @abstractmethod
76
+ def _get_support_mask(self):
77
+ """
78
+ Get the boolean mask indicating which features are selected
79
+
80
+ Returns
81
+ -------
82
+ support : boolean array of shape [# input features]
83
+ An element is True iff its corresponding feature is selected for
84
+ retention.
85
+ """
86
+
87
+ def transform(self, X):
88
+ """Reduce X to the selected features.
89
+
90
+ Parameters
91
+ ----------
92
+ X : array of shape [n_samples, n_features]
93
+ The input samples.
94
+
95
+ Returns
96
+ -------
97
+ X_r : array of shape [n_samples, n_selected_features]
98
+ The input samples with only the selected features.
99
+ """
100
+ # Preserve X when X is a dataframe and the output is configured to
101
+ # be pandas.
102
+ output_config_dense = _get_output_config("transform", estimator=self)["dense"]
103
+ preserve_X = output_config_dense != "default" and _is_pandas_df(X)
104
+
105
+ # note: we use _safe_tags instead of _get_tags because this is a
106
+ # public Mixin.
107
+ X = self._validate_data(
108
+ X,
109
+ dtype=None,
110
+ accept_sparse="csr",
111
+ force_all_finite=not _safe_tags(self, key="allow_nan"),
112
+ cast_to_ndarray=not preserve_X,
113
+ reset=False,
114
+ )
115
+ return self._transform(X)
116
+
117
+ def _transform(self, X):
118
+ """Reduce X to the selected features."""
119
+ mask = self.get_support()
120
+ if not mask.any():
121
+ warnings.warn(
122
+ (
123
+ "No features were selected: either the data is"
124
+ " too noisy or the selection test too strict."
125
+ ),
126
+ UserWarning,
127
+ )
128
+ if hasattr(X, "iloc"):
129
+ return X.iloc[:, :0]
130
+ return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0))
131
+ return _safe_indexing(X, mask, axis=1)
132
+
133
+ def inverse_transform(self, X):
134
+ """Reverse the transformation operation.
135
+
136
+ Parameters
137
+ ----------
138
+ X : array of shape [n_samples, n_selected_features]
139
+ The input samples.
140
+
141
+ Returns
142
+ -------
143
+ X_r : array of shape [n_samples, n_original_features]
144
+ `X` with columns of zeros inserted where features would have
145
+ been removed by :meth:`transform`.
146
+ """
147
+ if issparse(X):
148
+ X = X.tocsc()
149
+ # insert additional entries in indptr:
150
+ # e.g. if transform changed indptr from [0 2 6 7] to [0 2 3]
151
+ # col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3]
152
+ it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
153
+ col_nonzeros = it.ravel()
154
+ indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
155
+ Xt = csc_matrix(
156
+ (X.data, X.indices, indptr),
157
+ shape=(X.shape[0], len(indptr) - 1),
158
+ dtype=X.dtype,
159
+ )
160
+ return Xt
161
+
162
+ support = self.get_support()
163
+ X = check_array(X, dtype=None)
164
+ if support.sum() != X.shape[1]:
165
+ raise ValueError("X has a different shape than during fitting.")
166
+
167
+ if X.ndim == 1:
168
+ X = X[None, :]
169
+ Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
170
+ Xt[:, support] = X
171
+ return Xt
172
+
173
+ def get_feature_names_out(self, input_features=None):
174
+ """Mask feature names according to selected features.
175
+
176
+ Parameters
177
+ ----------
178
+ input_features : array-like of str or None, default=None
179
+ Input features.
180
+
181
+ - If `input_features` is `None`, then `feature_names_in_` is
182
+ used as feature names in. If `feature_names_in_` is not defined,
183
+ then the following input feature names are generated:
184
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
185
+ - If `input_features` is an array-like, then `input_features` must
186
+ match `feature_names_in_` if `feature_names_in_` is defined.
187
+
188
+ Returns
189
+ -------
190
+ feature_names_out : ndarray of str objects
191
+ Transformed feature names.
192
+ """
193
+ check_is_fitted(self)
194
+ input_features = _check_feature_names_in(self, input_features)
195
+ return input_features[self.get_support()]
196
+
197
+
198
+ def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1):
199
+ """
200
+ Retrieve and aggregate (ndim > 1) the feature importances
201
+ from an estimator. Also optionally applies transformation.
202
+
203
+ Parameters
204
+ ----------
205
+ estimator : estimator
206
+ A scikit-learn estimator from which we want to get the feature
207
+ importances.
208
+
209
+ getter : "auto", str or callable
210
+ An attribute or a callable to get the feature importance. If `"auto"`,
211
+ `estimator` is expected to expose `coef_` or `feature_importances_`.
212
+
213
+ transform_func : {"norm", "square"}, default=None
214
+ The transform to apply to the feature importances. By default (`None`)
215
+ no transformation is applied.
216
+
217
+ norm_order : int, default=1
218
+ The norm order to apply when `transform_func="norm"`. Only applied
219
+ when `importances.ndim > 1`.
220
+
221
+ Returns
222
+ -------
223
+ importances : ndarray of shape (n_features,)
224
+ The features importances, optionally transformed.
225
+ """
226
+ if isinstance(getter, str):
227
+ if getter == "auto":
228
+ if hasattr(estimator, "coef_"):
229
+ getter = attrgetter("coef_")
230
+ elif hasattr(estimator, "feature_importances_"):
231
+ getter = attrgetter("feature_importances_")
232
+ else:
233
+ raise ValueError(
234
+ "when `importance_getter=='auto'`, the underlying "
235
+ f"estimator {estimator.__class__.__name__} should have "
236
+ "`coef_` or `feature_importances_` attribute. Either "
237
+ "pass a fitted estimator to feature selector or call fit "
238
+ "before calling transform."
239
+ )
240
+ else:
241
+ getter = attrgetter(getter)
242
+ elif not callable(getter):
243
+ raise ValueError("`importance_getter` has to be a string or `callable`")
244
+
245
+ importances = getter(estimator)
246
+
247
+ if transform_func is None:
248
+ return importances
249
+ elif transform_func == "norm":
250
+ if importances.ndim == 1:
251
+ importances = np.abs(importances)
252
+ else:
253
+ importances = np.linalg.norm(importances, axis=0, ord=norm_order)
254
+ elif transform_func == "square":
255
+ if importances.ndim == 1:
256
+ importances = safe_sqr(importances)
257
+ else:
258
+ importances = safe_sqr(importances).sum(axis=0)
259
+ else:
260
+ raise ValueError(
261
+ "Valid values for `transform_func` are "
262
+ + "None, 'norm' and 'square'. Those two "
263
+ + "transformation are only supported now"
264
+ )
265
+
266
+ return importances
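
The `inverse_transform` defined in `SelectorMixin` above re-inserts zero columns where features were dropped. A small sketch, with an assumed toy array and outside the diff, showing that behaviour through `VarianceThreshold` (one of the concrete selectors built on this mixin):

import numpy as np
from sklearn.feature_selection import VarianceThreshold

X = np.array([[0.0, 1.0, 2.0],
              [0.0, 3.0, 4.0],
              [0.0, 5.0, 6.0]])    # first column is constant
sel = VarianceThreshold().fit(X)   # default threshold=0 drops zero-variance features
X_r = sel.transform(X)             # shape (3, 2)
print(sel.inverse_transform(X_r))  # the dropped column comes back as zeros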
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py ADDED
@@ -0,0 +1,522 @@
1
+ # Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
2
+ # License: BSD 3 clause
3
+
4
+ from copy import deepcopy
5
+ from numbers import Integral, Real
6
+
7
+ import numpy as np
8
+
9
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone
10
+ from ..exceptions import NotFittedError
11
+ from ..utils._param_validation import HasMethods, Interval, Options
12
+ from ..utils._tags import _safe_tags
13
+ from ..utils.metadata_routing import (
14
+ MetadataRouter,
15
+ MethodMapping,
16
+ _routing_enabled,
17
+ process_routing,
18
+ )
19
+ from ..utils.metaestimators import available_if
20
+ from ..utils.validation import _num_features, check_is_fitted, check_scalar
21
+ from ._base import SelectorMixin, _get_feature_importances
22
+
23
+
24
+ def _calculate_threshold(estimator, importances, threshold):
25
+ """Interpret the threshold value"""
26
+
27
+ if threshold is None:
28
+ # determine default from estimator
29
+ est_name = estimator.__class__.__name__
30
+ is_l1_penalized = hasattr(estimator, "penalty") and estimator.penalty == "l1"
31
+ is_lasso = "Lasso" in est_name
32
+ is_elasticnet_l1_penalized = "ElasticNet" in est_name and (
33
+ (hasattr(estimator, "l1_ratio_") and np.isclose(estimator.l1_ratio_, 1.0))
34
+ or (hasattr(estimator, "l1_ratio") and np.isclose(estimator.l1_ratio, 1.0))
35
+ )
36
+ if is_l1_penalized or is_lasso or is_elasticnet_l1_penalized:
37
+ # the natural default threshold is 0 when l1 penalty was used
38
+ threshold = 1e-5
39
+ else:
40
+ threshold = "mean"
41
+
42
+ if isinstance(threshold, str):
43
+ if "*" in threshold:
44
+ scale, reference = threshold.split("*")
45
+ scale = float(scale.strip())
46
+ reference = reference.strip()
47
+
48
+ if reference == "median":
49
+ reference = np.median(importances)
50
+ elif reference == "mean":
51
+ reference = np.mean(importances)
52
+ else:
53
+ raise ValueError("Unknown reference: " + reference)
54
+
55
+ threshold = scale * reference
56
+
57
+ elif threshold == "median":
58
+ threshold = np.median(importances)
59
+
60
+ elif threshold == "mean":
61
+ threshold = np.mean(importances)
62
+
63
+ else:
64
+ raise ValueError(
65
+ "Expected threshold='mean' or threshold='median' got %s" % threshold
66
+ )
67
+
68
+ else:
69
+ threshold = float(threshold)
70
+
71
+ return threshold
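
As a hedged illustration of the string conventions `_calculate_threshold` parses ("mean", "median", and scaled variants such as "1.25*mean"), a sketch that calls the private helper directly on made-up importances, purely for demonstration and not a public API usage pattern:

import numpy as np
from sklearn.feature_selection._from_model import _calculate_threshold
from sklearn.linear_model import LogisticRegression

importances = np.array([0.1, 0.4, 0.2, 0.3])
est = LogisticRegression()  # no l1 penalty, so a None threshold falls back to "mean"
print(_calculate_threshold(est, importances, None))         # 0.25
print(_calculate_threshold(est, importances, "1.25*mean"))  # 0.3125
print(_calculate_threshold(est, importances, "median"))     # 0.25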
72
+
73
+
74
+ def _estimator_has(attr):
75
+ """Check if we can delegate a method to the underlying estimator.
76
+
77
+ First, we check the fitted `estimator_` if available, otherwise we check the
78
+ unfitted `estimator`. We raise the original `AttributeError` if `attr` does
79
+ not exist. This function is used together with `available_if`.
80
+ """
81
+
82
+ def check(self):
83
+ if hasattr(self, "estimator_"):
84
+ getattr(self.estimator_, attr)
85
+ else:
86
+ getattr(self.estimator, attr)
87
+
88
+ return True
89
+
90
+ return check
91
+
92
+
93
+ class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
94
+ """Meta-transformer for selecting features based on importance weights.
95
+
96
+ .. versionadded:: 0.17
97
+
98
+ Read more in the :ref:`User Guide <select_from_model>`.
99
+
100
+ Parameters
101
+ ----------
102
+ estimator : object
103
+ The base estimator from which the transformer is built.
104
+ This can be both a fitted (if ``prefit`` is set to True)
105
+ or a non-fitted estimator. The estimator should have a
106
+ ``feature_importances_`` or ``coef_`` attribute after fitting.
107
+ Otherwise, the ``importance_getter`` parameter should be used.
108
+
109
+ threshold : str or float, default=None
110
+ The threshold value to use for feature selection. Features whose
111
+ absolute importance value is greater or equal are kept while the others
112
+ are discarded. If "median" (resp. "mean"), then the ``threshold`` value
113
+ is the median (resp. the mean) of the feature importances. A scaling
114
+ factor (e.g., "1.25*mean") may also be used. If None and if the
115
+ estimator has a parameter penalty set to l1, either explicitly
116
+ or implicitly (e.g, Lasso), the threshold used is 1e-5.
117
+ Otherwise, "mean" is used by default.
118
+
119
+ prefit : bool, default=False
120
+ Whether a prefit model is expected to be passed into the constructor
121
+ directly or not.
122
+ If `True`, `estimator` must be a fitted estimator.
123
+ If `False`, `estimator` is fitted and updated by calling
124
+ `fit` and `partial_fit`, respectively.
125
+
126
+ norm_order : non-zero int, inf, -inf, default=1
127
+ Order of the norm used to filter the vectors of coefficients below
128
+ ``threshold`` in the case where the ``coef_`` attribute of the
129
+ estimator is of dimension 2.
130
+
131
+ max_features : int, callable, default=None
132
+ The maximum number of features to select.
133
+
134
+ - If an integer, then it specifies the maximum number of features to
135
+ allow.
136
+ - If a callable, then it specifies how to calculate the maximum number of
137
+ features allowed by using the output of `max_features(X)`.
138
+ - If `None`, then all features are kept.
139
+
140
+ To only select based on ``max_features``, set ``threshold=-np.inf``.
141
+
142
+ .. versionadded:: 0.20
143
+ .. versionchanged:: 1.1
144
+ `max_features` accepts a callable.
145
+
146
+ importance_getter : str or callable, default='auto'
147
+ If 'auto', uses the feature importance either through a ``coef_``
148
+ attribute or ``feature_importances_`` attribute of estimator.
149
+
150
+ Also accepts a string that specifies an attribute name/path
151
+ for extracting feature importance (implemented with `attrgetter`).
152
+ For example, give `regressor_.coef_` in case of
153
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
154
+ `named_steps.clf.feature_importances_` in case of
155
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
156
+
157
+ If `callable`, overrides the default feature importance getter.
158
+ The callable is passed with the fitted estimator and it should
159
+ return importance for each feature.
160
+
161
+ .. versionadded:: 0.24
162
+
163
+ Attributes
164
+ ----------
165
+ estimator_ : estimator
166
+ The base estimator from which the transformer is built. This attribute
167
+ exist only when `fit` has been called.
168
+
169
+ - If `prefit=True`, it is a deep copy of `estimator`.
170
+ - If `prefit=False`, it is a clone of `estimator` and fit on the data
171
+ passed to `fit` or `partial_fit`.
172
+
173
+ n_features_in_ : int
174
+ Number of features seen during :term:`fit`. Only defined if the
175
+ underlying estimator exposes such an attribute when fit.
176
+
177
+ .. versionadded:: 0.24
178
+
179
+ max_features_ : int
180
+ Maximum number of features calculated during :term:`fit`. Only defined
181
+ if the ``max_features`` is not `None`.
182
+
183
+ - If `max_features` is an `int`, then `max_features_ = max_features`.
184
+ - If `max_features` is a callable, then `max_features_ = max_features(X)`.
185
+
186
+ .. versionadded:: 1.1
187
+
188
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
189
+ Names of features seen during :term:`fit`. Defined only when `X`
190
+ has feature names that are all strings.
191
+
192
+ .. versionadded:: 1.0
193
+
194
+ threshold_ : float
195
+ The threshold value used for feature selection.
196
+
197
+ See Also
198
+ --------
199
+ RFE : Recursive feature elimination based on importance weights.
200
+ RFECV : Recursive feature elimination with built-in cross-validated
201
+ selection of the best number of features.
202
+ SequentialFeatureSelector : Sequential cross-validation based feature
203
+ selection. Does not rely on importance weights.
204
+
205
+ Notes
206
+ -----
207
+ Allows NaN/Inf in the input if the underlying estimator does as well.
208
+
209
+ Examples
210
+ --------
211
+ >>> from sklearn.feature_selection import SelectFromModel
212
+ >>> from sklearn.linear_model import LogisticRegression
213
+ >>> X = [[ 0.87, -1.34, 0.31 ],
214
+ ... [-2.79, -0.02, -0.85 ],
215
+ ... [-1.34, -0.48, -2.55 ],
216
+ ... [ 1.92, 1.48, 0.65 ]]
217
+ >>> y = [0, 1, 0, 1]
218
+ >>> selector = SelectFromModel(estimator=LogisticRegression()).fit(X, y)
219
+ >>> selector.estimator_.coef_
220
+ array([[-0.3252..., 0.8345..., 0.4976...]])
221
+ >>> selector.threshold_
222
+ 0.55249...
223
+ >>> selector.get_support()
224
+ array([False, True, False])
225
+ >>> selector.transform(X)
226
+ array([[-1.34],
227
+ [-0.02],
228
+ [-0.48],
229
+ [ 1.48]])
230
+
231
+ Using a callable to create a selector that can use no more than half
232
+ of the input features.
233
+
234
+ >>> def half_callable(X):
235
+ ... return round(len(X[0]) / 2)
236
+ >>> half_selector = SelectFromModel(estimator=LogisticRegression(),
237
+ ... max_features=half_callable)
238
+ >>> _ = half_selector.fit(X, y)
239
+ >>> half_selector.max_features_
240
+ 2
241
+ """
242
+
243
+ _parameter_constraints: dict = {
244
+ "estimator": [HasMethods("fit")],
245
+ "threshold": [Interval(Real, None, None, closed="both"), str, None],
246
+ "prefit": ["boolean"],
247
+ "norm_order": [
248
+ Interval(Integral, None, -1, closed="right"),
249
+ Interval(Integral, 1, None, closed="left"),
250
+ Options(Real, {np.inf, -np.inf}),
251
+ ],
252
+ "max_features": [Interval(Integral, 0, None, closed="left"), callable, None],
253
+ "importance_getter": [str, callable],
254
+ }
255
+
256
+ def __init__(
257
+ self,
258
+ estimator,
259
+ *,
260
+ threshold=None,
261
+ prefit=False,
262
+ norm_order=1,
263
+ max_features=None,
264
+ importance_getter="auto",
265
+ ):
266
+ self.estimator = estimator
267
+ self.threshold = threshold
268
+ self.prefit = prefit
269
+ self.importance_getter = importance_getter
270
+ self.norm_order = norm_order
271
+ self.max_features = max_features
272
+
273
+ def _get_support_mask(self):
274
+ estimator = getattr(self, "estimator_", self.estimator)
275
+ max_features = getattr(self, "max_features_", self.max_features)
276
+
277
+ if self.prefit:
278
+ try:
279
+ check_is_fitted(self.estimator)
280
+ except NotFittedError as exc:
281
+ raise NotFittedError(
282
+ "When `prefit=True`, `estimator` is expected to be a fitted "
283
+ "estimator."
284
+ ) from exc
285
+ if callable(max_features):
286
+ # This branch is executed when `transform` is called directly and thus
287
+ # `max_features_` is not set and we fallback using `self.max_features`
288
+ # that is not validated
289
+ raise NotFittedError(
290
+ "When `prefit=True` and `max_features` is a callable, call `fit` "
291
+ "before calling `transform`."
292
+ )
293
+ elif max_features is not None and not isinstance(max_features, Integral):
294
+ raise ValueError(
295
+ f"`max_features` must be an integer. Got `max_features={max_features}` "
296
+ "instead."
297
+ )
298
+
299
+ scores = _get_feature_importances(
300
+ estimator=estimator,
301
+ getter=self.importance_getter,
302
+ transform_func="norm",
303
+ norm_order=self.norm_order,
304
+ )
305
+ threshold = _calculate_threshold(estimator, scores, self.threshold)
306
+ if self.max_features is not None:
307
+ mask = np.zeros_like(scores, dtype=bool)
308
+ candidate_indices = np.argsort(-scores, kind="mergesort")[:max_features]
309
+ mask[candidate_indices] = True
310
+ else:
311
+ mask = np.ones_like(scores, dtype=bool)
312
+ mask[scores < threshold] = False
313
+ return mask
314
+
315
+ def _check_max_features(self, X):
316
+ if self.max_features is not None:
317
+ n_features = _num_features(X)
318
+
319
+ if callable(self.max_features):
320
+ max_features = self.max_features(X)
321
+ else: # int
322
+ max_features = self.max_features
323
+
324
+ check_scalar(
325
+ max_features,
326
+ "max_features",
327
+ Integral,
328
+ min_val=0,
329
+ max_val=n_features,
330
+ )
331
+ self.max_features_ = max_features
332
+
333
+ @_fit_context(
334
+ # SelectFromModel.estimator is not validated yet
335
+ prefer_skip_nested_validation=False
336
+ )
337
+ def fit(self, X, y=None, **fit_params):
338
+ """Fit the SelectFromModel meta-transformer.
339
+
340
+ Parameters
341
+ ----------
342
+ X : array-like of shape (n_samples, n_features)
343
+ The training input samples.
344
+
345
+ y : array-like of shape (n_samples,), default=None
346
+ The target values (integers that correspond to classes in
347
+ classification, real numbers in regression).
348
+
349
+ **fit_params : dict
350
+ - If `enable_metadata_routing=False` (default):
351
+
352
+ Parameters directly passed to the `fit` method of the
353
+ sub-estimator. They are ignored if `prefit=True`.
354
+
355
+ - If `enable_metadata_routing=True`:
356
+
357
+ Parameters safely routed to the `fit` method of the
358
+ sub-estimator. They are ignored if `prefit=True`.
359
+
360
+ .. versionchanged:: 1.4
361
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
362
+ more details.
363
+
364
+ Returns
365
+ -------
366
+ self : object
367
+ Fitted estimator.
368
+ """
369
+ self._check_max_features(X)
370
+
371
+ if self.prefit:
372
+ try:
373
+ check_is_fitted(self.estimator)
374
+ except NotFittedError as exc:
375
+ raise NotFittedError(
376
+ "When `prefit=True`, `estimator` is expected to be a fitted "
377
+ "estimator."
378
+ ) from exc
379
+ self.estimator_ = deepcopy(self.estimator)
380
+ else:
381
+ if _routing_enabled():
382
+ routed_params = process_routing(self, "fit", **fit_params)
383
+ self.estimator_ = clone(self.estimator)
384
+ self.estimator_.fit(X, y, **routed_params.estimator.fit)
385
+ else:
386
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
387
+ self.estimator_ = clone(self.estimator)
388
+ self.estimator_.fit(X, y, **fit_params)
389
+
390
+ if hasattr(self.estimator_, "feature_names_in_"):
391
+ self.feature_names_in_ = self.estimator_.feature_names_in_
392
+ else:
393
+ self._check_feature_names(X, reset=True)
394
+
395
+ return self
396
+
397
+ @property
398
+ def threshold_(self):
399
+ """Threshold value used for feature selection."""
400
+ scores = _get_feature_importances(
401
+ estimator=self.estimator_,
402
+ getter=self.importance_getter,
403
+ transform_func="norm",
404
+ norm_order=self.norm_order,
405
+ )
406
+ return _calculate_threshold(self.estimator, scores, self.threshold)
407
+
408
+ @available_if(_estimator_has("partial_fit"))
409
+ @_fit_context(
410
+ # SelectFromModel.estimator is not validated yet
411
+ prefer_skip_nested_validation=False
412
+ )
413
+ def partial_fit(self, X, y=None, **partial_fit_params):
414
+ """Fit the SelectFromModel meta-transformer only once.
415
+
416
+ Parameters
417
+ ----------
418
+ X : array-like of shape (n_samples, n_features)
419
+ The training input samples.
420
+
421
+ y : array-like of shape (n_samples,), default=None
422
+ The target values (integers that correspond to classes in
423
+ classification, real numbers in regression).
424
+
425
+ **partial_fit_params : dict
426
+ - If `enable_metadata_routing=False` (default):
427
+
428
+ Parameters directly passed to the `partial_fit` method of the
429
+ sub-estimator.
430
+
431
+ - If `enable_metadata_routing=True`:
432
+
433
+ Parameters passed to the `partial_fit` method of the
434
+ sub-estimator. They are ignored if `prefit=True`.
435
+
436
+ .. versionchanged:: 1.4
437
+ `**partial_fit_params` are routed to the sub-estimator, if
438
+ `enable_metadata_routing=True` is set via
439
+ :func:`~sklearn.set_config`, which allows for aliasing.
440
+
441
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
442
+ more details.
443
+
444
+ Returns
445
+ -------
446
+ self : object
447
+ Fitted estimator.
448
+ """
449
+ first_call = not hasattr(self, "estimator_")
450
+
451
+ if first_call:
452
+ self._check_max_features(X)
453
+
454
+ if self.prefit:
455
+ if first_call:
456
+ try:
457
+ check_is_fitted(self.estimator)
458
+ except NotFittedError as exc:
459
+ raise NotFittedError(
460
+ "When `prefit=True`, `estimator` is expected to be a fitted "
461
+ "estimator."
462
+ ) from exc
463
+ self.estimator_ = deepcopy(self.estimator)
464
+ return self
465
+
466
+ if first_call:
467
+ self.estimator_ = clone(self.estimator)
468
+ if _routing_enabled():
469
+ routed_params = process_routing(self, "partial_fit", **partial_fit_params)
470
+ self.estimator_ = clone(self.estimator)
471
+ self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit)
472
+ else:
473
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
474
+ self.estimator_.partial_fit(X, y, **partial_fit_params)
475
+
476
+ if hasattr(self.estimator_, "feature_names_in_"):
477
+ self.feature_names_in_ = self.estimator_.feature_names_in_
478
+ else:
479
+ self._check_feature_names(X, reset=first_call)
480
+
481
+ return self
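A minimal sketch of the incremental path implemented by `partial_fit` above (the `SGDClassifier`, the two mini-batches, the toy data and the `threshold="mean"` choice are illustrative assumptions, not part of this diff):

    import numpy as np
    from sklearn.feature_selection import SelectFromModel
    from sklearn.linear_model import SGDClassifier

    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 5))
    y = (X[:, 0] + X[:, 2] > 0).astype(int)

    selector = SelectFromModel(SGDClassifier(random_state=0), threshold="mean")
    # Feed the data in two mini-batches; `classes` must be passed on the first
    # call and is forwarded to the sub-estimator's partial_fit.
    selector.partial_fit(X[:100], y[:100], classes=[0, 1])
    selector.partial_fit(X[100:], y[100:])
    print(selector.get_support())  # boolean mask over the 5 features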
482
+
483
+ @property
484
+ def n_features_in_(self):
485
+ """Number of features seen during `fit`."""
486
+ # For consistency with other estimators we raise an AttributeError so
487
+ # that hasattr() fails if the estimator isn't fitted.
488
+ try:
489
+ check_is_fitted(self)
490
+ except NotFittedError as nfe:
491
+ raise AttributeError(
492
+ "{} object has no n_features_in_ attribute.".format(
493
+ self.__class__.__name__
494
+ )
495
+ ) from nfe
496
+
497
+ return self.estimator_.n_features_in_
498
+
499
+ def get_metadata_routing(self):
500
+ """Get metadata routing of this object.
501
+
502
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
503
+ mechanism works.
504
+
505
+ .. versionadded:: 1.4
506
+
507
+ Returns
508
+ -------
509
+ routing : MetadataRouter
510
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
511
+ routing information.
512
+ """
513
+ router = MetadataRouter(owner=self.__class__.__name__).add(
514
+ estimator=self.estimator,
515
+ method_mapping=MethodMapping()
516
+ .add(callee="partial_fit", caller="partial_fit")
517
+ .add(callee="fit", caller="fit"),
518
+ )
519
+ return router
520
+
521
+ def _more_tags(self):
522
+ return {"allow_nan": _safe_tags(self.estimator, key="allow_nan")}
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py ADDED
@@ -0,0 +1,514 @@
1
+ # Author: Nikolay Mayorov <[email protected]>
2
+ # License: 3-clause BSD
3
+
4
+ from numbers import Integral
5
+
6
+ import numpy as np
7
+ from scipy.sparse import issparse
8
+ from scipy.special import digamma
9
+
10
+ from ..metrics.cluster import mutual_info_score
11
+ from ..neighbors import KDTree, NearestNeighbors
12
+ from ..preprocessing import scale
13
+ from ..utils import check_random_state
14
+ from ..utils._param_validation import Interval, StrOptions, validate_params
15
+ from ..utils.multiclass import check_classification_targets
16
+ from ..utils.validation import check_array, check_X_y
17
+
18
+
19
+ def _compute_mi_cc(x, y, n_neighbors):
20
+ """Compute mutual information between two continuous variables.
21
+
22
+ Parameters
23
+ ----------
24
+ x, y : ndarray, shape (n_samples,)
25
+ Samples of two continuous random variables, must have an identical
26
+ shape.
27
+
28
+ n_neighbors : int
29
+ Number of nearest neighbors to search for each point, see [1]_.
30
+
31
+ Returns
32
+ -------
33
+ mi : float
34
+ Estimated mutual information in nat units. If the estimate turns out to be
35
+ negative, it is replaced by 0.
36
+
37
+ Notes
38
+ -----
39
+ True mutual information can't be negative. If its estimate by a numerical
40
+ method is negative, it means (providing the method is adequate) that the
41
+ mutual information is close to 0 and replacing it by 0 is a reasonable
42
+ strategy.
43
+
44
+ References
45
+ ----------
46
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
47
+ information". Phys. Rev. E 69, 2004.
48
+ """
49
+ n_samples = x.size
50
+
51
+ x = x.reshape((-1, 1))
52
+ y = y.reshape((-1, 1))
53
+ xy = np.hstack((x, y))
54
+
55
+ # Here we rely on NearestNeighbors to select the fastest algorithm.
56
+ nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors)
57
+
58
+ nn.fit(xy)
59
+ radius = nn.kneighbors()[0]
60
+ radius = np.nextafter(radius[:, -1], 0)
61
+
62
+ # KDTree is explicitly fit to allow for the querying of number of
63
+ # neighbors within a specified radius
64
+ kd = KDTree(x, metric="chebyshev")
65
+ nx = kd.query_radius(x, radius, count_only=True, return_distance=False)
66
+ nx = np.array(nx) - 1.0
67
+
68
+ kd = KDTree(y, metric="chebyshev")
69
+ ny = kd.query_radius(y, radius, count_only=True, return_distance=False)
70
+ ny = np.array(ny) - 1.0
71
+
72
+ mi = (
73
+ digamma(n_samples)
74
+ + digamma(n_neighbors)
75
+ - np.mean(digamma(nx + 1))
76
+ - np.mean(digamma(ny + 1))
77
+ )
78
+
79
+ return max(0, mi)
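For intuition, the Kraskov k-nearest-neighbor estimator implemented in `_compute_mi_cc` can be sanity-checked against the closed-form mutual information of a bivariate Gaussian, I = -0.5 * log(1 - rho^2) nats. A rough sketch using the public `mutual_info_regression` wrapper (the correlation and sample size are illustrative assumptions):

    import numpy as np
    from sklearn.feature_selection import mutual_info_regression

    rho = 0.8
    rng = np.random.default_rng(0)
    cov = [[1.0, rho], [rho, 1.0]]
    x, y = rng.multivariate_normal([0.0, 0.0], cov, size=5000).T
    # kNN estimate (k=3) versus the analytic value for a bivariate Gaussian.
    mi_est = mutual_info_regression(x.reshape(-1, 1), y, n_neighbors=3,
                                    random_state=0)[0]
    mi_true = -0.5 * np.log(1.0 - rho ** 2)
    print(mi_est, mi_true)  # the two values should be close (about 0.51 nats)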
80
+
81
+
82
+ def _compute_mi_cd(c, d, n_neighbors):
83
+ """Compute mutual information between continuous and discrete variables.
84
+
85
+ Parameters
86
+ ----------
87
+ c : ndarray, shape (n_samples,)
88
+ Samples of a continuous random variable.
89
+
90
+ d : ndarray, shape (n_samples,)
91
+ Samples of a discrete random variable.
92
+
93
+ n_neighbors : int
94
+ Number of nearest neighbors to search for each point, see [1]_.
95
+
96
+ Returns
97
+ -------
98
+ mi : float
99
+ Estimated mutual information in nat units. If the estimate turns out to be
100
+ negative, it is replaced by 0.
101
+
102
+ Notes
103
+ -----
104
+ True mutual information can't be negative. If its estimate by a numerical
105
+ method is negative, it means (providing the method is adequate) that the
106
+ mutual information is close to 0 and replacing it by 0 is a reasonable
107
+ strategy.
108
+
109
+ References
110
+ ----------
111
+ .. [1] B. C. Ross "Mutual Information between Discrete and Continuous
112
+ Data Sets". PLoS ONE 9(2), 2014.
113
+ """
114
+ n_samples = c.shape[0]
115
+ c = c.reshape((-1, 1))
116
+
117
+ radius = np.empty(n_samples)
118
+ label_counts = np.empty(n_samples)
119
+ k_all = np.empty(n_samples)
120
+ nn = NearestNeighbors()
121
+ for label in np.unique(d):
122
+ mask = d == label
123
+ count = np.sum(mask)
124
+ if count > 1:
125
+ k = min(n_neighbors, count - 1)
126
+ nn.set_params(n_neighbors=k)
127
+ nn.fit(c[mask])
128
+ r = nn.kneighbors()[0]
129
+ radius[mask] = np.nextafter(r[:, -1], 0)
130
+ k_all[mask] = k
131
+ label_counts[mask] = count
132
+
133
+ # Ignore points with unique labels.
134
+ mask = label_counts > 1
135
+ n_samples = np.sum(mask)
136
+ label_counts = label_counts[mask]
137
+ k_all = k_all[mask]
138
+ c = c[mask]
139
+ radius = radius[mask]
140
+
141
+ kd = KDTree(c)
142
+ m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)
143
+ m_all = np.array(m_all)
144
+
145
+ mi = (
146
+ digamma(n_samples)
147
+ + np.mean(digamma(k_all))
148
+ - np.mean(digamma(label_counts))
149
+ - np.mean(digamma(m_all))
150
+ )
151
+
152
+ return max(0, mi)
153
+
154
+
155
+ def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
156
+ """Compute mutual information between two variables.
157
+
158
+ This is a simple wrapper which selects a proper function to call based on
159
+ whether `x` and `y` are discrete or not.
160
+ """
161
+ if x_discrete and y_discrete:
162
+ return mutual_info_score(x, y)
163
+ elif x_discrete and not y_discrete:
164
+ return _compute_mi_cd(y, x, n_neighbors)
165
+ elif not x_discrete and y_discrete:
166
+ return _compute_mi_cd(x, y, n_neighbors)
167
+ else:
168
+ return _compute_mi_cc(x, y, n_neighbors)
169
+
170
+
171
+ def _iterate_columns(X, columns=None):
172
+ """Iterate over columns of a matrix.
173
+
174
+ Parameters
175
+ ----------
176
+ X : ndarray or csc_matrix, shape (n_samples, n_features)
177
+ Matrix over which to iterate.
178
+
179
+ columns : iterable or None, default=None
180
+ Indices of columns to iterate over. If None, iterate over all columns.
181
+
182
+ Yields
183
+ ------
184
+ x : ndarray, shape (n_samples,)
185
+ Columns of `X` in dense format.
186
+ """
187
+ if columns is None:
188
+ columns = range(X.shape[1])
189
+
190
+ if issparse(X):
191
+ for i in columns:
192
+ x = np.zeros(X.shape[0])
193
+ start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]
194
+ x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
195
+ yield x
196
+ else:
197
+ for i in columns:
198
+ yield X[:, i]
199
+
200
+
201
+ def _estimate_mi(
202
+ X,
203
+ y,
204
+ discrete_features="auto",
205
+ discrete_target=False,
206
+ n_neighbors=3,
207
+ copy=True,
208
+ random_state=None,
209
+ ):
210
+ """Estimate mutual information between the features and the target.
211
+
212
+ Parameters
213
+ ----------
214
+ X : array-like or sparse matrix, shape (n_samples, n_features)
215
+ Feature matrix.
216
+
217
+ y : array-like of shape (n_samples,)
218
+ Target vector.
219
+
220
+ discrete_features : {'auto', bool, array-like}, default='auto'
221
+ If bool, then determines whether to consider all features discrete
222
+ or continuous. If array, then it should be either a boolean mask
223
+ with shape (n_features,) or array with indices of discrete features.
224
+ If 'auto', it is assigned to False for dense `X` and to True for
225
+ sparse `X`.
226
+
227
+ discrete_target : bool, default=False
228
+ Whether to consider `y` as a discrete variable.
229
+
230
+ n_neighbors : int, default=3
231
+ Number of neighbors to use for MI estimation for continuous variables,
232
+ see [1]_ and [2]_. Higher values reduce variance of the estimation, but
233
+ could introduce a bias.
234
+
235
+ copy : bool, default=True
236
+ Whether to make a copy of the given data. If set to False, the initial
237
+ data will be overwritten.
238
+
239
+ random_state : int, RandomState instance or None, default=None
240
+ Determines random number generation for adding small noise to
241
+ continuous variables in order to remove repeated values.
242
+ Pass an int for reproducible results across multiple function calls.
243
+ See :term:`Glossary <random_state>`.
244
+
245
+ Returns
246
+ -------
247
+ mi : ndarray, shape (n_features,)
248
+ Estimated mutual information between each feature and the target in
249
+ nat units. A negative value will be replaced by 0.
250
+
251
+ References
252
+ ----------
253
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
254
+ information". Phys. Rev. E 69, 2004.
255
+ .. [2] B. C. Ross "Mutual Information between Discrete and Continuous
256
+ Data Sets". PLoS ONE 9(2), 2014.
257
+ """
258
+ X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target)
259
+ n_samples, n_features = X.shape
260
+
261
+ if isinstance(discrete_features, (str, bool)):
262
+ if isinstance(discrete_features, str):
263
+ if discrete_features == "auto":
264
+ discrete_features = issparse(X)
265
+ else:
266
+ raise ValueError("Invalid string value for discrete_features.")
267
+ discrete_mask = np.empty(n_features, dtype=bool)
268
+ discrete_mask.fill(discrete_features)
269
+ else:
270
+ discrete_features = check_array(discrete_features, ensure_2d=False)
271
+ if discrete_features.dtype != "bool":
272
+ discrete_mask = np.zeros(n_features, dtype=bool)
273
+ discrete_mask[discrete_features] = True
274
+ else:
275
+ discrete_mask = discrete_features
276
+
277
+ continuous_mask = ~discrete_mask
278
+ if np.any(continuous_mask) and issparse(X):
279
+ raise ValueError("Sparse matrix `X` can't have continuous features.")
280
+
281
+ rng = check_random_state(random_state)
282
+ if np.any(continuous_mask):
283
+ X = X.astype(np.float64, copy=copy)
284
+ X[:, continuous_mask] = scale(
285
+ X[:, continuous_mask], with_mean=False, copy=False
286
+ )
287
+
288
+ # Add small noise to continuous features as advised in Kraskov et. al.
289
+ means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))
290
+ X[:, continuous_mask] += (
291
+ 1e-10
292
+ * means
293
+ * rng.standard_normal(size=(n_samples, np.sum(continuous_mask)))
294
+ )
295
+
296
+ if not discrete_target:
297
+ y = scale(y, with_mean=False)
298
+ y += (
299
+ 1e-10
300
+ * np.maximum(1, np.mean(np.abs(y)))
301
+ * rng.standard_normal(size=n_samples)
302
+ )
303
+
304
+ mi = [
305
+ _compute_mi(x, y, discrete_feature, discrete_target, n_neighbors)
306
+ for x, discrete_feature in zip(_iterate_columns(X), discrete_mask)
307
+ ]
308
+
309
+ return np.array(mi)
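A short sketch of how the `discrete_features` argument handled by `_estimate_mi` above is used from the public API (the toy data and the choice to mark only the second column as discrete are illustrative assumptions):

    import numpy as np
    from sklearn.feature_selection import mutual_info_regression

    rng = np.random.default_rng(0)
    x_cont = rng.normal(size=200)           # continuous feature
    x_disc = rng.integers(0, 3, size=200)   # discrete feature with 3 levels
    y = x_cont + 0.5 * x_disc + 0.1 * rng.normal(size=200)
    X = np.column_stack([x_cont, x_disc])
    # Mark only column 1 as discrete; column 0 goes through the kNN estimator.
    mi = mutual_info_regression(X, y, discrete_features=[1], random_state=0)
    print(mi)  # two non-negative MI estimates in nats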
310
+
311
+
312
+ @validate_params(
313
+ {
314
+ "X": ["array-like", "sparse matrix"],
315
+ "y": ["array-like"],
316
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
317
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
318
+ "copy": ["boolean"],
319
+ "random_state": ["random_state"],
320
+ },
321
+ prefer_skip_nested_validation=True,
322
+ )
323
+ def mutual_info_regression(
324
+ X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
325
+ ):
326
+ """Estimate mutual information for a continuous target variable.
327
+
328
+ Mutual information (MI) [1]_ between two random variables is a non-negative
329
+ value, which measures the dependency between the variables. It is equal
330
+ to zero if and only if two random variables are independent, and higher
331
+ values mean higher dependency.
332
+
333
+ The function relies on nonparametric methods based on entropy estimation
334
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
335
+ methods are based on the idea originally proposed in [4]_.
336
+
337
+ It can be used for univariate feature selection; read more in the
338
+ :ref:`User Guide <univariate_feature_selection>`.
339
+
340
+ Parameters
341
+ ----------
342
+ X : array-like or sparse matrix, shape (n_samples, n_features)
343
+ Feature matrix.
344
+
345
+ y : array-like of shape (n_samples,)
346
+ Target vector.
347
+
348
+ discrete_features : {'auto', bool, array-like}, default='auto'
349
+ If bool, then determines whether to consider all features discrete
350
+ or continuous. If array, then it should be either a boolean mask
351
+ with shape (n_features,) or array with indices of discrete features.
352
+ If 'auto', it is assigned to False for dense `X` and to True for
353
+ sparse `X`.
354
+
355
+ n_neighbors : int, default=3
356
+ Number of neighbors to use for MI estimation for continuous variables,
357
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
358
+ could introduce a bias.
359
+
360
+ copy : bool, default=True
361
+ Whether to make a copy of the given data. If set to False, the initial
362
+ data will be overwritten.
363
+
364
+ random_state : int, RandomState instance or None, default=None
365
+ Determines random number generation for adding small noise to
366
+ continuous variables in order to remove repeated values.
367
+ Pass an int for reproducible results across multiple function calls.
368
+ See :term:`Glossary <random_state>`.
369
+
370
+ Returns
371
+ -------
372
+ mi : ndarray, shape (n_features,)
373
+ Estimated mutual information between each feature and the target in
374
+ nat units.
375
+
376
+ Notes
377
+ -----
378
+ 1. The term "discrete features" is used instead of naming them
379
+ "categorical", because it describes the essence more accurately.
380
+ For example, pixel intensities of an image are discrete features
381
+ (but hardly categorical) and you will get better results if you mark them
382
+ as such. Also note that treating a continuous variable as discrete and
383
+ vice versa will usually give incorrect results, so be attentive about
384
+ that.
385
+ 2. True mutual information can't be negative. If its estimate turns out
386
+ to be negative, it is replaced by zero.
387
+
388
+ References
389
+ ----------
390
+ .. [1] `Mutual Information
391
+ <https://en.wikipedia.org/wiki/Mutual_information>`_
392
+ on Wikipedia.
393
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
394
+ information". Phys. Rev. E 69, 2004.
395
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
396
+ Data Sets". PLoS ONE 9(2), 2014.
397
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
398
+ of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
399
+
400
+ Examples
401
+ --------
402
+ >>> from sklearn.datasets import make_regression
403
+ >>> from sklearn.feature_selection import mutual_info_regression
404
+ >>> X, y = make_regression(
405
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
406
+ ... )
407
+ >>> mutual_info_regression(X, y)
408
+ array([0.1..., 2.6... , 0.0...])
409
+ """
410
+ return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state)
411
+
412
+
413
+ @validate_params(
414
+ {
415
+ "X": ["array-like", "sparse matrix"],
416
+ "y": ["array-like"],
417
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
418
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
419
+ "copy": ["boolean"],
420
+ "random_state": ["random_state"],
421
+ },
422
+ prefer_skip_nested_validation=True,
423
+ )
424
+ def mutual_info_classif(
425
+ X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
426
+ ):
427
+ """Estimate mutual information for a discrete target variable.
428
+
429
+ Mutual information (MI) [1]_ between two random variables is a non-negative
430
+ value, which measures the dependency between the variables. It is equal
431
+ to zero if and only if two random variables are independent, and higher
432
+ values mean higher dependency.
433
+
434
+ The function relies on nonparametric methods based on entropy estimation
435
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
436
+ methods are based on the idea originally proposed in [4]_.
437
+
438
+ It can be used for univariate feature selection; read more in the
439
+ :ref:`User Guide <univariate_feature_selection>`.
440
+
441
+ Parameters
442
+ ----------
443
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
444
+ Feature matrix.
445
+
446
+ y : array-like of shape (n_samples,)
447
+ Target vector.
448
+
449
+ discrete_features : 'auto', bool or array-like, default='auto'
450
+ If bool, then determines whether to consider all features discrete
451
+ or continuous. If array, then it should be either a boolean mask
452
+ with shape (n_features,) or array with indices of discrete features.
453
+ If 'auto', it is assigned to False for dense `X` and to True for
454
+ sparse `X`.
455
+
456
+ n_neighbors : int, default=3
457
+ Number of neighbors to use for MI estimation for continuous variables,
458
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
459
+ could introduce a bias.
460
+
461
+ copy : bool, default=True
462
+ Whether to make a copy of the given data. If set to False, the initial
463
+ data will be overwritten.
464
+
465
+ random_state : int, RandomState instance or None, default=None
466
+ Determines random number generation for adding small noise to
467
+ continuous variables in order to remove repeated values.
468
+ Pass an int for reproducible results across multiple function calls.
469
+ See :term:`Glossary <random_state>`.
470
+
471
+ Returns
472
+ -------
473
+ mi : ndarray, shape (n_features,)
474
+ Estimated mutual information between each feature and the target in
475
+ nat units.
476
+
477
+ Notes
478
+ -----
479
+ 1. The term "discrete features" is used instead of naming them
480
+ "categorical", because it describes the essence more accurately.
481
+ For example, pixel intensities of an image are discrete features
482
+ (but hardly categorical) and you will get better results if mark them
483
+ as such. Also note, that treating a continuous variable as discrete and
484
+ vice versa will usually give incorrect results, so be attentive about
485
+ that.
486
+ 2. True mutual information can't be negative. If its estimate turns out
487
+ to be negative, it is replaced by zero.
488
+
489
+ References
490
+ ----------
491
+ .. [1] `Mutual Information
492
+ <https://en.wikipedia.org/wiki/Mutual_information>`_
493
+ on Wikipedia.
494
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
495
+ information". Phys. Rev. E 69, 2004.
496
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
497
+ Data Sets". PLoS ONE 9(2), 2014.
498
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
499
+ of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
500
+
501
+ Examples
502
+ --------
503
+ >>> from sklearn.datasets import make_classification
504
+ >>> from sklearn.feature_selection import mutual_info_classif
505
+ >>> X, y = make_classification(
506
+ ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
507
+ ... shuffle=False, random_state=42
508
+ ... )
509
+ >>> mutual_info_classif(X, y)
510
+ array([0.58..., 0.10..., 0.19..., 0.09... , 0. ,
511
+ 0. , 0. , 0. , 0. , 0. ])
512
+ """
513
+ check_classification_targets(y)
514
+ return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy, random_state)
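Tying the two public functions in this file back to the selectors elsewhere in the package, a minimal sketch combining `mutual_info_classif` with `SelectKBest` (the dataset and `k=2` are illustrative assumptions, not part of this diff):

    from sklearn.datasets import make_classification
    from sklearn.feature_selection import SelectKBest, mutual_info_classif

    X, y = make_classification(n_samples=200, n_features=6, n_informative=2,
                               random_state=0)
    # Keep the two features with the highest estimated MI with the target.
    selector = SelectKBest(score_func=mutual_info_classif, k=2).fit(X, y)
    print(selector.get_support())
    print(selector.transform(X).shape)  # (200, 2)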
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py ADDED
@@ -0,0 +1,792 @@
1
+ # Authors: Alexandre Gramfort <[email protected]>
2
+ # Vincent Michel <[email protected]>
3
+ # Gilles Louppe <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ """Recursive feature elimination for feature ranking"""
8
+
9
+ from numbers import Integral
10
+
11
+ import numpy as np
12
+ from joblib import effective_n_jobs
13
+
14
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
15
+ from ..metrics import check_scoring
16
+ from ..model_selection import check_cv
17
+ from ..model_selection._validation import _score
18
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt
19
+ from ..utils.metadata_routing import (
20
+ _raise_for_unsupported_routing,
21
+ _RoutingNotSupportedMixin,
22
+ )
23
+ from ..utils.metaestimators import _safe_split, available_if
24
+ from ..utils.parallel import Parallel, delayed
25
+ from ..utils.validation import check_is_fitted
26
+ from ._base import SelectorMixin, _get_feature_importances
27
+
28
+
29
+ def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
30
+ """
31
+ Return the score for a fit across one fold.
32
+ """
33
+ X_train, y_train = _safe_split(estimator, X, y, train)
34
+ X_test, y_test = _safe_split(estimator, X, y, test, train)
35
+ return rfe._fit(
36
+ X_train,
37
+ y_train,
38
+ lambda estimator, features: _score(
39
+ # TODO(SLEP6): pass score_params here
40
+ estimator,
41
+ X_test[:, features],
42
+ y_test,
43
+ scorer,
44
+ score_params=None,
45
+ ),
46
+ ).scores_
47
+
48
+
49
+ def _estimator_has(attr):
50
+ """Check if we can delegate a method to the underlying estimator.
51
+
52
+ First, we check the fitted `estimator_` if available, otherwise we check the
53
+ unfitted `estimator`. We raise the original `AttributeError` if `attr` does
54
+ not exist. This function is used together with `available_if`.
55
+ """
56
+
57
+ def check(self):
58
+ if hasattr(self, "estimator_"):
59
+ getattr(self.estimator_, attr)
60
+ else:
61
+ getattr(self.estimator, attr)
62
+
63
+ return True
64
+
65
+ return check
66
+
67
+
68
+ class RFE(_RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator):
69
+ """Feature ranking with recursive feature elimination.
70
+
71
+ Given an external estimator that assigns weights to features (e.g., the
72
+ coefficients of a linear model), the goal of recursive feature elimination
73
+ (RFE) is to select features by recursively considering smaller and smaller
74
+ sets of features. First, the estimator is trained on the initial set of
75
+ features and the importance of each feature is obtained either through
76
+ any specific attribute or callable.
77
+ Then, the least important features are pruned from the current set of features.
78
+ That procedure is recursively repeated on the pruned set until the desired
79
+ number of features to select is eventually reached.
80
+
81
+ Read more in the :ref:`User Guide <rfe>`.
82
+
83
+ Parameters
84
+ ----------
85
+ estimator : ``Estimator`` instance
86
+ A supervised learning estimator with a ``fit`` method that provides
87
+ information about feature importance
88
+ (e.g. `coef_`, `feature_importances_`).
89
+
90
+ n_features_to_select : int or float, default=None
91
+ The number of features to select. If `None`, half of the features are
92
+ selected. If integer, the parameter is the absolute number of features
93
+ to select. If float between 0 and 1, it is the fraction of features to
94
+ select.
95
+
96
+ .. versionchanged:: 0.24
97
+ Added float values for fractions.
98
+
99
+ step : int or float, default=1
100
+ If greater than or equal to 1, then ``step`` corresponds to the
101
+ (integer) number of features to remove at each iteration.
102
+ If within (0.0, 1.0), then ``step`` corresponds to the percentage
103
+ (rounded down) of features to remove at each iteration.
104
+
105
+ verbose : int, default=0
106
+ Controls verbosity of output.
107
+
108
+ importance_getter : str or callable, default='auto'
109
+ If 'auto', uses the feature importance either through a `coef_`
110
+ or `feature_importances_` attributes of estimator.
111
+
112
+ Also accepts a string that specifies an attribute name/path
113
+ for extracting feature importance (implemented with `attrgetter`).
114
+ For example, give `regressor_.coef_` in case of
115
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
116
+ `named_steps.clf.feature_importances_` in case of
117
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
118
+
119
+ If `callable`, overrides the default feature importance getter.
120
+ The callable is passed with the fitted estimator and it should
121
+ return importance for each feature.
122
+
123
+ .. versionadded:: 0.24
124
+
125
+ Attributes
126
+ ----------
127
+ classes_ : ndarray of shape (n_classes,)
128
+ The classes labels. Only available when `estimator` is a classifier.
129
+
130
+ estimator_ : ``Estimator`` instance
131
+ The fitted estimator used to select features.
132
+
133
+ n_features_ : int
134
+ The number of selected features.
135
+
136
+ n_features_in_ : int
137
+ Number of features seen during :term:`fit`. Only defined if the
138
+ underlying estimator exposes such an attribute when fit.
139
+
140
+ .. versionadded:: 0.24
141
+
142
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
143
+ Names of features seen during :term:`fit`. Defined only when `X`
144
+ has feature names that are all strings.
145
+
146
+ .. versionadded:: 1.0
147
+
148
+ ranking_ : ndarray of shape (n_features,)
149
+ The feature ranking, such that ``ranking_[i]`` corresponds to the
150
+ ranking position of the i-th feature. Selected (i.e., estimated
151
+ best) features are assigned rank 1.
152
+
153
+ support_ : ndarray of shape (n_features,)
154
+ The mask of selected features.
155
+
156
+ See Also
157
+ --------
158
+ RFECV : Recursive feature elimination with built-in cross-validated
159
+ selection of the best number of features.
160
+ SelectFromModel : Feature selection based on thresholds of importance
161
+ weights.
162
+ SequentialFeatureSelector : Sequential cross-validation based feature
163
+ selection. Does not rely on importance weights.
164
+
165
+ Notes
166
+ -----
167
+ Allows NaN/Inf in the input if the underlying estimator does as well.
168
+
169
+ References
170
+ ----------
171
+
172
+ .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
173
+ for cancer classification using support vector machines",
174
+ Mach. Learn., 46(1-3), 389--422, 2002.
175
+
176
+ Examples
177
+ --------
178
+ The following example shows how to retrieve the 5 most informative
179
+ features in the Friedman #1 dataset.
180
+
181
+ >>> from sklearn.datasets import make_friedman1
182
+ >>> from sklearn.feature_selection import RFE
183
+ >>> from sklearn.svm import SVR
184
+ >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
185
+ >>> estimator = SVR(kernel="linear")
186
+ >>> selector = RFE(estimator, n_features_to_select=5, step=1)
187
+ >>> selector = selector.fit(X, y)
188
+ >>> selector.support_
189
+ array([ True, True, True, True, True, False, False, False, False,
190
+ False])
191
+ >>> selector.ranking_
192
+ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
193
+ """
194
+
195
+ _parameter_constraints: dict = {
196
+ "estimator": [HasMethods(["fit"])],
197
+ "n_features_to_select": [
198
+ None,
199
+ Interval(RealNotInt, 0, 1, closed="right"),
200
+ Interval(Integral, 0, None, closed="neither"),
201
+ ],
202
+ "step": [
203
+ Interval(Integral, 0, None, closed="neither"),
204
+ Interval(RealNotInt, 0, 1, closed="neither"),
205
+ ],
206
+ "verbose": ["verbose"],
207
+ "importance_getter": [str, callable],
208
+ }
209
+
210
+ def __init__(
211
+ self,
212
+ estimator,
213
+ *,
214
+ n_features_to_select=None,
215
+ step=1,
216
+ verbose=0,
217
+ importance_getter="auto",
218
+ ):
219
+ self.estimator = estimator
220
+ self.n_features_to_select = n_features_to_select
221
+ self.step = step
222
+ self.importance_getter = importance_getter
223
+ self.verbose = verbose
224
+
225
+ @property
226
+ def _estimator_type(self):
227
+ return self.estimator._estimator_type
228
+
229
+ @property
230
+ def classes_(self):
231
+ """Classes labels available when `estimator` is a classifier.
232
+
233
+ Returns
234
+ -------
235
+ ndarray of shape (n_classes,)
236
+ """
237
+ return self.estimator_.classes_
238
+
239
+ @_fit_context(
240
+ # RFE.estimator is not validated yet
241
+ prefer_skip_nested_validation=False
242
+ )
243
+ def fit(self, X, y, **fit_params):
244
+ """Fit the RFE model and then the underlying estimator on the selected features.
245
+
246
+ Parameters
247
+ ----------
248
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
249
+ The training input samples.
250
+
251
+ y : array-like of shape (n_samples,)
252
+ The target values.
253
+
254
+ **fit_params : dict
255
+ Additional parameters passed to the `fit` method of the underlying
256
+ estimator.
257
+
258
+ Returns
259
+ -------
260
+ self : object
261
+ Fitted estimator.
262
+ """
263
+ _raise_for_unsupported_routing(self, "fit", **fit_params)
264
+ return self._fit(X, y, **fit_params)
265
+
266
+ def _fit(self, X, y, step_score=None, **fit_params):
267
+ # Parameter step_score controls the calculation of self.scores_
268
+ # step_score is not exposed to users
269
+ # and is used when implementing RFECV
270
+ # self.scores_ will not be calculated when calling _fit through fit
271
+
272
+ X, y = self._validate_data(
273
+ X,
274
+ y,
275
+ accept_sparse="csc",
276
+ ensure_min_features=2,
277
+ force_all_finite=False,
278
+ multi_output=True,
279
+ )
280
+
281
+ # Initialization
282
+ n_features = X.shape[1]
283
+ if self.n_features_to_select is None:
284
+ n_features_to_select = n_features // 2
285
+ elif isinstance(self.n_features_to_select, Integral): # int
286
+ n_features_to_select = self.n_features_to_select
287
+ else: # float
288
+ n_features_to_select = int(n_features * self.n_features_to_select)
289
+
290
+ if 0.0 < self.step < 1.0:
291
+ step = int(max(1, self.step * n_features))
292
+ else:
293
+ step = int(self.step)
294
+
295
+ support_ = np.ones(n_features, dtype=bool)
296
+ ranking_ = np.ones(n_features, dtype=int)
297
+
298
+ if step_score:
299
+ self.scores_ = []
300
+
301
+ # Elimination
302
+ while np.sum(support_) > n_features_to_select:
303
+ # Remaining features
304
+ features = np.arange(n_features)[support_]
305
+
306
+ # Rank the remaining features
307
+ estimator = clone(self.estimator)
308
+ if self.verbose > 0:
309
+ print("Fitting estimator with %d features." % np.sum(support_))
310
+
311
+ estimator.fit(X[:, features], y, **fit_params)
312
+
313
+ # Get importance and rank them
314
+ importances = _get_feature_importances(
315
+ estimator,
316
+ self.importance_getter,
317
+ transform_func="square",
318
+ )
319
+ ranks = np.argsort(importances)
320
+
321
+ # for the sparse case, ranks is a matrix
322
+ ranks = np.ravel(ranks)
323
+
324
+ # Eliminate the worst features
325
+ threshold = min(step, np.sum(support_) - n_features_to_select)
326
+
327
+ # Compute step score on the previous selection iteration
328
+ # because 'estimator' must use features
329
+ # that have not been eliminated yet
330
+ if step_score:
331
+ self.scores_.append(step_score(estimator, features))
332
+ support_[features[ranks][:threshold]] = False
333
+ ranking_[np.logical_not(support_)] += 1
334
+
335
+ # Set final attributes
336
+ features = np.arange(n_features)[support_]
337
+ self.estimator_ = clone(self.estimator)
338
+ self.estimator_.fit(X[:, features], y, **fit_params)
339
+
340
+ # Compute step score when only n_features_to_select features left
341
+ if step_score:
342
+ self.scores_.append(step_score(self.estimator_, features))
343
+ self.n_features_ = support_.sum()
344
+ self.support_ = support_
345
+ self.ranking_ = ranking_
346
+
347
+ return self
348
+
349
+ @available_if(_estimator_has("predict"))
350
+ def predict(self, X):
351
+ """Reduce X to the selected features and predict using the estimator.
352
+
353
+ Parameters
354
+ ----------
355
+ X : array of shape [n_samples, n_features]
356
+ The input samples.
357
+
358
+ Returns
359
+ -------
360
+ y : array of shape [n_samples]
361
+ The predicted target values.
362
+ """
363
+ check_is_fitted(self)
364
+ return self.estimator_.predict(self.transform(X))
365
+
366
+ @available_if(_estimator_has("score"))
367
+ def score(self, X, y, **fit_params):
368
+ """Reduce X to the selected features and return the score of the estimator.
369
+
370
+ Parameters
371
+ ----------
372
+ X : array of shape [n_samples, n_features]
373
+ The input samples.
374
+
375
+ y : array of shape [n_samples]
376
+ The target values.
377
+
378
+ **fit_params : dict
379
+ Parameters to pass to the `score` method of the underlying
380
+ estimator.
381
+
382
+ .. versionadded:: 1.0
383
+
384
+ Returns
385
+ -------
386
+ score : float
387
+ Score of the underlying base estimator computed with the selected
388
+ features returned by `rfe.transform(X)` and `y`.
389
+ """
390
+ check_is_fitted(self)
391
+ return self.estimator_.score(self.transform(X), y, **fit_params)
392
+
393
+ def _get_support_mask(self):
394
+ check_is_fitted(self)
395
+ return self.support_
396
+
397
+ @available_if(_estimator_has("decision_function"))
398
+ def decision_function(self, X):
399
+ """Compute the decision function of ``X``.
400
+
401
+ Parameters
402
+ ----------
403
+ X : {array-like or sparse matrix} of shape (n_samples, n_features)
404
+ The input samples. Internally, it will be converted to
405
+ ``dtype=np.float32`` and if a sparse matrix is provided
406
+ to a sparse ``csr_matrix``.
407
+
408
+ Returns
409
+ -------
410
+ score : array, shape = [n_samples, n_classes] or [n_samples]
411
+ The decision function of the input samples. The order of the
412
+ classes corresponds to that in the attribute :term:`classes_`.
413
+ Regression and binary classification produce an array of shape
414
+ [n_samples].
415
+ """
416
+ check_is_fitted(self)
417
+ return self.estimator_.decision_function(self.transform(X))
418
+
419
+ @available_if(_estimator_has("predict_proba"))
420
+ def predict_proba(self, X):
421
+ """Predict class probabilities for X.
422
+
423
+ Parameters
424
+ ----------
425
+ X : {array-like or sparse matrix} of shape (n_samples, n_features)
426
+ The input samples. Internally, it will be converted to
427
+ ``dtype=np.float32`` and if a sparse matrix is provided
428
+ to a sparse ``csr_matrix``.
429
+
430
+ Returns
431
+ -------
432
+ p : array of shape (n_samples, n_classes)
433
+ The class probabilities of the input samples. The order of the
434
+ classes corresponds to that in the attribute :term:`classes_`.
435
+ """
436
+ check_is_fitted(self)
437
+ return self.estimator_.predict_proba(self.transform(X))
438
+
439
+ @available_if(_estimator_has("predict_log_proba"))
440
+ def predict_log_proba(self, X):
441
+ """Predict class log-probabilities for X.
442
+
443
+ Parameters
444
+ ----------
445
+ X : array of shape [n_samples, n_features]
446
+ The input samples.
447
+
448
+ Returns
449
+ -------
450
+ p : array of shape (n_samples, n_classes)
451
+ The class log-probabilities of the input samples. The order of the
452
+ classes corresponds to that in the attribute :term:`classes_`.
453
+ """
454
+ check_is_fitted(self)
455
+ return self.estimator_.predict_log_proba(self.transform(X))
456
+
457
+ def _more_tags(self):
458
+ tags = {
459
+ "poor_score": True,
460
+ "requires_y": True,
461
+ "allow_nan": True,
462
+ }
463
+
464
+ # Adjust allow_nan if estimator explicitly defines `allow_nan`.
465
+ if hasattr(self.estimator, "_get_tags"):
466
+ tags["allow_nan"] = self.estimator._get_tags()["allow_nan"]
467
+
468
+ return tags
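Before moving on to `RFECV`, a minimal sketch of the callable `importance_getter` hook documented in the RFE docstring above (the logistic model, the absolute-coefficient getter and the toy data are illustrative assumptions):

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.feature_selection import RFE
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=100, n_features=8, n_informative=3,
                               random_state=0)

    def abs_coef(estimator):
        # Rank features by the absolute value of the fitted coefficients.
        return np.abs(estimator.coef_).ravel()

    rfe = RFE(LogisticRegression(max_iter=1000), n_features_to_select=3,
              importance_getter=abs_coef).fit(X, y)
    print(rfe.support_)
    print(rfe.ranking_)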
469
+
470
+
471
+ class RFECV(RFE):
472
+ """Recursive feature elimination with cross-validation to select features.
473
+
474
+ The number of features selected is tuned automatically by fitting an :class:`RFE`
475
+ selector on the different cross-validation splits (provided by the `cv` parameter).
476
+ The performance of the :class:`RFE` selector is evaluated using `scorer` for
477
+ different numbers of selected features and aggregated together. Finally, the scores
478
+ are averaged across folds and the number of features selected is set to the number
479
+ of features that maximize the cross-validation score.
480
+ See glossary entry for :term:`cross-validation estimator`.
481
+
482
+ Read more in the :ref:`User Guide <rfe>`.
483
+
484
+ Parameters
485
+ ----------
486
+ estimator : ``Estimator`` instance
487
+ A supervised learning estimator with a ``fit`` method that provides
488
+ information about feature importance either through a ``coef_``
489
+ attribute or through a ``feature_importances_`` attribute.
490
+
491
+ step : int or float, default=1
492
+ If greater than or equal to 1, then ``step`` corresponds to the
493
+ (integer) number of features to remove at each iteration.
494
+ If within (0.0, 1.0), then ``step`` corresponds to the percentage
495
+ (rounded down) of features to remove at each iteration.
496
+ Note that the last iteration may remove fewer than ``step`` features in
497
+ order to reach ``min_features_to_select``.
498
+
499
+ min_features_to_select : int, default=1
500
+ The minimum number of features to be selected. This number of features
501
+ will always be scored, even if the difference between the original
502
+ feature count and ``min_features_to_select`` isn't divisible by
503
+ ``step``.
504
+
505
+ .. versionadded:: 0.20
506
+
507
+ cv : int, cross-validation generator or an iterable, default=None
508
+ Determines the cross-validation splitting strategy.
509
+ Possible inputs for cv are:
510
+
511
+ - None, to use the default 5-fold cross-validation,
512
+ - integer, to specify the number of folds.
513
+ - :term:`CV splitter`,
514
+ - An iterable yielding (train, test) splits as arrays of indices.
515
+
516
+ For integer/None inputs, if ``y`` is binary or multiclass,
517
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. If the
518
+ estimator is a classifier or if ``y`` is neither binary nor multiclass,
519
+ :class:`~sklearn.model_selection.KFold` is used.
520
+
521
+ Refer :ref:`User Guide <cross_validation>` for the various
522
+ cross-validation strategies that can be used here.
523
+
524
+ .. versionchanged:: 0.22
525
+ ``cv`` default value of None changed from 3-fold to 5-fold.
526
+
527
+ scoring : str, callable or None, default=None
528
+ A string (see model evaluation documentation) or
529
+ a scorer callable object / function with signature
530
+ ``scorer(estimator, X, y)``.
531
+
532
+ verbose : int, default=0
533
+ Controls verbosity of output.
534
+
535
+ n_jobs : int or None, default=None
536
+ Number of cores to run in parallel while fitting across folds.
537
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
538
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
539
+ for more details.
540
+
541
+ .. versionadded:: 0.18
542
+
543
+ importance_getter : str or callable, default='auto'
544
+ If 'auto', uses the feature importance either through a `coef_`
545
+ or `feature_importances_` attributes of estimator.
546
+
547
+ Also accepts a string that specifies an attribute name/path
548
+ for extracting feature importance.
549
+ For example, give `regressor_.coef_` in case of
550
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
551
+ `named_steps.clf.feature_importances_` in case of
552
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
553
+
554
+ If `callable`, overrides the default feature importance getter.
555
+ The callable is passed with the fitted estimator and it should
556
+ return importance for each feature.
557
+
558
+ .. versionadded:: 0.24
559
+
560
+ Attributes
561
+ ----------
562
+ classes_ : ndarray of shape (n_classes,)
563
+ The classes labels. Only available when `estimator` is a classifier.
564
+
565
+ estimator_ : ``Estimator`` instance
566
+ The fitted estimator used to select features.
567
+
568
+ cv_results_ : dict of ndarrays
569
+ A dict with keys:
570
+
571
+ split(k)_test_score : ndarray of shape (n_subsets_of_features,)
572
+ The cross-validation scores across (k)th fold.
573
+
574
+ mean_test_score : ndarray of shape (n_subsets_of_features,)
575
+ Mean of scores over the folds.
576
+
577
+ std_test_score : ndarray of shape (n_subsets_of_features,)
578
+ Standard deviation of scores over the folds.
579
+
580
+ .. versionadded:: 1.0
581
+
582
+ n_features_ : int
583
+ The number of selected features with cross-validation.
584
+
585
+ n_features_in_ : int
586
+ Number of features seen during :term:`fit`. Only defined if the
587
+ underlying estimator exposes such an attribute when fit.
588
+
589
+ .. versionadded:: 0.24
590
+
591
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
592
+ Names of features seen during :term:`fit`. Defined only when `X`
593
+ has feature names that are all strings.
594
+
595
+ .. versionadded:: 1.0
596
+
597
+ ranking_ : ndarray of shape (n_features,)
598
+ The feature ranking, such that `ranking_[i]`
599
+ corresponds to the ranking
600
+ position of the i-th feature.
601
+ Selected (i.e., estimated best)
602
+ features are assigned rank 1.
603
+
604
+ support_ : ndarray of shape (n_features,)
605
+ The mask of selected features.
606
+
607
+ See Also
608
+ --------
609
+ RFE : Recursive feature elimination.
610
+
611
+ Notes
612
+ -----
613
+ The size of all values in ``cv_results_`` is equal to
614
+ ``ceil((n_features - min_features_to_select) / step) + 1``,
615
+ where step is the number of features removed at each iteration.
616
+
617
+ Allows NaN/Inf in the input if the underlying estimator does as well.
618
+
619
+ References
620
+ ----------
621
+
622
+ .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
623
+ for cancer classification using support vector machines",
624
+ Mach. Learn., 46(1-3), 389--422, 2002.
625
+
626
+ Examples
627
+ --------
628
+ The following example shows how to retrieve the 5 informative features (not
629
+ known a priori) in the Friedman #1 dataset.
630
+
631
+ >>> from sklearn.datasets import make_friedman1
632
+ >>> from sklearn.feature_selection import RFECV
633
+ >>> from sklearn.svm import SVR
634
+ >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
635
+ >>> estimator = SVR(kernel="linear")
636
+ >>> selector = RFECV(estimator, step=1, cv=5)
637
+ >>> selector = selector.fit(X, y)
638
+ >>> selector.support_
639
+ array([ True, True, True, True, True, False, False, False, False,
640
+ False])
641
+ >>> selector.ranking_
642
+ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
643
+ """
644
+
645
+ _parameter_constraints: dict = {
646
+ **RFE._parameter_constraints,
647
+ "min_features_to_select": [Interval(Integral, 0, None, closed="neither")],
648
+ "cv": ["cv_object"],
649
+ "scoring": [None, str, callable],
650
+ "n_jobs": [None, Integral],
651
+ }
652
+ _parameter_constraints.pop("n_features_to_select")
653
+
654
+ def __init__(
655
+ self,
656
+ estimator,
657
+ *,
658
+ step=1,
659
+ min_features_to_select=1,
660
+ cv=None,
661
+ scoring=None,
662
+ verbose=0,
663
+ n_jobs=None,
664
+ importance_getter="auto",
665
+ ):
666
+ self.estimator = estimator
667
+ self.step = step
668
+ self.importance_getter = importance_getter
669
+ self.cv = cv
670
+ self.scoring = scoring
671
+ self.verbose = verbose
672
+ self.n_jobs = n_jobs
673
+ self.min_features_to_select = min_features_to_select
674
+
675
+ @_fit_context(
676
+ # RFECV.estimator is not validated yet
677
+ prefer_skip_nested_validation=False
678
+ )
679
+ def fit(self, X, y, groups=None):
680
+ """Fit the RFE model and automatically tune the number of selected features.
681
+
682
+ Parameters
683
+ ----------
684
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
685
+ Training vector, where `n_samples` is the number of samples and
686
+ `n_features` is the total number of features.
687
+
688
+ y : array-like of shape (n_samples,)
689
+ Target values (integers for classification, real numbers for
690
+ regression).
691
+
692
+ groups : array-like of shape (n_samples,) or None, default=None
693
+ Group labels for the samples used while splitting the dataset into
694
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
695
+ instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
696
+
697
+ .. versionadded:: 0.20
698
+
699
+ Returns
700
+ -------
701
+ self : object
702
+ Fitted estimator.
703
+ """
704
+ _raise_for_unsupported_routing(self, "fit", groups=groups)
705
+ X, y = self._validate_data(
706
+ X,
707
+ y,
708
+ accept_sparse="csr",
709
+ ensure_min_features=2,
710
+ force_all_finite=False,
711
+ multi_output=True,
712
+ )
713
+
714
+ # Initialization
715
+ cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
716
+ scorer = check_scoring(self.estimator, scoring=self.scoring)
717
+ n_features = X.shape[1]
718
+
719
+ if 0.0 < self.step < 1.0:
720
+ step = int(max(1, self.step * n_features))
721
+ else:
722
+ step = int(self.step)
723
+
724
+ # Build an RFE object, which will evaluate and score each possible
725
+ # feature count, down to self.min_features_to_select
726
+ rfe = RFE(
727
+ estimator=self.estimator,
728
+ n_features_to_select=self.min_features_to_select,
729
+ importance_getter=self.importance_getter,
730
+ step=self.step,
731
+ verbose=self.verbose,
732
+ )
733
+
734
+ # Determine the number of subsets of features by fitting across
735
+ # the train folds and choosing the "features_to_select" parameter
736
+ # that gives the least averaged error across all folds.
737
+
738
+ # Note that joblib raises a non-picklable error for bound methods
739
+ # even if n_jobs is set to 1 with the default multiprocessing
740
+ # backend.
741
+ # This branching is done to
742
+ # make sure that user code that sets n_jobs to 1
743
+ # and provides bound methods as scorers is not broken with the
744
+ # addition of n_jobs parameter in version 0.18.
745
+
746
+ if effective_n_jobs(self.n_jobs) == 1:
747
+ parallel, func = list, _rfe_single_fit
748
+ else:
749
+ parallel = Parallel(n_jobs=self.n_jobs)
750
+ func = delayed(_rfe_single_fit)
751
+
752
+ scores = parallel(
753
+ func(rfe, self.estimator, X, y, train, test, scorer)
754
+ for train, test in cv.split(X, y, groups)
755
+ )
756
+
757
+ scores = np.array(scores)
758
+ scores_sum = np.sum(scores, axis=0)
759
+ scores_sum_rev = scores_sum[::-1]
760
+ argmax_idx = len(scores_sum) - np.argmax(scores_sum_rev) - 1
761
+ n_features_to_select = max(
762
+ n_features - (argmax_idx * step), self.min_features_to_select
763
+ )
764
+
765
+ # Re-execute an elimination with best_k over the whole set
766
+ rfe = RFE(
767
+ estimator=self.estimator,
768
+ n_features_to_select=n_features_to_select,
769
+ step=self.step,
770
+ importance_getter=self.importance_getter,
771
+ verbose=self.verbose,
772
+ )
773
+
774
+ rfe.fit(X, y)
775
+
776
+ # Set final attributes
777
+ self.support_ = rfe.support_
778
+ self.n_features_ = rfe.n_features_
779
+ self.ranking_ = rfe.ranking_
780
+ self.estimator_ = clone(self.estimator)
781
+ self.estimator_.fit(self._transform(X), y)
782
+
783
+ # reverse to stay consistent with before
784
+ scores_rev = scores[:, ::-1]
785
+ self.cv_results_ = {}
786
+ self.cv_results_["mean_test_score"] = np.mean(scores_rev, axis=0)
787
+ self.cv_results_["std_test_score"] = np.std(scores_rev, axis=0)
788
+
789
+ for i in range(scores.shape[0]):
790
+ self.cv_results_[f"split{i}_test_score"] = scores_rev[i]
791
+
792
+ return self
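To round off this file, a short sketch of inspecting the `cv_results_` dictionary populated at the end of `RFECV.fit` above (the dataset mirrors the docstring example; the interpretation comment assumes the default `min_features_to_select=1` and `step=1`):

    from sklearn.datasets import make_friedman1
    from sklearn.feature_selection import RFECV
    from sklearn.svm import SVR

    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    rfecv = RFECV(SVR(kernel="linear"), step=1, cv=5).fit(X, y)
    # With step=1, mean_test_score[i] is the mean CV score obtained when
    # keeping (min_features_to_select + i) features.
    print(rfecv.n_features_)
    print(rfecv.cv_results_["mean_test_score"])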
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py ADDED
@@ -0,0 +1,300 @@
1
+ """
2
+ Sequential feature selection
3
+ """
4
+ from numbers import Integral, Real
5
+
6
+ import numpy as np
7
+
8
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
9
+ from ..metrics import get_scorer_names
10
+ from ..model_selection import check_cv, cross_val_score
11
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
12
+ from ..utils._tags import _safe_tags
13
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
14
+ from ..utils.validation import check_is_fitted
15
+ from ._base import SelectorMixin
16
+
17
+
18
+ class SequentialFeatureSelector(
19
+ _RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator
20
+ ):
21
+ """Transformer that performs Sequential Feature Selection.
22
+
23
+ This Sequential Feature Selector adds (forward selection) or
24
+ removes (backward selection) features to form a feature subset in a
25
+ greedy fashion. At each stage, this estimator chooses the best feature to
26
+ add or remove based on the cross-validation score of an estimator. In
27
+ the case of unsupervised learning, this Sequential Feature Selector
28
+ looks only at the features (X), not the desired outputs (y).
29
+
30
+ Read more in the :ref:`User Guide <sequential_feature_selection>`.
31
+
32
+ .. versionadded:: 0.24
33
+
34
+ Parameters
35
+ ----------
36
+ estimator : estimator instance
37
+ An unfitted estimator.
38
+
39
+ n_features_to_select : "auto", int or float, default="auto"
40
+ If `"auto"`, the behaviour depends on the `tol` parameter:
41
+
42
+ - if `tol` is not `None`, then features are selected while the score
43
+ change does not exceed `tol`.
44
+ - otherwise, half of the features are selected.
45
+
46
+ If integer, the parameter is the absolute number of features to select.
47
+ If float between 0 and 1, it is the fraction of features to select.
48
+
49
+ .. versionadded:: 1.1
50
+ The option `"auto"` was added in version 1.1.
51
+
52
+ .. versionchanged:: 1.3
53
+ The default changed from `"warn"` to `"auto"` in 1.3.
54
+
55
+ tol : float, default=None
56
+ If the score is not incremented by at least `tol` between two
57
+ consecutive feature additions or removals, stop adding or removing.
58
+
59
+ `tol` can be negative when removing features using `direction="backward"`.
60
+ It can be useful to reduce the number of features at the cost of a small
61
+ decrease in the score.
62
+
63
+ `tol` is enabled only when `n_features_to_select` is `"auto"`.
64
+
65
+ .. versionadded:: 1.1
66
+
67
+ direction : {'forward', 'backward'}, default='forward'
68
+ Whether to perform forward selection or backward selection.
69
+
70
+ scoring : str or callable, default=None
71
+ A single str (see :ref:`scoring_parameter`) or a callable
72
+ (see :ref:`scoring`) to evaluate the predictions on the test set.
73
+
74
+ NOTE that when using a custom scorer, it should return a single
75
+ value.
76
+
77
+ If None, the estimator's score method is used.
78
+
79
+ cv : int, cross-validation generator or an iterable, default=None
80
+ Determines the cross-validation splitting strategy.
81
+ Possible inputs for cv are:
82
+
83
+ - None, to use the default 5-fold cross validation,
84
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
85
+ - :term:`CV splitter`,
86
+ - An iterable yielding (train, test) splits as arrays of indices.
87
+
88
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
89
+ either binary or multiclass,
90
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
91
+ cases, :class:`~sklearn.model_selection.KFold` is used. These splitters
92
+ are instantiated with `shuffle=False` so the splits will be the same
93
+ across calls.
94
+
95
+ Refer :ref:`User Guide <cross_validation>` for the various
96
+ cross-validation strategies that can be used here.
97
+
98
+ n_jobs : int, default=None
99
+ Number of jobs to run in parallel. When evaluating a new feature to
100
+ add or remove, the cross-validation procedure is parallel over the
101
+ folds.
102
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
103
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
104
+ for more details.
105
+
106
+ Attributes
107
+ ----------
108
+ n_features_in_ : int
109
+ Number of features seen during :term:`fit`. Only defined if the
110
+ underlying estimator exposes such an attribute when fit.
111
+
112
+ .. versionadded:: 0.24
113
+
114
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
115
+ Names of features seen during :term:`fit`. Defined only when `X`
116
+ has feature names that are all strings.
117
+
118
+ .. versionadded:: 1.0
119
+
120
+ n_features_to_select_ : int
121
+ The number of features that were selected.
122
+
123
+ support_ : ndarray of shape (n_features,), dtype=bool
124
+ The mask of selected features.
125
+
126
+ See Also
127
+ --------
128
+ GenericUnivariateSelect : Univariate feature selector with configurable
129
+ strategy.
130
+ RFE : Recursive feature elimination based on importance weights.
131
+ RFECV : Recursive feature elimination based on importance weights, with
132
+ automatic selection of the number of features.
133
+ SelectFromModel : Feature selection based on thresholds of importance
134
+ weights.
135
+
136
+ Examples
137
+ --------
138
+ >>> from sklearn.feature_selection import SequentialFeatureSelector
139
+ >>> from sklearn.neighbors import KNeighborsClassifier
140
+ >>> from sklearn.datasets import load_iris
141
+ >>> X, y = load_iris(return_X_y=True)
142
+ >>> knn = KNeighborsClassifier(n_neighbors=3)
143
+ >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3)
144
+ >>> sfs.fit(X, y)
145
+ SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3),
146
+ n_features_to_select=3)
147
+ >>> sfs.get_support()
148
+ array([ True, False, True, True])
149
+ >>> sfs.transform(X).shape
150
+ (150, 3)
151
+ """
152
+
153
+ _parameter_constraints: dict = {
154
+ "estimator": [HasMethods(["fit"])],
155
+ "n_features_to_select": [
156
+ StrOptions({"auto"}),
157
+ Interval(RealNotInt, 0, 1, closed="right"),
158
+ Interval(Integral, 0, None, closed="neither"),
159
+ ],
160
+ "tol": [None, Interval(Real, None, None, closed="neither")],
161
+ "direction": [StrOptions({"forward", "backward"})],
162
+ "scoring": [None, StrOptions(set(get_scorer_names())), callable],
163
+ "cv": ["cv_object"],
164
+ "n_jobs": [None, Integral],
165
+ }
166
+
167
+ def __init__(
168
+ self,
169
+ estimator,
170
+ *,
171
+ n_features_to_select="auto",
172
+ tol=None,
173
+ direction="forward",
174
+ scoring=None,
175
+ cv=5,
176
+ n_jobs=None,
177
+ ):
178
+ self.estimator = estimator
179
+ self.n_features_to_select = n_features_to_select
180
+ self.tol = tol
181
+ self.direction = direction
182
+ self.scoring = scoring
183
+ self.cv = cv
184
+ self.n_jobs = n_jobs
185
+
186
+ @_fit_context(
187
+ # SequentialFeatureSelector.estimator is not validated yet
188
+ prefer_skip_nested_validation=False
189
+ )
190
+ def fit(self, X, y=None):
191
+ """Learn the features to select from X.
192
+
193
+ Parameters
194
+ ----------
195
+ X : array-like of shape (n_samples, n_features)
196
+ Training vectors, where `n_samples` is the number of samples and
197
+ `n_features` is the number of predictors.
198
+
199
+ y : array-like of shape (n_samples,), default=None
200
+ Target values. This parameter may be ignored for
201
+ unsupervised learning.
202
+
203
+ Returns
204
+ -------
205
+ self : object
206
+ Returns the instance itself.
207
+ """
208
+ tags = self._get_tags()
209
+ X = self._validate_data(
210
+ X,
211
+ accept_sparse="csc",
212
+ ensure_min_features=2,
213
+ force_all_finite=not tags.get("allow_nan", True),
214
+ )
215
+ n_features = X.shape[1]
216
+
217
+ if self.n_features_to_select == "auto":
218
+ if self.tol is not None:
219
+ # With auto feature selection, `n_features_to_select_` will be updated
220
+ # to `support_.sum()` after features are selected.
221
+ self.n_features_to_select_ = n_features - 1
222
+ else:
223
+ self.n_features_to_select_ = n_features // 2
224
+ elif isinstance(self.n_features_to_select, Integral):
225
+ if self.n_features_to_select >= n_features:
226
+ raise ValueError("n_features_to_select must be < n_features.")
227
+ self.n_features_to_select_ = self.n_features_to_select
228
+ elif isinstance(self.n_features_to_select, Real):
229
+ self.n_features_to_select_ = int(n_features * self.n_features_to_select)
230
+
231
+ if self.tol is not None and self.tol < 0 and self.direction == "forward":
232
+ raise ValueError("tol must be positive when doing forward selection")
233
+
234
+ cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
235
+
236
+ cloned_estimator = clone(self.estimator)
237
+
238
+ # the current mask corresponds to the set of features:
239
+ # - that we have already *selected* if we do forward selection
240
+ # - that we have already *excluded* if we do backward selection
241
+ current_mask = np.zeros(shape=n_features, dtype=bool)
242
+ n_iterations = (
243
+ self.n_features_to_select_
244
+ if self.n_features_to_select == "auto" or self.direction == "forward"
245
+ else n_features - self.n_features_to_select_
246
+ )
247
+
248
+ old_score = -np.inf
249
+ is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
250
+ for _ in range(n_iterations):
251
+ new_feature_idx, new_score = self._get_best_new_feature_score(
252
+ cloned_estimator, X, y, cv, current_mask
253
+ )
254
+ if is_auto_select and ((new_score - old_score) < self.tol):
255
+ break
256
+
257
+ old_score = new_score
258
+ current_mask[new_feature_idx] = True
259
+
260
+ if self.direction == "backward":
261
+ current_mask = ~current_mask
262
+
263
+ self.support_ = current_mask
264
+ self.n_features_to_select_ = self.support_.sum()
265
+
266
+ return self
267
+
268
+ def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask):
269
+ # Return the best new feature and its score to add to the current_mask,
270
+ # i.e. return the best new feature and its score to add (resp. remove)
271
+ # when doing forward selection (resp. backward selection).
272
+ # When `n_features_to_select` is "auto", the caller only keeps the returned
273
+ # feature if its score improves on the previous score by at least `tol`.
274
+ candidate_feature_indices = np.flatnonzero(~current_mask)
275
+ scores = {}
276
+ for feature_idx in candidate_feature_indices:
277
+ candidate_mask = current_mask.copy()
278
+ candidate_mask[feature_idx] = True
279
+ if self.direction == "backward":
280
+ candidate_mask = ~candidate_mask
281
+ X_new = X[:, candidate_mask]
282
+ scores[feature_idx] = cross_val_score(
283
+ estimator,
284
+ X_new,
285
+ y,
286
+ cv=cv,
287
+ scoring=self.scoring,
288
+ n_jobs=self.n_jobs,
289
+ ).mean()
290
+ new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx])
291
+ return new_feature_idx, scores[new_feature_idx]
292
+
293
+ def _get_support_mask(self):
294
+ check_is_fitted(self)
295
+ return self.support_
296
+
297
+ def _more_tags(self):
298
+ return {
299
+ "allow_nan": _safe_tags(self.estimator, key="allow_nan"),
300
+ }
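
Note: a short, hedged usage sketch for the class defined above; the dataset and estimator are illustrative choices, not part of the upload. With n_features_to_select="auto" and a tol, forward selection stops once the best remaining feature improves the mean cross-validated score by less than tol, and n_features_to_select_ is then set to support_.sum().

from sklearn.datasets import load_iris
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)

# Forward selection with early stopping on the score improvement.
sfs = SequentialFeatureSelector(
    LogisticRegression(max_iter=1000),
    n_features_to_select="auto",
    tol=0.01,
    direction="forward",
    cv=3,
)
sfs.fit(X, y)
print(sfs.n_features_to_select_)  # number of features actually kept
print(sfs.get_support())          # boolean mask over the 4 iris features
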
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py ADDED
@@ -0,0 +1,1161 @@
1
+ """Univariate features selection."""
2
+
3
+ # Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
4
+ # L. Buitinck, A. Joly
5
+ # License: BSD 3 clause
6
+
7
+
8
+ import warnings
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ from scipy import special, stats
13
+ from scipy.sparse import issparse
14
+
15
+ from ..base import BaseEstimator, _fit_context
16
+ from ..preprocessing import LabelBinarizer
17
+ from ..utils import as_float_array, check_array, check_X_y, safe_mask, safe_sqr
18
+ from ..utils._param_validation import Interval, StrOptions, validate_params
19
+ from ..utils.extmath import row_norms, safe_sparse_dot
20
+ from ..utils.validation import check_is_fitted
21
+ from ._base import SelectorMixin
22
+
23
+
24
+ def _clean_nans(scores):
25
+ """
26
+ Fixes Issue #1240: NaNs can't be properly compared, so change them to the
27
+ smallest value of scores's dtype. -inf seems to be unreliable.
28
+ """
29
+ # XXX where should this function be called? fit? scoring functions
30
+ # themselves?
31
+ scores = as_float_array(scores, copy=True)
32
+ scores[np.isnan(scores)] = np.finfo(scores.dtype).min
33
+ return scores
34
+
35
+
36
+ ######################################################################
37
+ # Scoring functions
38
+
39
+
40
+ # The following function is a rewriting of scipy.stats.f_oneway
41
+ # Contrary to the scipy.stats.f_oneway implementation it does not
42
+ # copy the data while keeping the inputs unchanged.
43
+ def f_oneway(*args):
44
+ """Perform a 1-way ANOVA.
45
+
46
+ The one-way ANOVA tests the null hypothesis that 2 or more groups have
47
+ the same population mean. The test is applied to samples from two or
48
+ more groups, possibly with differing sizes.
49
+
50
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
51
+
52
+ Parameters
53
+ ----------
54
+ *args : {array-like, sparse matrix}
55
+ Sample1, sample2... The sample measurements should be given as
56
+ arguments.
57
+
58
+ Returns
59
+ -------
60
+ f_statistic : float
61
+ The computed F-value of the test.
62
+ p_value : float
63
+ The associated p-value from the F-distribution.
64
+
65
+ Notes
66
+ -----
67
+ The ANOVA test has important assumptions that must be satisfied in order
68
+ for the associated p-value to be valid.
69
+
70
+ 1. The samples are independent
71
+ 2. Each sample is from a normally distributed population
72
+ 3. The population standard deviations of the groups are all equal. This
73
+ property is known as homoscedasticity.
74
+
75
+ If these assumptions are not true for a given set of data, it may still be
76
+ possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
77
+ with some loss of power.
78
+
79
+ The algorithm is from Heiman[2], pp.394-7.
80
+
81
+ See ``scipy.stats.f_oneway``, which should give the same results while
82
+ being less efficient.
83
+
84
+ References
85
+ ----------
86
+ .. [1] Lowry, Richard. "Concepts and Applications of Inferential
87
+ Statistics". Chapter 14.
88
+ http://vassarstats.net/textbook
89
+
90
+ .. [2] Heiman, G.W. Research Methods in Statistics. 2002.
91
+ """
92
+ n_classes = len(args)
93
+ args = [as_float_array(a) for a in args]
94
+ n_samples_per_class = np.array([a.shape[0] for a in args])
95
+ n_samples = np.sum(n_samples_per_class)
96
+ ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
97
+ sums_args = [np.asarray(a.sum(axis=0)) for a in args]
98
+ square_of_sums_alldata = sum(sums_args) ** 2
99
+ square_of_sums_args = [s**2 for s in sums_args]
100
+ sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
101
+ ssbn = 0.0
102
+ for k, _ in enumerate(args):
103
+ ssbn += square_of_sums_args[k] / n_samples_per_class[k]
104
+ ssbn -= square_of_sums_alldata / float(n_samples)
105
+ sswn = sstot - ssbn
106
+ dfbn = n_classes - 1
107
+ dfwn = n_samples - n_classes
108
+ msb = ssbn / float(dfbn)
109
+ msw = sswn / float(dfwn)
110
+ constant_features_idx = np.where(msw == 0.0)[0]
111
+ if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size:
112
+ warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
113
+ f = msb / msw
114
+ # flatten matrix to vector in sparse case
115
+ f = np.asarray(f).ravel()
116
+ prob = special.fdtrc(dfbn, dfwn, f)
117
+ return f, prob
118
+
119
+
120
+ @validate_params(
121
+ {
122
+ "X": ["array-like", "sparse matrix"],
123
+ "y": ["array-like"],
124
+ },
125
+ prefer_skip_nested_validation=True,
126
+ )
127
+ def f_classif(X, y):
128
+ """Compute the ANOVA F-value for the provided sample.
129
+
130
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
131
+
132
+ Parameters
133
+ ----------
134
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
135
+ The set of regressors that will be tested sequentially.
136
+
137
+ y : array-like of shape (n_samples,)
138
+ The target vector.
139
+
140
+ Returns
141
+ -------
142
+ f_statistic : ndarray of shape (n_features,)
143
+ F-statistic for each feature.
144
+
145
+ p_values : ndarray of shape (n_features,)
146
+ P-values associated with the F-statistic.
147
+
148
+ See Also
149
+ --------
150
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
151
+ f_regression : F-value between label/feature for regression tasks.
152
+
153
+ Examples
154
+ --------
155
+ >>> from sklearn.datasets import make_classification
156
+ >>> from sklearn.feature_selection import f_classif
157
+ >>> X, y = make_classification(
158
+ ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
159
+ ... shuffle=False, random_state=42
160
+ ... )
161
+ >>> f_statistic, p_values = f_classif(X, y)
162
+ >>> f_statistic
163
+ array([2.2...e+02, 7.0...e-01, 1.6...e+00, 9.3...e-01,
164
+ 5.4...e+00, 3.2...e-01, 4.7...e-02, 5.7...e-01,
165
+ 7.5...e-01, 8.9...e-02])
166
+ >>> p_values
167
+ array([7.1...e-27, 4.0...e-01, 1.9...e-01, 3.3...e-01,
168
+ 2.2...e-02, 5.7...e-01, 8.2...e-01, 4.5...e-01,
169
+ 3.8...e-01, 7.6...e-01])
170
+ """
171
+ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"])
172
+ args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
173
+ return f_oneway(*args)
174
+
175
+
176
+ def _chisquare(f_obs, f_exp):
177
+ """Fast replacement for scipy.stats.chisquare.
178
+
179
+ Version from https://github.com/scipy/scipy/pull/2525 with additional
180
+ optimizations.
181
+ """
182
+ f_obs = np.asarray(f_obs, dtype=np.float64)
183
+
184
+ k = len(f_obs)
185
+ # Reuse f_obs for chi-squared statistics
186
+ chisq = f_obs
187
+ chisq -= f_exp
188
+ chisq **= 2
189
+ with np.errstate(invalid="ignore"):
190
+ chisq /= f_exp
191
+ chisq = chisq.sum(axis=0)
192
+ return chisq, special.chdtrc(k - 1, chisq)
193
+
194
+
195
+ @validate_params(
196
+ {
197
+ "X": ["array-like", "sparse matrix"],
198
+ "y": ["array-like"],
199
+ },
200
+ prefer_skip_nested_validation=True,
201
+ )
202
+ def chi2(X, y):
203
+ """Compute chi-squared stats between each non-negative feature and class.
204
+
205
+ This score can be used to select the `n_features` features with the
206
+ highest values for the test chi-squared statistic from X, which must
207
+ contain only **non-negative features** such as booleans or frequencies
208
+ (e.g., term counts in document classification), relative to the classes.
209
+
210
+ Recall that the chi-square test measures dependence between stochastic
211
+ variables, so using this function "weeds out" the features that are the
212
+ most likely to be independent of class and therefore irrelevant for
213
+ classification.
214
+
215
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
216
+
217
+ Parameters
218
+ ----------
219
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
220
+ Sample vectors.
221
+
222
+ y : array-like of shape (n_samples,)
223
+ Target vector (class labels).
224
+
225
+ Returns
226
+ -------
227
+ chi2 : ndarray of shape (n_features,)
228
+ Chi2 statistics for each feature.
229
+
230
+ p_values : ndarray of shape (n_features,)
231
+ P-values for each feature.
232
+
233
+ See Also
234
+ --------
235
+ f_classif : ANOVA F-value between label/feature for classification tasks.
236
+ f_regression : F-value between label/feature for regression tasks.
237
+
238
+ Notes
239
+ -----
240
+ Complexity of this algorithm is O(n_classes * n_features).
241
+
242
+ Examples
243
+ --------
244
+ >>> import numpy as np
245
+ >>> from sklearn.feature_selection import chi2
246
+ >>> X = np.array([[1, 1, 3],
247
+ ... [0, 1, 5],
248
+ ... [5, 4, 1],
249
+ ... [6, 6, 2],
250
+ ... [1, 4, 0],
251
+ ... [0, 0, 0]])
252
+ >>> y = np.array([1, 1, 0, 0, 2, 2])
253
+ >>> chi2_stats, p_values = chi2(X, y)
254
+ >>> chi2_stats
255
+ array([15.3..., 6.5 , 8.9...])
256
+ >>> p_values
257
+ array([0.0004..., 0.0387..., 0.0116... ])
258
+ """
259
+
260
+ # XXX: we might want to do some of the following in logspace instead for
261
+ # numerical stability.
262
+ # Converting X to float allows getting better performance for the
263
+ # safe_sparse_dot call made below.
264
+ X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32))
265
+ if np.any((X.data if issparse(X) else X) < 0):
266
+ raise ValueError("Input X must be non-negative.")
267
+
268
+ # Use a sparse representation for Y by default to reduce memory usage when
269
+ # y has many unique classes.
270
+ Y = LabelBinarizer(sparse_output=True).fit_transform(y)
271
+ if Y.shape[1] == 1:
272
+ Y = Y.toarray()
273
+ Y = np.append(1 - Y, Y, axis=1)
274
+
275
+ observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
276
+
277
+ if issparse(observed):
278
+ # convert back to a dense array before calling _chisquare
279
+ # XXX: could _chisquare be reimplemented to accept sparse matrices for
280
+ # cases where both n_classes and n_features are large (and X is
281
+ # sparse)?
282
+ observed = observed.toarray()
283
+
284
+ feature_count = X.sum(axis=0).reshape(1, -1)
285
+ class_prob = Y.mean(axis=0).reshape(1, -1)
286
+ expected = np.dot(class_prob.T, feature_count)
287
+
288
+ return _chisquare(observed, expected)
289
+
290
+
291
+ @validate_params(
292
+ {
293
+ "X": ["array-like", "sparse matrix"],
294
+ "y": ["array-like"],
295
+ "center": ["boolean"],
296
+ "force_finite": ["boolean"],
297
+ },
298
+ prefer_skip_nested_validation=True,
299
+ )
300
+ def r_regression(X, y, *, center=True, force_finite=True):
301
+ """Compute Pearson's r for each feature and the target.
302
+
303
+ Pearson's r is also known as the Pearson correlation coefficient.
304
+
305
+ Linear model for testing the individual effect of each of many regressors.
306
+ This is a scoring function to be used in a feature selection procedure, not
307
+ a free standing feature selection procedure.
308
+
309
+ The cross correlation between each regressor and the target is computed
310
+ as::
311
+
312
+ E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
313
+
314
+ For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
315
+
316
+ .. versionadded:: 1.0
317
+
318
+ Parameters
319
+ ----------
320
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
321
+ The data matrix.
322
+
323
+ y : array-like of shape (n_samples,)
324
+ The target vector.
325
+
326
+ center : bool, default=True
327
+ Whether or not to center the data matrix `X` and the target vector `y`.
328
+ By default, `X` and `y` will be centered.
329
+
330
+ force_finite : bool, default=True
331
+ Whether or not to force the Pearson's R correlation to be finite.
332
+ In the particular case where some features in `X` or the target `y`
333
+ are constant, the Pearson's R correlation is not defined. When
334
+ `force_finite=False`, a correlation of `np.nan` is returned to
335
+ acknowledge this case. When `force_finite=True`, this value will be
336
+ forced to a minimal correlation of `0.0`.
337
+
338
+ .. versionadded:: 1.1
339
+
340
+ Returns
341
+ -------
342
+ correlation_coefficient : ndarray of shape (n_features,)
343
+ Pearson's R correlation coefficients of features.
344
+
345
+ See Also
346
+ --------
347
+ f_regression: Univariate linear regression tests returning f-statistic
348
+ and p-values.
349
+ mutual_info_regression: Mutual information for a continuous target.
350
+ f_classif: ANOVA F-value between label/feature for classification tasks.
351
+ chi2: Chi-squared stats of non-negative features for classification tasks.
352
+
353
+ Examples
354
+ --------
355
+ >>> from sklearn.datasets import make_regression
356
+ >>> from sklearn.feature_selection import r_regression
357
+ >>> X, y = make_regression(
358
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
359
+ ... )
360
+ >>> r_regression(X, y)
361
+ array([-0.15..., 1. , -0.22...])
362
+ """
363
+ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64)
364
+ n_samples = X.shape[0]
365
+
366
+ # Compute centered values
367
+ # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
368
+ # need not center X
369
+ if center:
370
+ y = y - np.mean(y)
371
+ # TODO: for Scipy <= 1.10, `isspmatrix(X)` returns `True` for sparse arrays.
372
+ # Here, we check the output of the `.mean` operation that returns a `np.matrix`
373
+ # for sparse matrices while a `np.array` for dense and sparse arrays.
374
+ # We can reconsider using `isspmatrix` when the minimum version is
375
+ # SciPy >= 1.11
376
+ X_means = X.mean(axis=0)
377
+ X_means = X_means.getA1() if isinstance(X_means, np.matrix) else X_means
378
+ # Compute the scaled standard deviations via moments
379
+ X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means**2)
380
+ else:
381
+ X_norms = row_norms(X.T)
382
+
383
+ correlation_coefficient = safe_sparse_dot(y, X)
384
+ with np.errstate(divide="ignore", invalid="ignore"):
385
+ correlation_coefficient /= X_norms
386
+ correlation_coefficient /= np.linalg.norm(y)
387
+
388
+ if force_finite and not np.isfinite(correlation_coefficient).all():
389
+ # case where the target or some features are constant
390
+ # the correlation coefficient(s) is/are set to the minimum (i.e. 0.0)
391
+ nan_mask = np.isnan(correlation_coefficient)
392
+ correlation_coefficient[nan_mask] = 0.0
393
+ return correlation_coefficient
394
+
395
+
396
+ @validate_params(
397
+ {
398
+ "X": ["array-like", "sparse matrix"],
399
+ "y": ["array-like"],
400
+ "center": ["boolean"],
401
+ "force_finite": ["boolean"],
402
+ },
403
+ prefer_skip_nested_validation=True,
404
+ )
405
+ def f_regression(X, y, *, center=True, force_finite=True):
406
+ """Univariate linear regression tests returning F-statistic and p-values.
407
+
408
+ Quick linear model for testing the effect of a single regressor,
409
+ sequentially for many regressors.
410
+
411
+ This is done in 2 steps:
412
+
413
+ 1. The cross correlation between each regressor and the target is computed
414
+ using :func:`r_regression` as::
415
+
416
+ E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
417
+
418
+ 2. It is converted to an F score and then to a p-value.
419
+
420
+ :func:`f_regression` is derived from :func:`r_regression` and will rank
421
+ features in the same order if all the features are positively correlated
422
+ with the target.
423
+
424
+ Note however that contrary to :func:`f_regression`, :func:`r_regression`
425
+ values lie in [-1, 1] and can thus be negative. :func:`f_regression` is
426
+ therefore recommended as a feature selection criterion to identify
427
+ potentially predictive features for a downstream classifier, irrespective of
428
+ the sign of the association with the target variable.
429
+
430
+ Furthermore :func:`f_regression` returns p-values while
431
+ :func:`r_regression` does not.
432
+
433
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
434
+
435
+ Parameters
436
+ ----------
437
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
438
+ The data matrix.
439
+
440
+ y : array-like of shape (n_samples,)
441
+ The target vector.
442
+
443
+ center : bool, default=True
444
+ Whether or not to center the data matrix `X` and the target vector `y`.
445
+ By default, `X` and `y` will be centered.
446
+
447
+ force_finite : bool, default=True
448
+ Whether or not to force the F-statistics and associated p-values to
449
+ be finite. There are two cases where the F-statistic is expected to not
450
+ be finite:
451
+
452
+ - when the target `y` or some features in `X` are constant. In this
453
+ case, the Pearson's R correlation is not defined leading to obtain
454
+ `np.nan` values in the F-statistic and p-value. When
455
+ `force_finite=True`, the F-statistic is set to `0.0` and the
456
+ associated p-value is set to `1.0`.
457
+ - when a feature in `X` is perfectly correlated (or
458
+ anti-correlated) with the target `y`. In this case, the F-statistic
459
+ is expected to be `np.inf`. When `force_finite=True`, the F-statistic
460
+ is set to `np.finfo(dtype).max` and the associated p-value is set to
461
+ `0.0`.
462
+
463
+ .. versionadded:: 1.1
464
+
465
+ Returns
466
+ -------
467
+ f_statistic : ndarray of shape (n_features,)
468
+ F-statistic for each feature.
469
+
470
+ p_values : ndarray of shape (n_features,)
471
+ P-values associated with the F-statistic.
472
+
473
+ See Also
474
+ --------
475
+ r_regression: Pearson's R between label/feature for regression tasks.
476
+ f_classif: ANOVA F-value between label/feature for classification tasks.
477
+ chi2: Chi-squared stats of non-negative features for classification tasks.
478
+ SelectKBest: Select features based on the k highest scores.
479
+ SelectFpr: Select features based on a false positive rate test.
480
+ SelectFdr: Select features based on an estimated false discovery rate.
481
+ SelectFwe: Select features based on family-wise error rate.
482
+ SelectPercentile: Select features based on percentile of the highest
483
+ scores.
484
+
485
+ Examples
486
+ --------
487
+ >>> from sklearn.datasets import make_regression
488
+ >>> from sklearn.feature_selection import f_regression
489
+ >>> X, y = make_regression(
490
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
491
+ ... )
492
+ >>> f_statistic, p_values = f_regression(X, y)
493
+ >>> f_statistic
494
+ array([1.2...+00, 2.6...+13, 2.6...+00])
495
+ >>> p_values
496
+ array([2.7..., 1.5..., 1.0...])
497
+ """
498
+ correlation_coefficient = r_regression(
499
+ X, y, center=center, force_finite=force_finite
500
+ )
501
+ deg_of_freedom = y.size - (2 if center else 1)
502
+
503
+ corr_coef_squared = correlation_coefficient**2
504
+
505
+ with np.errstate(divide="ignore", invalid="ignore"):
506
+ f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom
507
+ p_values = stats.f.sf(f_statistic, 1, deg_of_freedom)
508
+
509
+ if force_finite and not np.isfinite(f_statistic).all():
510
+ # case where there is a perfect (anti-)correlation
511
+ # f-statistics can be set to the maximum and p-values to zero
512
+ mask_inf = np.isinf(f_statistic)
513
+ f_statistic[mask_inf] = np.finfo(f_statistic.dtype).max
514
+ # case where the target or some features are constant
515
+ # f-statistics would be minimum and thus p-values large
516
+ mask_nan = np.isnan(f_statistic)
517
+ f_statistic[mask_nan] = 0.0
518
+ p_values[mask_nan] = 1.0
519
+ return f_statistic, p_values
520
+
521
+
522
+ ######################################################################
523
+ # Base classes
524
+
525
+
526
+ class _BaseFilter(SelectorMixin, BaseEstimator):
527
+ """Initialize the univariate feature selection.
528
+
529
+ Parameters
530
+ ----------
531
+ score_func : callable
532
+ Function taking two arrays X and y, and returning a pair of arrays
533
+ (scores, pvalues) or a single array with scores.
534
+ """
535
+
536
+ _parameter_constraints: dict = {"score_func": [callable]}
537
+
538
+ def __init__(self, score_func):
539
+ self.score_func = score_func
540
+
541
+ @_fit_context(prefer_skip_nested_validation=True)
542
+ def fit(self, X, y=None):
543
+ """Run score function on (X, y) and get the appropriate features.
544
+
545
+ Parameters
546
+ ----------
547
+ X : array-like of shape (n_samples, n_features)
548
+ The training input samples.
549
+
550
+ y : array-like of shape (n_samples,) or None
551
+ The target values (class labels in classification, real numbers in
552
+ regression). If the selector is unsupervised then `y` can be set to `None`.
553
+
554
+ Returns
555
+ -------
556
+ self : object
557
+ Returns the instance itself.
558
+ """
559
+ if y is None:
560
+ X = self._validate_data(X, accept_sparse=["csr", "csc"])
561
+ else:
562
+ X, y = self._validate_data(
563
+ X, y, accept_sparse=["csr", "csc"], multi_output=True
564
+ )
565
+
566
+ self._check_params(X, y)
567
+ score_func_ret = self.score_func(X, y)
568
+ if isinstance(score_func_ret, (list, tuple)):
569
+ self.scores_, self.pvalues_ = score_func_ret
570
+ self.pvalues_ = np.asarray(self.pvalues_)
571
+ else:
572
+ self.scores_ = score_func_ret
573
+ self.pvalues_ = None
574
+
575
+ self.scores_ = np.asarray(self.scores_)
576
+
577
+ return self
578
+
579
+ def _check_params(self, X, y):
580
+ pass
581
+
582
+ def _more_tags(self):
583
+ return {"requires_y": True}
584
+
585
+
586
+ ######################################################################
587
+ # Specific filters
588
+ ######################################################################
589
+ class SelectPercentile(_BaseFilter):
590
+ """Select features according to a percentile of the highest scores.
591
+
592
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
593
+
594
+ Parameters
595
+ ----------
596
+ score_func : callable, default=f_classif
597
+ Function taking two arrays X and y, and returning a pair of arrays
598
+ (scores, pvalues) or a single array with scores.
599
+ Default is f_classif (see below "See Also"). The default function only
600
+ works with classification tasks.
601
+
602
+ .. versionadded:: 0.18
603
+
604
+ percentile : int, default=10
605
+ Percent of features to keep.
606
+
607
+ Attributes
608
+ ----------
609
+ scores_ : array-like of shape (n_features,)
610
+ Scores of features.
611
+
612
+ pvalues_ : array-like of shape (n_features,)
613
+ p-values of feature scores, None if `score_func` returned only scores.
614
+
615
+ n_features_in_ : int
616
+ Number of features seen during :term:`fit`.
617
+
618
+ .. versionadded:: 0.24
619
+
620
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
621
+ Names of features seen during :term:`fit`. Defined only when `X`
622
+ has feature names that are all strings.
623
+
624
+ .. versionadded:: 1.0
625
+
626
+ See Also
627
+ --------
628
+ f_classif : ANOVA F-value between label/feature for classification tasks.
629
+ mutual_info_classif : Mutual information for a discrete target.
630
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
631
+ f_regression : F-value between label/feature for regression tasks.
632
+ mutual_info_regression : Mutual information for a continuous target.
633
+ SelectKBest : Select features based on the k highest scores.
634
+ SelectFpr : Select features based on a false positive rate test.
635
+ SelectFdr : Select features based on an estimated false discovery rate.
636
+ SelectFwe : Select features based on family-wise error rate.
637
+ GenericUnivariateSelect : Univariate feature selector with configurable
638
+ mode.
639
+
640
+ Notes
641
+ -----
642
+ Ties between features with equal scores will be broken in an unspecified
643
+ way.
644
+
645
+ This filter supports unsupervised feature selection that only requests `X` for
646
+ computing the scores.
647
+
648
+ Examples
649
+ --------
650
+ >>> from sklearn.datasets import load_digits
651
+ >>> from sklearn.feature_selection import SelectPercentile, chi2
652
+ >>> X, y = load_digits(return_X_y=True)
653
+ >>> X.shape
654
+ (1797, 64)
655
+ >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y)
656
+ >>> X_new.shape
657
+ (1797, 7)
658
+ """
659
+
660
+ _parameter_constraints: dict = {
661
+ **_BaseFilter._parameter_constraints,
662
+ "percentile": [Interval(Real, 0, 100, closed="both")],
663
+ }
664
+
665
+ def __init__(self, score_func=f_classif, *, percentile=10):
666
+ super().__init__(score_func=score_func)
667
+ self.percentile = percentile
668
+
669
+ def _get_support_mask(self):
670
+ check_is_fitted(self)
671
+
672
+ # Cater for NaNs
673
+ if self.percentile == 100:
674
+ return np.ones(len(self.scores_), dtype=bool)
675
+ elif self.percentile == 0:
676
+ return np.zeros(len(self.scores_), dtype=bool)
677
+
678
+ scores = _clean_nans(self.scores_)
679
+ threshold = np.percentile(scores, 100 - self.percentile)
680
+ mask = scores > threshold
681
+ ties = np.where(scores == threshold)[0]
682
+ if len(ties):
683
+ max_feats = int(len(scores) * self.percentile / 100)
684
+ kept_ties = ties[: max_feats - mask.sum()]
685
+ mask[kept_ties] = True
686
+ return mask
687
+
688
+ def _more_tags(self):
689
+ return {"requires_y": False}
690
+
691
+
692
+ class SelectKBest(_BaseFilter):
693
+ """Select features according to the k highest scores.
694
+
695
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
696
+
697
+ Parameters
698
+ ----------
699
+ score_func : callable, default=f_classif
700
+ Function taking two arrays X and y, and returning a pair of arrays
701
+ (scores, pvalues) or a single array with scores.
702
+ Default is f_classif (see below "See Also"). The default function only
703
+ works with classification tasks.
704
+
705
+ .. versionadded:: 0.18
706
+
707
+ k : int or "all", default=10
708
+ Number of top features to select.
709
+ The "all" option bypasses selection, for use in a parameter search.
710
+
711
+ Attributes
712
+ ----------
713
+ scores_ : array-like of shape (n_features,)
714
+ Scores of features.
715
+
716
+ pvalues_ : array-like of shape (n_features,)
717
+ p-values of feature scores, None if `score_func` returned only scores.
718
+
719
+ n_features_in_ : int
720
+ Number of features seen during :term:`fit`.
721
+
722
+ .. versionadded:: 0.24
723
+
724
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
725
+ Names of features seen during :term:`fit`. Defined only when `X`
726
+ has feature names that are all strings.
727
+
728
+ .. versionadded:: 1.0
729
+
730
+ See Also
731
+ --------
732
+ f_classif: ANOVA F-value between label/feature for classification tasks.
733
+ mutual_info_classif: Mutual information for a discrete target.
734
+ chi2: Chi-squared stats of non-negative features for classification tasks.
735
+ f_regression: F-value between label/feature for regression tasks.
736
+ mutual_info_regression: Mutual information for a continuous target.
737
+ SelectPercentile: Select features based on percentile of the highest
738
+ scores.
739
+ SelectFpr : Select features based on a false positive rate test.
740
+ SelectFdr : Select features based on an estimated false discovery rate.
741
+ SelectFwe : Select features based on family-wise error rate.
742
+ GenericUnivariateSelect : Univariate feature selector with configurable
743
+ mode.
744
+
745
+ Notes
746
+ -----
747
+ Ties between features with equal scores will be broken in an unspecified
748
+ way.
749
+
750
+ This filter supports unsupervised feature selection that only requests `X` for
751
+ computing the scores.
752
+
753
+ Examples
754
+ --------
755
+ >>> from sklearn.datasets import load_digits
756
+ >>> from sklearn.feature_selection import SelectKBest, chi2
757
+ >>> X, y = load_digits(return_X_y=True)
758
+ >>> X.shape
759
+ (1797, 64)
760
+ >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y)
761
+ >>> X_new.shape
762
+ (1797, 20)
763
+ """
764
+
765
+ _parameter_constraints: dict = {
766
+ **_BaseFilter._parameter_constraints,
767
+ "k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")],
768
+ }
769
+
770
+ def __init__(self, score_func=f_classif, *, k=10):
771
+ super().__init__(score_func=score_func)
772
+ self.k = k
773
+
774
+ def _check_params(self, X, y):
775
+ if not isinstance(self.k, str) and self.k > X.shape[1]:
776
+ warnings.warn(
777
+ f"k={self.k} is greater than n_features={X.shape[1]}. "
778
+ "All the features will be returned."
779
+ )
780
+
781
+ def _get_support_mask(self):
782
+ check_is_fitted(self)
783
+
784
+ if self.k == "all":
785
+ return np.ones(self.scores_.shape, dtype=bool)
786
+ elif self.k == 0:
787
+ return np.zeros(self.scores_.shape, dtype=bool)
788
+ else:
789
+ scores = _clean_nans(self.scores_)
790
+ mask = np.zeros(scores.shape, dtype=bool)
791
+
792
+ # Request a stable sort. Mergesort takes more memory (~40MB per
793
+ # megafeature on x86-64).
794
+ mask[np.argsort(scores, kind="mergesort")[-self.k :]] = 1
795
+ return mask
796
+
797
+ def _more_tags(self):
798
+ return {"requires_y": False}
799
+
800
+
801
+ class SelectFpr(_BaseFilter):
802
+ """Filter: Select the pvalues below alpha based on a FPR test.
803
+
804
+ FPR test stands for False Positive Rate test. It controls the total
805
+ amount of false detections.
806
+
807
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
808
+
809
+ Parameters
810
+ ----------
811
+ score_func : callable, default=f_classif
812
+ Function taking two arrays X and y, and returning a pair of arrays
813
+ (scores, pvalues).
814
+ Default is f_classif (see below "See Also"). The default function only
815
+ works with classification tasks.
816
+
817
+ alpha : float, default=5e-2
818
+ Features with p-values less than `alpha` are selected.
819
+
820
+ Attributes
821
+ ----------
822
+ scores_ : array-like of shape (n_features,)
823
+ Scores of features.
824
+
825
+ pvalues_ : array-like of shape (n_features,)
826
+ p-values of feature scores.
827
+
828
+ n_features_in_ : int
829
+ Number of features seen during :term:`fit`.
830
+
831
+ .. versionadded:: 0.24
832
+
833
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
834
+ Names of features seen during :term:`fit`. Defined only when `X`
835
+ has feature names that are all strings.
836
+
837
+ .. versionadded:: 1.0
838
+
839
+ See Also
840
+ --------
841
+ f_classif : ANOVA F-value between label/feature for classification tasks.
842
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
843
+ mutual_info_classif: Mutual information for a discrete target.
844
+ f_regression : F-value between label/feature for regression tasks.
845
+ mutual_info_regression : Mutual information for a continuous target.
846
+ SelectPercentile : Select features based on percentile of the highest
847
+ scores.
848
+ SelectKBest : Select features based on the k highest scores.
849
+ SelectFdr : Select features based on an estimated false discovery rate.
850
+ SelectFwe : Select features based on family-wise error rate.
851
+ GenericUnivariateSelect : Univariate feature selector with configurable
852
+ mode.
853
+
854
+ Examples
855
+ --------
856
+ >>> from sklearn.datasets import load_breast_cancer
857
+ >>> from sklearn.feature_selection import SelectFpr, chi2
858
+ >>> X, y = load_breast_cancer(return_X_y=True)
859
+ >>> X.shape
860
+ (569, 30)
861
+ >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y)
862
+ >>> X_new.shape
863
+ (569, 16)
864
+ """
865
+
866
+ _parameter_constraints: dict = {
867
+ **_BaseFilter._parameter_constraints,
868
+ "alpha": [Interval(Real, 0, 1, closed="both")],
869
+ }
870
+
871
+ def __init__(self, score_func=f_classif, *, alpha=5e-2):
872
+ super().__init__(score_func=score_func)
873
+ self.alpha = alpha
874
+
875
+ def _get_support_mask(self):
876
+ check_is_fitted(self)
877
+
878
+ return self.pvalues_ < self.alpha
879
+
880
+
881
+ class SelectFdr(_BaseFilter):
882
+ """Filter: Select the p-values for an estimated false discovery rate.
883
+
884
+ This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
885
+ on the expected false discovery rate.
886
+
887
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
888
+
889
+ Parameters
890
+ ----------
891
+ score_func : callable, default=f_classif
892
+ Function taking two arrays X and y, and returning a pair of arrays
893
+ (scores, pvalues).
894
+ Default is f_classif (see below "See Also"). The default function only
895
+ works with classification tasks.
896
+
897
+ alpha : float, default=5e-2
898
+ The highest uncorrected p-value for features to keep.
899
+
900
+ Attributes
901
+ ----------
902
+ scores_ : array-like of shape (n_features,)
903
+ Scores of features.
904
+
905
+ pvalues_ : array-like of shape (n_features,)
906
+ p-values of feature scores.
907
+
908
+ n_features_in_ : int
909
+ Number of features seen during :term:`fit`.
910
+
911
+ .. versionadded:: 0.24
912
+
913
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
914
+ Names of features seen during :term:`fit`. Defined only when `X`
915
+ has feature names that are all strings.
916
+
917
+ .. versionadded:: 1.0
918
+
919
+ See Also
920
+ --------
921
+ f_classif : ANOVA F-value between label/feature for classification tasks.
922
+ mutual_info_classif : Mutual information for a discrete target.
923
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
924
+ f_regression : F-value between label/feature for regression tasks.
925
+ mutual_info_regression : Mutual information for a continuous target.
926
+ SelectPercentile : Select features based on percentile of the highest
927
+ scores.
928
+ SelectKBest : Select features based on the k highest scores.
929
+ SelectFpr : Select features based on a false positive rate test.
930
+ SelectFwe : Select features based on family-wise error rate.
931
+ GenericUnivariateSelect : Univariate feature selector with configurable
932
+ mode.
933
+
934
+ References
935
+ ----------
936
+ https://en.wikipedia.org/wiki/False_discovery_rate
937
+
938
+ Examples
939
+ --------
940
+ >>> from sklearn.datasets import load_breast_cancer
941
+ >>> from sklearn.feature_selection import SelectFdr, chi2
942
+ >>> X, y = load_breast_cancer(return_X_y=True)
943
+ >>> X.shape
944
+ (569, 30)
945
+ >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y)
946
+ >>> X_new.shape
947
+ (569, 16)
948
+ """
949
+
950
+ _parameter_constraints: dict = {
951
+ **_BaseFilter._parameter_constraints,
952
+ "alpha": [Interval(Real, 0, 1, closed="both")],
953
+ }
954
+
955
+ def __init__(self, score_func=f_classif, *, alpha=5e-2):
956
+ super().__init__(score_func=score_func)
957
+ self.alpha = alpha
958
+
959
+ def _get_support_mask(self):
960
+ check_is_fitted(self)
961
+
962
+ n_features = len(self.pvalues_)
963
+ sv = np.sort(self.pvalues_)
964
+ selected = sv[
965
+ sv <= float(self.alpha) / n_features * np.arange(1, n_features + 1)
966
+ ]
967
+ if selected.size == 0:
968
+ return np.zeros_like(self.pvalues_, dtype=bool)
969
+ return self.pvalues_ <= selected.max()
970
+
971
+
972
+ class SelectFwe(_BaseFilter):
973
+ """Filter: Select the p-values corresponding to Family-wise error rate.
974
+
975
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
976
+
977
+ Parameters
978
+ ----------
979
+ score_func : callable, default=f_classif
980
+ Function taking two arrays X and y, and returning a pair of arrays
981
+ (scores, pvalues).
982
+ Default is f_classif (see below "See Also"). The default function only
983
+ works with classification tasks.
984
+
985
+ alpha : float, default=5e-2
986
+ The highest uncorrected p-value for features to keep.
987
+
988
+ Attributes
989
+ ----------
990
+ scores_ : array-like of shape (n_features,)
991
+ Scores of features.
992
+
993
+ pvalues_ : array-like of shape (n_features,)
994
+ p-values of feature scores.
995
+
996
+ n_features_in_ : int
997
+ Number of features seen during :term:`fit`.
998
+
999
+ .. versionadded:: 0.24
1000
+
1001
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1002
+ Names of features seen during :term:`fit`. Defined only when `X`
1003
+ has feature names that are all strings.
1004
+
1005
+ .. versionadded:: 1.0
1006
+
1007
+ See Also
1008
+ --------
1009
+ f_classif : ANOVA F-value between label/feature for classification tasks.
1010
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
1011
+ f_regression : F-value between label/feature for regression tasks.
1012
+ SelectPercentile : Select features based on percentile of the highest
1013
+ scores.
1014
+ SelectKBest : Select features based on the k highest scores.
1015
+ SelectFpr : Select features based on a false positive rate test.
1016
+ SelectFdr : Select features based on an estimated false discovery rate.
1017
+ GenericUnivariateSelect : Univariate feature selector with configurable
1018
+ mode.
1019
+
1020
+ Examples
1021
+ --------
1022
+ >>> from sklearn.datasets import load_breast_cancer
1023
+ >>> from sklearn.feature_selection import SelectFwe, chi2
1024
+ >>> X, y = load_breast_cancer(return_X_y=True)
1025
+ >>> X.shape
1026
+ (569, 30)
1027
+ >>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)
1028
+ >>> X_new.shape
1029
+ (569, 15)
1030
+ """
1031
+
1032
+ _parameter_constraints: dict = {
1033
+ **_BaseFilter._parameter_constraints,
1034
+ "alpha": [Interval(Real, 0, 1, closed="both")],
1035
+ }
1036
+
1037
+ def __init__(self, score_func=f_classif, *, alpha=5e-2):
1038
+ super().__init__(score_func=score_func)
1039
+ self.alpha = alpha
1040
+
1041
+ def _get_support_mask(self):
1042
+ check_is_fitted(self)
1043
+
1044
+ return self.pvalues_ < self.alpha / len(self.pvalues_)
1045
+
1046
+
1047
+ ######################################################################
1048
+ # Generic filter
1049
+ ######################################################################
1050
+
1051
+
1052
+ # TODO this class should fit on either p-values or scores,
1053
+ # depending on the mode.
1054
+ class GenericUnivariateSelect(_BaseFilter):
1055
+ """Univariate feature selector with configurable strategy.
1056
+
1057
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
1058
+
1059
+ Parameters
1060
+ ----------
1061
+ score_func : callable, default=f_classif
1062
+ Function taking two arrays X and y, and returning a pair of arrays
1063
+ (scores, pvalues). For modes 'percentile' or 'k_best' it can return
1064
+ a single array of scores.
1065
+
1066
+ mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
1067
+ Feature selection mode. Note that the `'percentile'` and `'k_best'`
1068
+ modes support unsupervised feature selection (when `y` is `None`).
1069
+
1070
+ param : "all", float or int, default=1e-5
1071
+ Parameter of the corresponding mode.
1072
+
1073
+ Attributes
1074
+ ----------
1075
+ scores_ : array-like of shape (n_features,)
1076
+ Scores of features.
1077
+
1078
+ pvalues_ : array-like of shape (n_features,)
1079
+ p-values of feature scores, None if `score_func` returned scores only.
1080
+
1081
+ n_features_in_ : int
1082
+ Number of features seen during :term:`fit`.
1083
+
1084
+ .. versionadded:: 0.24
1085
+
1086
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1087
+ Names of features seen during :term:`fit`. Defined only when `X`
1088
+ has feature names that are all strings.
1089
+
1090
+ .. versionadded:: 1.0
1091
+
1092
+ See Also
1093
+ --------
1094
+ f_classif : ANOVA F-value between label/feature for classification tasks.
1095
+ mutual_info_classif : Mutual information for a discrete target.
1096
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
1097
+ f_regression : F-value between label/feature for regression tasks.
1098
+ mutual_info_regression : Mutual information for a continuous target.
1099
+ SelectPercentile : Select features based on percentile of the highest
1100
+ scores.
1101
+ SelectKBest : Select features based on the k highest scores.
1102
+ SelectFpr : Select features based on a false positive rate test.
1103
+ SelectFdr : Select features based on an estimated false discovery rate.
1104
+ SelectFwe : Select features based on family-wise error rate.
1105
+
1106
+ Examples
1107
+ --------
1108
+ >>> from sklearn.datasets import load_breast_cancer
1109
+ >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2
1110
+ >>> X, y = load_breast_cancer(return_X_y=True)
1111
+ >>> X.shape
1112
+ (569, 30)
1113
+ >>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20)
1114
+ >>> X_new = transformer.fit_transform(X, y)
1115
+ >>> X_new.shape
1116
+ (569, 20)
1117
+ """
1118
+
1119
+ _selection_modes: dict = {
1120
+ "percentile": SelectPercentile,
1121
+ "k_best": SelectKBest,
1122
+ "fpr": SelectFpr,
1123
+ "fdr": SelectFdr,
1124
+ "fwe": SelectFwe,
1125
+ }
1126
+
1127
+ _parameter_constraints: dict = {
1128
+ **_BaseFilter._parameter_constraints,
1129
+ "mode": [StrOptions(set(_selection_modes.keys()))],
1130
+ "param": [Interval(Real, 0, None, closed="left"), StrOptions({"all"})],
1131
+ }
1132
+
1133
+ def __init__(self, score_func=f_classif, *, mode="percentile", param=1e-5):
1134
+ super().__init__(score_func=score_func)
1135
+ self.mode = mode
1136
+ self.param = param
1137
+
1138
+ def _make_selector(self):
1139
+ selector = self._selection_modes[self.mode](score_func=self.score_func)
1140
+
1141
+ # Now perform some acrobatics to set the right named parameter in
1142
+ # the selector
1143
+ possible_params = selector._get_param_names()
1144
+ possible_params.remove("score_func")
1145
+ selector.set_params(**{possible_params[0]: self.param})
1146
+
1147
+ return selector
1148
+
1149
+ def _more_tags(self):
1150
+ return {"preserves_dtype": [np.float64, np.float32]}
1151
+
1152
+ def _check_params(self, X, y):
1153
+ self._make_selector()._check_params(X, y)
1154
+
1155
+ def _get_support_mask(self):
1156
+ check_is_fitted(self)
1157
+
1158
+ selector = self._make_selector()
1159
+ selector.pvalues_ = self.pvalues_
1160
+ selector.scores_ = self.scores_
1161
+ return selector._get_support_mask()
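
Note: SelectFdr._get_support_mask above applies a Benjamini-Hochberg style cut-off: the sorted p-values are compared against the line alpha * rank / n_features, and every feature whose p-value does not exceed the largest p-value under that line is kept. A small worked sketch with hypothetical p-values (illustrative only):

import numpy as np

pvalues = np.array([0.001, 0.009, 0.04, 0.2, 0.5])  # hypothetical per-feature p-values
alpha = 0.05
n_features = len(pvalues)

sv = np.sort(pvalues)
# p-values lying under the Benjamini-Hochberg line alpha * rank / n_features
below = sv[sv <= alpha / n_features * np.arange(1, n_features + 1)]
if below.size == 0:
    mask = np.zeros_like(pvalues, dtype=bool)
else:
    mask = pvalues <= below.max()
print(mask)  # [ True  True False False False]
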
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py ADDED
@@ -0,0 +1,136 @@
1
+ # Author: Lars Buitinck
2
+ # License: 3-clause BSD
3
+ from numbers import Real
4
+
5
+ import numpy as np
6
+
7
+ from ..base import BaseEstimator, _fit_context
8
+ from ..utils._param_validation import Interval
9
+ from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
10
+ from ..utils.validation import check_is_fitted
11
+ from ._base import SelectorMixin
12
+
13
+
14
+ class VarianceThreshold(SelectorMixin, BaseEstimator):
15
+ """Feature selector that removes all low-variance features.
16
+
17
+ This feature selection algorithm looks only at the features (X), not the
18
+ desired outputs (y), and can thus be used for unsupervised learning.
19
+
20
+ Read more in the :ref:`User Guide <variance_threshold>`.
21
+
22
+ Parameters
23
+ ----------
24
+ threshold : float, default=0
25
+ Features with a training-set variance lower than this threshold will
26
+ be removed. The default is to keep all features with non-zero variance,
27
+ i.e. remove the features that have the same value in all samples.
28
+
29
+ Attributes
30
+ ----------
31
+ variances_ : array, shape (n_features,)
32
+ Variances of individual features.
33
+
34
+ n_features_in_ : int
35
+ Number of features seen during :term:`fit`.
36
+
37
+ .. versionadded:: 0.24
38
+
39
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
40
+ Names of features seen during :term:`fit`. Defined only when `X`
41
+ has feature names that are all strings.
42
+
43
+ .. versionadded:: 1.0
44
+
45
+ See Also
46
+ --------
47
+ SelectFromModel: Meta-transformer for selecting features based on
48
+ importance weights.
49
+ SelectPercentile : Select features according to a percentile of the highest
50
+ scores.
51
+ SequentialFeatureSelector : Transformer that performs Sequential Feature
52
+ Selection.
53
+
54
+ Notes
55
+ -----
56
+ Allows NaN in the input.
57
+ Raises ValueError if no feature in X meets the variance threshold.
58
+
59
+ Examples
60
+ --------
61
+ The following dataset has integer features, two of which are the same
62
+ in every sample. These are removed with the default setting for threshold::
63
+
64
+ >>> from sklearn.feature_selection import VarianceThreshold
65
+ >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
66
+ >>> selector = VarianceThreshold()
67
+ >>> selector.fit_transform(X)
68
+ array([[2, 0],
69
+ [1, 4],
70
+ [1, 1]])
71
+ """
72
+
73
+ _parameter_constraints: dict = {
74
+ "threshold": [Interval(Real, 0, None, closed="left")]
75
+ }
76
+
77
+ def __init__(self, threshold=0.0):
78
+ self.threshold = threshold
79
+
80
+ @_fit_context(prefer_skip_nested_validation=True)
81
+ def fit(self, X, y=None):
82
+ """Learn empirical variances from X.
83
+
84
+ Parameters
85
+ ----------
86
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
87
+ Data from which to compute variances, where `n_samples` is
88
+ the number of samples and `n_features` is the number of features.
89
+
90
+ y : any, default=None
91
+ Ignored. This parameter exists only for compatibility with
92
+ sklearn.pipeline.Pipeline.
93
+
94
+ Returns
95
+ -------
96
+ self : object
97
+ Returns the instance itself.
98
+ """
99
+ X = self._validate_data(
100
+ X,
101
+ accept_sparse=("csr", "csc"),
102
+ dtype=np.float64,
103
+ force_all_finite="allow-nan",
104
+ )
105
+
106
+ if hasattr(X, "toarray"): # sparse matrix
107
+ _, self.variances_ = mean_variance_axis(X, axis=0)
108
+ if self.threshold == 0:
109
+ mins, maxes = min_max_axis(X, axis=0)
110
+ peak_to_peaks = maxes - mins
111
+ else:
112
+ self.variances_ = np.nanvar(X, axis=0)
113
+ if self.threshold == 0:
114
+ peak_to_peaks = np.ptp(X, axis=0)
115
+
116
+ if self.threshold == 0:
117
+ # Use peak-to-peak to avoid numeric precision issues
118
+ # for constant features
119
+ compare_arr = np.array([self.variances_, peak_to_peaks])
120
+ self.variances_ = np.nanmin(compare_arr, axis=0)
121
+
122
+ if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
123
+ msg = "No feature in X meets the variance threshold {0:.5f}"
124
+ if X.shape[0] == 1:
125
+ msg += " (X contains only one sample)"
126
+ raise ValueError(msg.format(self.threshold))
127
+
128
+ return self
129
+
130
+ def _get_support_mask(self):
131
+ check_is_fitted(self)
132
+
133
+ return self.variances_ > self.threshold
134
+
135
+ def _more_tags(self):
136
+ return {"allow_nan": True}
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (4.79 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc ADDED
Binary file (2.96 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc ADDED
Binary file (21.7 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc ADDED
Binary file (19.3 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc ADDED
Binary file (6.5 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc ADDED
Binary file (16.9 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc ADDED
Binary file (8.54 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc ADDED
Binary file (2.43 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py ADDED
@@ -0,0 +1,153 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_array_equal
4
+
5
+ from sklearn.base import BaseEstimator
6
+ from sklearn.feature_selection._base import SelectorMixin
7
+ from sklearn.utils.fixes import CSC_CONTAINERS
8
+
9
+
10
+ class StepSelector(SelectorMixin, BaseEstimator):
11
+ """Retain every `step` features (beginning with 0).
12
+
13
+ If `step < 1`, then no features are selected.
14
+ """
15
+
16
+ def __init__(self, step=2):
17
+ self.step = step
18
+
19
+ def fit(self, X, y=None):
20
+ X = self._validate_data(X, accept_sparse="csc")
21
+ return self
22
+
23
+ def _get_support_mask(self):
24
+ mask = np.zeros(self.n_features_in_, dtype=bool)
25
+ if self.step >= 1:
26
+ mask[:: self.step] = True
27
+ return mask
28
+
29
+
30
+ support = [True, False] * 5
31
+ support_inds = [0, 2, 4, 6, 8]
32
+ X = np.arange(20).reshape(2, 10)
33
+ Xt = np.arange(0, 20, 2).reshape(2, 5)
34
+ Xinv = X.copy()
35
+ Xinv[:, 1::2] = 0
36
+ y = [0, 1]
37
+ feature_names = list("ABCDEFGHIJ")
38
+ feature_names_t = feature_names[::2]
39
+ feature_names_inv = np.array(feature_names)
40
+ feature_names_inv[1::2] = ""
41
+
42
+
43
+ def test_transform_dense():
44
+ sel = StepSelector()
45
+ Xt_actual = sel.fit(X, y).transform(X)
46
+ Xt_actual2 = StepSelector().fit_transform(X, y)
47
+ assert_array_equal(Xt, Xt_actual)
48
+ assert_array_equal(Xt, Xt_actual2)
49
+
50
+ # Check dtype matches
51
+ assert np.int32 == sel.transform(X.astype(np.int32)).dtype
52
+ assert np.float32 == sel.transform(X.astype(np.float32)).dtype
53
+
54
+ # Check 1d list and other dtype:
55
+ names_t_actual = sel.transform([feature_names])
56
+ assert_array_equal(feature_names_t, names_t_actual.ravel())
57
+
58
+ # Check wrong shape raises error
59
+ with pytest.raises(ValueError):
60
+ sel.transform(np.array([[1], [2]]))
61
+
62
+
63
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
64
+ def test_transform_sparse(csc_container):
65
+ X_sp = csc_container(X)
66
+ sel = StepSelector()
67
+ Xt_actual = sel.fit(X_sp).transform(X_sp)
68
+ Xt_actual2 = sel.fit_transform(X_sp)
69
+ assert_array_equal(Xt, Xt_actual.toarray())
70
+ assert_array_equal(Xt, Xt_actual2.toarray())
71
+
72
+ # Check dtype matches
73
+ assert np.int32 == sel.transform(X_sp.astype(np.int32)).dtype
74
+ assert np.float32 == sel.transform(X_sp.astype(np.float32)).dtype
75
+
76
+ # Check wrong shape raises error
77
+ with pytest.raises(ValueError):
78
+ sel.transform(np.array([[1], [2]]))
79
+
80
+
81
+ def test_inverse_transform_dense():
82
+ sel = StepSelector()
83
+ Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
84
+ assert_array_equal(Xinv, Xinv_actual)
85
+
86
+ # Check dtype matches
87
+ assert np.int32 == sel.inverse_transform(Xt.astype(np.int32)).dtype
88
+ assert np.float32 == sel.inverse_transform(Xt.astype(np.float32)).dtype
89
+
90
+ # Check 1d list and other dtype:
91
+ names_inv_actual = sel.inverse_transform([feature_names_t])
92
+ assert_array_equal(feature_names_inv, names_inv_actual.ravel())
93
+
94
+ # Check wrong shape raises error
95
+ with pytest.raises(ValueError):
96
+ sel.inverse_transform(np.array([[1], [2]]))
97
+
98
+
99
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
100
+ def test_inverse_transform_sparse(csc_container):
101
+ X_sp = csc_container(X)
102
+ Xt_sp = csc_container(Xt)
103
+ sel = StepSelector()
104
+ Xinv_actual = sel.fit(X_sp).inverse_transform(Xt_sp)
105
+ assert_array_equal(Xinv, Xinv_actual.toarray())
106
+
107
+ # Check dtype matches
108
+ assert np.int32 == sel.inverse_transform(Xt_sp.astype(np.int32)).dtype
109
+ assert np.float32 == sel.inverse_transform(Xt_sp.astype(np.float32)).dtype
110
+
111
+ # Check wrong shape raises error
112
+ with pytest.raises(ValueError):
113
+ sel.inverse_transform(np.array([[1], [2]]))
114
+
115
+
116
+ def test_get_support():
117
+ sel = StepSelector()
118
+ sel.fit(X, y)
119
+ assert_array_equal(support, sel.get_support())
120
+ assert_array_equal(support_inds, sel.get_support(indices=True))
121
+
122
+
123
+ def test_output_dataframe():
124
+ """Check output dtypes for dataframes is consistent with the input dtypes."""
125
+ pd = pytest.importorskip("pandas")
126
+
127
+ X = pd.DataFrame(
128
+ {
129
+ "a": pd.Series([1.0, 2.4, 4.5], dtype=np.float32),
130
+ "b": pd.Series(["a", "b", "a"], dtype="category"),
131
+ "c": pd.Series(["j", "b", "b"], dtype="category"),
132
+ "d": pd.Series([3.0, 2.4, 1.2], dtype=np.float64),
133
+ }
134
+ )
135
+
136
+ for step in [2, 3]:
137
+ sel = StepSelector(step=step).set_output(transform="pandas")
138
+ sel.fit(X)
139
+
140
+ output = sel.transform(X)
141
+ for name, dtype in output.dtypes.items():
142
+ assert dtype == X.dtypes[name]
143
+
144
+ # step=0 will select nothing
145
+ sel0 = StepSelector(step=0).set_output(transform="pandas")
146
+ sel0.fit(X, y)
147
+
148
+ msg = "No features were selected"
149
+ with pytest.warns(UserWarning, match=msg):
150
+ output0 = sel0.transform(X)
151
+
152
+ assert_array_equal(output0.index, X.index)
153
+ assert output0.shape == (X.shape[0], 0)
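The `StepSelector` helper above illustrates the `SelectorMixin` contract these tests exercise: an estimator implements only `fit` and `_get_support_mask`, and `transform`, `inverse_transform`, `get_support`, and pandas output come from the mixin. A hedged sketch of another selector built the same way (the class name is illustrative and not part of the test file):

# Keep only the first `n` columns; everything beyond fit/_get_support_mask is inherited.
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.feature_selection._base import SelectorMixin  # private import, as in the tests above

class FirstNSelector(SelectorMixin, BaseEstimator):
    def __init__(self, n=2):
        self.n = n

    def fit(self, X, y=None):
        self._validate_data(X)  # records n_features_in_
        return self

    def _get_support_mask(self):
        mask = np.zeros(self.n_features_in_, dtype=bool)
        mask[: self.n] = True
        return mask

X = np.arange(20).reshape(2, 10)
print(FirstNSelector(n=3).fit_transform(X))  # only the first three columns remain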
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py ADDED
@@ -0,0 +1,93 @@
1
+ """
2
+ Tests for chi2, currently the only feature selection function designed
3
+ specifically to work with sparse matrices.
4
+ """
5
+
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytest
10
+ import scipy.stats
11
+
12
+ from sklearn.feature_selection import SelectKBest, chi2
13
+ from sklearn.feature_selection._univariate_selection import _chisquare
14
+ from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal
15
+ from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS
16
+
17
+ # Feature 0 is highly informative for class 1;
18
+ # feature 1 is the same everywhere;
19
+ # feature 2 is a bit informative for class 2.
20
+ X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]]
21
+ y = [0, 1, 2, 2]
22
+
23
+
24
+ def mkchi2(k):
25
+ """Make k-best chi2 selector"""
26
+ return SelectKBest(chi2, k=k)
27
+
28
+
29
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
30
+ def test_chi2(csr_container):
31
+ # Test Chi2 feature extraction
32
+
33
+ chi2 = mkchi2(k=1).fit(X, y)
34
+ chi2 = mkchi2(k=1).fit(X, y)
35
+ assert_array_equal(chi2.get_support(indices=True), [0])
36
+ assert_array_equal(chi2.transform(X), np.array(X)[:, [0]])
37
+
38
+ chi2 = mkchi2(k=2).fit(X, y)
39
+ assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
40
+
41
+ Xsp = csr_container(X, dtype=np.float64)
42
+ chi2 = mkchi2(k=2).fit(Xsp, y)
43
+ assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
44
+ Xtrans = chi2.transform(Xsp)
45
+ assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])
46
+
47
+ # == doesn't work on scipy.sparse matrices
48
+ Xtrans = Xtrans.toarray()
49
+ Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
50
+ assert_array_almost_equal(Xtrans, Xtrans2)
51
+
52
+
53
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
54
+ def test_chi2_coo(coo_container):
55
+ # Check that chi2 works with a COO matrix
56
+ # (as returned by CountVectorizer, DictVectorizer)
57
+ Xcoo = coo_container(X)
58
+ mkchi2(k=2).fit_transform(Xcoo, y)
59
+ # if we got here without an exception, we're safe
60
+
61
+
62
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
63
+ def test_chi2_negative(csr_container):
64
+ # Check for proper error on negative numbers in the input X.
65
+ X, y = [[0, 1], [-1e-20, 1]], [0, 1]
66
+ for X in (X, np.array(X), csr_container(X)):
67
+ with pytest.raises(ValueError):
68
+ chi2(X, y)
69
+
70
+
71
+ def test_chi2_unused_feature():
72
+ # Unused feature should evaluate to NaN
73
+ # and should issue no runtime warning
74
+ with warnings.catch_warnings(record=True) as warned:
75
+ warnings.simplefilter("always")
76
+ chi, p = chi2([[1, 0], [0, 0]], [1, 0])
77
+ for w in warned:
78
+ if "divide by zero" in repr(w):
79
+ raise AssertionError("Found unexpected warning %s" % w)
80
+ assert_array_equal(chi, [1, np.nan])
81
+ assert_array_equal(p[1], np.nan)
82
+
83
+
84
+ def test_chisquare():
85
+ # Test replacement for scipy.stats.chisquare against the original.
86
+ obs = np.array([[2.0, 2.0], [1.0, 1.0]])
87
+ exp = np.array([[1.5, 1.5], [1.5, 1.5]])
88
+ # call SciPy first because our version overwrites obs
89
+ chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
90
+ chi_our, p_our = _chisquare(obs, exp)
91
+
92
+ assert_array_almost_equal(chi_scp, chi_our)
93
+ assert_array_almost_equal(p_scp, p_our)
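`test_chi2_negative` above pins down that `chi2` raises on negative feature values. A small sketch (illustrative only; the rescaling step is a common workaround, not something this test file does) of making real-valued data acceptable to chi2:

# chi2 expects non-negative features (counts, frequencies, or similar),
# so real-valued inputs are typically rescaled to [0, 1] first.
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.preprocessing import MinMaxScaler

X, y = load_iris(return_X_y=True)
X_nonneg = MinMaxScaler().fit_transform(X)  # guarantees the non-negativity chi2 requires
X_best = SelectKBest(chi2, k=2).fit_transform(X_nonneg, y)
print(X_best.shape)  # (150, 2)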
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py ADDED
@@ -0,0 +1,1017 @@
1
+ """
2
+ Todo: cross-check the F-value with statsmodels
3
+ """
4
+ import itertools
5
+ import warnings
6
+
7
+ import numpy as np
8
+ import pytest
9
+ from numpy.testing import assert_allclose
10
+ from scipy import sparse, stats
11
+
12
+ from sklearn.datasets import load_iris, make_classification, make_regression
13
+ from sklearn.feature_selection import (
14
+ GenericUnivariateSelect,
15
+ SelectFdr,
16
+ SelectFpr,
17
+ SelectFwe,
18
+ SelectKBest,
19
+ SelectPercentile,
20
+ chi2,
21
+ f_classif,
22
+ f_oneway,
23
+ f_regression,
24
+ mutual_info_classif,
25
+ mutual_info_regression,
26
+ r_regression,
27
+ )
28
+ from sklearn.utils import safe_mask
29
+ from sklearn.utils._testing import (
30
+ _convert_container,
31
+ assert_almost_equal,
32
+ assert_array_almost_equal,
33
+ assert_array_equal,
34
+ ignore_warnings,
35
+ )
36
+ from sklearn.utils.fixes import CSR_CONTAINERS
37
+
38
+ ##############################################################################
39
+ # Test the score functions
40
+
41
+
42
+ def test_f_oneway_vs_scipy_stats():
43
+ # Test that our f_oneway gives the same result as scipy.stats
44
+ rng = np.random.RandomState(0)
45
+ X1 = rng.randn(10, 3)
46
+ X2 = 1 + rng.randn(10, 3)
47
+ f, pv = stats.f_oneway(X1, X2)
48
+ f2, pv2 = f_oneway(X1, X2)
49
+ assert np.allclose(f, f2)
50
+ assert np.allclose(pv, pv2)
51
+
52
+
53
+ def test_f_oneway_ints():
54
+ # Smoke test f_oneway on integers: check that it does not raise casting errors
55
+ # with recent numpy versions
56
+ rng = np.random.RandomState(0)
57
+ X = rng.randint(10, size=(10, 10))
58
+ y = np.arange(10)
59
+ fint, pint = f_oneway(X, y)
60
+
61
+ # test that it gives the same result as with float
62
+ f, p = f_oneway(X.astype(float), y)
63
+ assert_array_almost_equal(f, fint, decimal=4)
64
+ assert_array_almost_equal(p, pint, decimal=4)
65
+
66
+
67
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
68
+ def test_f_classif(csr_container):
69
+ # Test whether the F test yields meaningful results
70
+ # on a simple simulated classification problem
71
+ X, y = make_classification(
72
+ n_samples=200,
73
+ n_features=20,
74
+ n_informative=3,
75
+ n_redundant=2,
76
+ n_repeated=0,
77
+ n_classes=8,
78
+ n_clusters_per_class=1,
79
+ flip_y=0.0,
80
+ class_sep=10,
81
+ shuffle=False,
82
+ random_state=0,
83
+ )
84
+
85
+ F, pv = f_classif(X, y)
86
+ F_sparse, pv_sparse = f_classif(csr_container(X), y)
87
+ assert (F > 0).all()
88
+ assert (pv > 0).all()
89
+ assert (pv < 1).all()
90
+ assert (pv[:5] < 0.05).all()
91
+ assert (pv[5:] > 1.0e-4).all()
92
+ assert_array_almost_equal(F_sparse, F)
93
+ assert_array_almost_equal(pv_sparse, pv)
94
+
95
+
96
+ @pytest.mark.parametrize("center", [True, False])
97
+ def test_r_regression(center):
98
+ X, y = make_regression(
99
+ n_samples=2000, n_features=20, n_informative=5, shuffle=False, random_state=0
100
+ )
101
+
102
+ corr_coeffs = r_regression(X, y, center=center)
103
+ assert (-1 < corr_coeffs).all()
104
+ assert (corr_coeffs < 1).all()
105
+
106
+ sparse_X = _convert_container(X, "sparse")
107
+
108
+ sparse_corr_coeffs = r_regression(sparse_X, y, center=center)
109
+ assert_allclose(sparse_corr_coeffs, corr_coeffs)
110
+
111
+ # Testing against numpy for reference
112
+ Z = np.hstack((X, y[:, np.newaxis]))
113
+ correlation_matrix = np.corrcoef(Z, rowvar=False)
114
+ np_corr_coeffs = correlation_matrix[:-1, -1]
115
+ assert_array_almost_equal(np_corr_coeffs, corr_coeffs, decimal=3)
116
+
117
+
118
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
119
+ def test_f_regression(csr_container):
120
+ # Test whether the F test yields meaningful results
121
+ # on a simple simulated regression problem
122
+ X, y = make_regression(
123
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
124
+ )
125
+
126
+ F, pv = f_regression(X, y)
127
+ assert (F > 0).all()
128
+ assert (pv > 0).all()
129
+ assert (pv < 1).all()
130
+ assert (pv[:5] < 0.05).all()
131
+ assert (pv[5:] > 1.0e-4).all()
132
+
133
+ # with centering, compare with sparse
134
+ F, pv = f_regression(X, y, center=True)
135
+ F_sparse, pv_sparse = f_regression(csr_container(X), y, center=True)
136
+ assert_allclose(F_sparse, F)
137
+ assert_allclose(pv_sparse, pv)
138
+
139
+ # again without centering, compare with sparse
140
+ F, pv = f_regression(X, y, center=False)
141
+ F_sparse, pv_sparse = f_regression(csr_container(X), y, center=False)
142
+ assert_allclose(F_sparse, F)
143
+ assert_allclose(pv_sparse, pv)
144
+
145
+
146
+ def test_f_regression_input_dtype():
147
+ # Test whether f_regression returns the same value
148
+ # for any numeric data_type
149
+ rng = np.random.RandomState(0)
150
+ X = rng.rand(10, 20)
151
+ y = np.arange(10).astype(int)
152
+
153
+ F1, pv1 = f_regression(X, y)
154
+ F2, pv2 = f_regression(X, y.astype(float))
155
+ assert_allclose(F1, F2, 5)
156
+ assert_allclose(pv1, pv2, 5)
157
+
158
+
159
+ def test_f_regression_center():
160
+ # Test whether f_regression preserves dof according to 'center' argument
161
+ # We use two centered variates so we have a simple relationship between
162
+ # F-score with variates centering and F-score without variates centering.
163
+ # Create toy example
164
+ X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
165
+ n_samples = X.size
166
+ Y = np.ones(n_samples)
167
+ Y[::2] *= -1.0
168
+ Y[0] = 0.0 # have Y mean being null
169
+
170
+ F1, _ = f_regression(X, Y, center=True)
171
+ F2, _ = f_regression(X, Y, center=False)
172
+ assert_allclose(F1 * (n_samples - 1.0) / (n_samples - 2.0), F2)
173
+ assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
174
+
175
+
176
+ @pytest.mark.parametrize(
177
+ "X, y, expected_corr_coef, force_finite",
178
+ [
179
+ (
180
+ # A feature in X is constant - forcing finite
181
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
182
+ np.array([0, 1, 1, 0]),
183
+ np.array([0.0, 0.32075]),
184
+ True,
185
+ ),
186
+ (
187
+ # The target y is constant - forcing finite
188
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
189
+ np.array([0, 0, 0, 0]),
190
+ np.array([0.0, 0.0]),
191
+ True,
192
+ ),
193
+ (
194
+ # A feature in X is constant - not forcing finite
195
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
196
+ np.array([0, 1, 1, 0]),
197
+ np.array([np.nan, 0.32075]),
198
+ False,
199
+ ),
200
+ (
201
+ # The target y is constant - not forcing finite
202
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
203
+ np.array([0, 0, 0, 0]),
204
+ np.array([np.nan, np.nan]),
205
+ False,
206
+ ),
207
+ ],
208
+ )
209
+ def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite):
210
+ """Check the behaviour of `force_finite` for some corner cases with `r_regression`.
211
+
212
+ Non-regression test for:
213
+ https://github.com/scikit-learn/scikit-learn/issues/15672
214
+ """
215
+ with warnings.catch_warnings():
216
+ warnings.simplefilter("error", RuntimeWarning)
217
+ corr_coef = r_regression(X, y, force_finite=force_finite)
218
+ np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef)
219
+
220
+
221
+ @pytest.mark.parametrize(
222
+ "X, y, expected_f_statistic, expected_p_values, force_finite",
223
+ [
224
+ (
225
+ # A feature in X is constant - forcing finite
226
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
227
+ np.array([0, 1, 1, 0]),
228
+ np.array([0.0, 0.2293578]),
229
+ np.array([1.0, 0.67924985]),
230
+ True,
231
+ ),
232
+ (
233
+ # The target y is constant - forcing finite
234
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
235
+ np.array([0, 0, 0, 0]),
236
+ np.array([0.0, 0.0]),
237
+ np.array([1.0, 1.0]),
238
+ True,
239
+ ),
240
+ (
241
+ # Feature in X correlated with y - forcing finite
242
+ np.array([[0, 1], [1, 0], [2, 10], [3, 4]]),
243
+ np.array([0, 1, 2, 3]),
244
+ np.array([np.finfo(np.float64).max, 0.845433]),
245
+ np.array([0.0, 0.454913]),
246
+ True,
247
+ ),
248
+ (
249
+ # Feature in X anti-correlated with y - forcing finite
250
+ np.array([[3, 1], [2, 0], [1, 10], [0, 4]]),
251
+ np.array([0, 1, 2, 3]),
252
+ np.array([np.finfo(np.float64).max, 0.845433]),
253
+ np.array([0.0, 0.454913]),
254
+ True,
255
+ ),
256
+ (
257
+ # A feature in X is constant - not forcing finite
258
+ np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
259
+ np.array([0, 1, 1, 0]),
260
+ np.array([np.nan, 0.2293578]),
261
+ np.array([np.nan, 0.67924985]),
262
+ False,
263
+ ),
264
+ (
265
+ # The target y is constant - not forcing finite
266
+ np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
267
+ np.array([0, 0, 0, 0]),
268
+ np.array([np.nan, np.nan]),
269
+ np.array([np.nan, np.nan]),
270
+ False,
271
+ ),
272
+ (
273
+ # Feature in X correlated with y - not forcing finite
274
+ np.array([[0, 1], [1, 0], [2, 10], [3, 4]]),
275
+ np.array([0, 1, 2, 3]),
276
+ np.array([np.inf, 0.845433]),
277
+ np.array([0.0, 0.454913]),
278
+ False,
279
+ ),
280
+ (
281
+ # Feature in X anti-correlated with y - not forcing finite
282
+ np.array([[3, 1], [2, 0], [1, 10], [0, 4]]),
283
+ np.array([0, 1, 2, 3]),
284
+ np.array([np.inf, 0.845433]),
285
+ np.array([0.0, 0.454913]),
286
+ False,
287
+ ),
288
+ ],
289
+ )
290
+ def test_f_regression_corner_case(
291
+ X, y, expected_f_statistic, expected_p_values, force_finite
292
+ ):
293
+ """Check the behaviour of `force_finite` for some corner cases with `f_regression`.
294
+
295
+ Non-regression test for:
296
+ https://github.com/scikit-learn/scikit-learn/issues/15672
297
+ """
298
+ with warnings.catch_warnings():
299
+ warnings.simplefilter("error", RuntimeWarning)
300
+ f_statistic, p_values = f_regression(X, y, force_finite=force_finite)
301
+ np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic)
302
+ np.testing.assert_array_almost_equal(p_values, expected_p_values)
303
+
304
+
305
+ def test_f_classif_multi_class():
306
+ # Test whether the F test yields meaningful results
307
+ # on a simple simulated classification problem
308
+ X, y = make_classification(
309
+ n_samples=200,
310
+ n_features=20,
311
+ n_informative=3,
312
+ n_redundant=2,
313
+ n_repeated=0,
314
+ n_classes=8,
315
+ n_clusters_per_class=1,
316
+ flip_y=0.0,
317
+ class_sep=10,
318
+ shuffle=False,
319
+ random_state=0,
320
+ )
321
+
322
+ F, pv = f_classif(X, y)
323
+ assert (F > 0).all()
324
+ assert (pv > 0).all()
325
+ assert (pv < 1).all()
326
+ assert (pv[:5] < 0.05).all()
327
+ assert (pv[5:] > 1.0e-4).all()
328
+
329
+
330
+ def test_select_percentile_classif():
331
+ # Test whether the relative univariate feature selection
332
+ # gets the correct items in a simple classification problem
333
+ # with the percentile heuristic
334
+ X, y = make_classification(
335
+ n_samples=200,
336
+ n_features=20,
337
+ n_informative=3,
338
+ n_redundant=2,
339
+ n_repeated=0,
340
+ n_classes=8,
341
+ n_clusters_per_class=1,
342
+ flip_y=0.0,
343
+ class_sep=10,
344
+ shuffle=False,
345
+ random_state=0,
346
+ )
347
+
348
+ univariate_filter = SelectPercentile(f_classif, percentile=25)
349
+ X_r = univariate_filter.fit(X, y).transform(X)
350
+ X_r2 = (
351
+ GenericUnivariateSelect(f_classif, mode="percentile", param=25)
352
+ .fit(X, y)
353
+ .transform(X)
354
+ )
355
+ assert_array_equal(X_r, X_r2)
356
+ support = univariate_filter.get_support()
357
+ gtruth = np.zeros(20)
358
+ gtruth[:5] = 1
359
+ assert_array_equal(support, gtruth)
360
+
361
+
362
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
363
+ def test_select_percentile_classif_sparse(csr_container):
364
+ # Test whether the relative univariate feature selection
365
+ # gets the correct items in a simple classification problem
366
+ # with the percentile heuristic
367
+ X, y = make_classification(
368
+ n_samples=200,
369
+ n_features=20,
370
+ n_informative=3,
371
+ n_redundant=2,
372
+ n_repeated=0,
373
+ n_classes=8,
374
+ n_clusters_per_class=1,
375
+ flip_y=0.0,
376
+ class_sep=10,
377
+ shuffle=False,
378
+ random_state=0,
379
+ )
380
+ X = csr_container(X)
381
+ univariate_filter = SelectPercentile(f_classif, percentile=25)
382
+ X_r = univariate_filter.fit(X, y).transform(X)
383
+ X_r2 = (
384
+ GenericUnivariateSelect(f_classif, mode="percentile", param=25)
385
+ .fit(X, y)
386
+ .transform(X)
387
+ )
388
+ assert_array_equal(X_r.toarray(), X_r2.toarray())
389
+ support = univariate_filter.get_support()
390
+ gtruth = np.zeros(20)
391
+ gtruth[:5] = 1
392
+ assert_array_equal(support, gtruth)
393
+
394
+ X_r2inv = univariate_filter.inverse_transform(X_r2)
395
+ assert sparse.issparse(X_r2inv)
396
+ support_mask = safe_mask(X_r2inv, support)
397
+ assert X_r2inv.shape == X.shape
398
+ assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
399
+ # Check other columns are empty
400
+ assert X_r2inv.nnz == X_r.nnz
401
+
402
+
403
+ ##############################################################################
404
+ # Test univariate selection in classification settings
405
+
406
+
407
+ def test_select_kbest_classif():
408
+ # Test whether the relative univariate feature selection
409
+ # gets the correct items in a simple classification problem
410
+ # with the k best heuristic
411
+ X, y = make_classification(
412
+ n_samples=200,
413
+ n_features=20,
414
+ n_informative=3,
415
+ n_redundant=2,
416
+ n_repeated=0,
417
+ n_classes=8,
418
+ n_clusters_per_class=1,
419
+ flip_y=0.0,
420
+ class_sep=10,
421
+ shuffle=False,
422
+ random_state=0,
423
+ )
424
+
425
+ univariate_filter = SelectKBest(f_classif, k=5)
426
+ X_r = univariate_filter.fit(X, y).transform(X)
427
+ X_r2 = (
428
+ GenericUnivariateSelect(f_classif, mode="k_best", param=5)
429
+ .fit(X, y)
430
+ .transform(X)
431
+ )
432
+ assert_array_equal(X_r, X_r2)
433
+ support = univariate_filter.get_support()
434
+ gtruth = np.zeros(20)
435
+ gtruth[:5] = 1
436
+ assert_array_equal(support, gtruth)
437
+
438
+
439
+ def test_select_kbest_all():
440
+ # Test whether k="all" correctly returns all features.
441
+ X, y = make_classification(
442
+ n_samples=20, n_features=10, shuffle=False, random_state=0
443
+ )
444
+
445
+ univariate_filter = SelectKBest(f_classif, k="all")
446
+ X_r = univariate_filter.fit(X, y).transform(X)
447
+ assert_array_equal(X, X_r)
448
+ # Non-regression test for:
449
+ # https://github.com/scikit-learn/scikit-learn/issues/24949
450
+ X_r2 = (
451
+ GenericUnivariateSelect(f_classif, mode="k_best", param="all")
452
+ .fit(X, y)
453
+ .transform(X)
454
+ )
455
+ assert_array_equal(X_r, X_r2)
456
+
457
+
458
+ @pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
459
+ def test_select_kbest_zero(dtype_in):
460
+ # Test whether k=0 correctly returns no features.
461
+ X, y = make_classification(
462
+ n_samples=20, n_features=10, shuffle=False, random_state=0
463
+ )
464
+ X = X.astype(dtype_in)
465
+
466
+ univariate_filter = SelectKBest(f_classif, k=0)
467
+ univariate_filter.fit(X, y)
468
+ support = univariate_filter.get_support()
469
+ gtruth = np.zeros(10, dtype=bool)
470
+ assert_array_equal(support, gtruth)
471
+ with pytest.warns(UserWarning, match="No features were selected"):
472
+ X_selected = univariate_filter.transform(X)
473
+ assert X_selected.shape == (20, 0)
474
+ assert X_selected.dtype == dtype_in
475
+
476
+
477
+ def test_select_heuristics_classif():
478
+ # Test whether the relative univariate feature selection
479
+ # gets the correct items in a simple classification problem
480
+ # with the fdr, fwe and fpr heuristics
481
+ X, y = make_classification(
482
+ n_samples=200,
483
+ n_features=20,
484
+ n_informative=3,
485
+ n_redundant=2,
486
+ n_repeated=0,
487
+ n_classes=8,
488
+ n_clusters_per_class=1,
489
+ flip_y=0.0,
490
+ class_sep=10,
491
+ shuffle=False,
492
+ random_state=0,
493
+ )
494
+
495
+ univariate_filter = SelectFwe(f_classif, alpha=0.01)
496
+ X_r = univariate_filter.fit(X, y).transform(X)
497
+ gtruth = np.zeros(20)
498
+ gtruth[:5] = 1
499
+ for mode in ["fdr", "fpr", "fwe"]:
500
+ X_r2 = (
501
+ GenericUnivariateSelect(f_classif, mode=mode, param=0.01)
502
+ .fit(X, y)
503
+ .transform(X)
504
+ )
505
+ assert_array_equal(X_r, X_r2)
506
+ support = univariate_filter.get_support()
507
+ assert_allclose(support, gtruth)
508
+
509
+
510
+ ##############################################################################
511
+ # Test univariate selection in regression settings
512
+
513
+
514
+ def assert_best_scores_kept(score_filter):
515
+ scores = score_filter.scores_
516
+ support = score_filter.get_support()
517
+ assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum() :])
518
+
519
+
520
+ def test_select_percentile_regression():
521
+ # Test whether the relative univariate feature selection
522
+ # gets the correct items in a simple regression problem
523
+ # with the percentile heuristic
524
+ X, y = make_regression(
525
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
526
+ )
527
+
528
+ univariate_filter = SelectPercentile(f_regression, percentile=25)
529
+ X_r = univariate_filter.fit(X, y).transform(X)
530
+ assert_best_scores_kept(univariate_filter)
531
+ X_r2 = (
532
+ GenericUnivariateSelect(f_regression, mode="percentile", param=25)
533
+ .fit(X, y)
534
+ .transform(X)
535
+ )
536
+ assert_array_equal(X_r, X_r2)
537
+ support = univariate_filter.get_support()
538
+ gtruth = np.zeros(20)
539
+ gtruth[:5] = 1
540
+ assert_array_equal(support, gtruth)
541
+ X_2 = X.copy()
542
+ X_2[:, np.logical_not(support)] = 0
543
+ assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
544
+ # Check inverse_transform respects dtype
545
+ assert_array_equal(
546
+ X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool))
547
+ )
548
+
549
+
550
+ def test_select_percentile_regression_full():
551
+ # Test whether the relative univariate feature selection
552
+ # selects all features when '100%' is asked.
553
+ X, y = make_regression(
554
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
555
+ )
556
+
557
+ univariate_filter = SelectPercentile(f_regression, percentile=100)
558
+ X_r = univariate_filter.fit(X, y).transform(X)
559
+ assert_best_scores_kept(univariate_filter)
560
+ X_r2 = (
561
+ GenericUnivariateSelect(f_regression, mode="percentile", param=100)
562
+ .fit(X, y)
563
+ .transform(X)
564
+ )
565
+ assert_array_equal(X_r, X_r2)
566
+ support = univariate_filter.get_support()
567
+ gtruth = np.ones(20)
568
+ assert_array_equal(support, gtruth)
569
+
570
+
571
+ def test_select_kbest_regression():
572
+ # Test whether the relative univariate feature selection
573
+ # gets the correct items in a simple regression problem
574
+ # with the k best heuristic
575
+ X, y = make_regression(
576
+ n_samples=200,
577
+ n_features=20,
578
+ n_informative=5,
579
+ shuffle=False,
580
+ random_state=0,
581
+ noise=10,
582
+ )
583
+
584
+ univariate_filter = SelectKBest(f_regression, k=5)
585
+ X_r = univariate_filter.fit(X, y).transform(X)
586
+ assert_best_scores_kept(univariate_filter)
587
+ X_r2 = (
588
+ GenericUnivariateSelect(f_regression, mode="k_best", param=5)
589
+ .fit(X, y)
590
+ .transform(X)
591
+ )
592
+ assert_array_equal(X_r, X_r2)
593
+ support = univariate_filter.get_support()
594
+ gtruth = np.zeros(20)
595
+ gtruth[:5] = 1
596
+ assert_array_equal(support, gtruth)
597
+
598
+
599
+ def test_select_heuristics_regression():
600
+ # Test whether the relative univariate feature selection
601
+ # gets the correct items in a simple regression problem
602
+ # with the fpr, fdr or fwe heuristics
603
+ X, y = make_regression(
604
+ n_samples=200,
605
+ n_features=20,
606
+ n_informative=5,
607
+ shuffle=False,
608
+ random_state=0,
609
+ noise=10,
610
+ )
611
+
612
+ univariate_filter = SelectFpr(f_regression, alpha=0.01)
613
+ X_r = univariate_filter.fit(X, y).transform(X)
614
+ gtruth = np.zeros(20)
615
+ gtruth[:5] = 1
616
+ for mode in ["fdr", "fpr", "fwe"]:
617
+ X_r2 = (
618
+ GenericUnivariateSelect(f_regression, mode=mode, param=0.01)
619
+ .fit(X, y)
620
+ .transform(X)
621
+ )
622
+ assert_array_equal(X_r, X_r2)
623
+ support = univariate_filter.get_support()
624
+ assert_array_equal(support[:5], np.ones((5,), dtype=bool))
625
+ assert np.sum(support[5:] == 1) < 3
626
+
627
+
628
+ def test_boundary_case_ch2():
629
+ # Test boundary case, and always aim to select 1 feature.
630
+ X = np.array([[10, 20], [20, 20], [20, 30]])
631
+ y = np.array([[1], [0], [0]])
632
+ scores, pvalues = chi2(X, y)
633
+ assert_array_almost_equal(scores, np.array([4.0, 0.71428571]))
634
+ assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
635
+
636
+ filter_fdr = SelectFdr(chi2, alpha=0.1)
637
+ filter_fdr.fit(X, y)
638
+ support_fdr = filter_fdr.get_support()
639
+ assert_array_equal(support_fdr, np.array([True, False]))
640
+
641
+ filter_kbest = SelectKBest(chi2, k=1)
642
+ filter_kbest.fit(X, y)
643
+ support_kbest = filter_kbest.get_support()
644
+ assert_array_equal(support_kbest, np.array([True, False]))
645
+
646
+ filter_percentile = SelectPercentile(chi2, percentile=50)
647
+ filter_percentile.fit(X, y)
648
+ support_percentile = filter_percentile.get_support()
649
+ assert_array_equal(support_percentile, np.array([True, False]))
650
+
651
+ filter_fpr = SelectFpr(chi2, alpha=0.1)
652
+ filter_fpr.fit(X, y)
653
+ support_fpr = filter_fpr.get_support()
654
+ assert_array_equal(support_fpr, np.array([True, False]))
655
+
656
+ filter_fwe = SelectFwe(chi2, alpha=0.1)
657
+ filter_fwe.fit(X, y)
658
+ support_fwe = filter_fwe.get_support()
659
+ assert_array_equal(support_fwe, np.array([True, False]))
660
+
661
+
662
+ @pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1])
663
+ @pytest.mark.parametrize("n_informative", [1, 5, 10])
664
+ def test_select_fdr_regression(alpha, n_informative):
665
+ # Test that fdr heuristic actually has low FDR.
666
+ def single_fdr(alpha, n_informative, random_state):
667
+ X, y = make_regression(
668
+ n_samples=150,
669
+ n_features=20,
670
+ n_informative=n_informative,
671
+ shuffle=False,
672
+ random_state=random_state,
673
+ noise=10,
674
+ )
675
+
676
+ with warnings.catch_warnings(record=True):
677
+ # Warnings can be raised when no features are selected
678
+ # (low alpha or very noisy data)
679
+ univariate_filter = SelectFdr(f_regression, alpha=alpha)
680
+ X_r = univariate_filter.fit(X, y).transform(X)
681
+ X_r2 = (
682
+ GenericUnivariateSelect(f_regression, mode="fdr", param=alpha)
683
+ .fit(X, y)
684
+ .transform(X)
685
+ )
686
+
687
+ assert_array_equal(X_r, X_r2)
688
+ support = univariate_filter.get_support()
689
+ num_false_positives = np.sum(support[n_informative:] == 1)
690
+ num_true_positives = np.sum(support[:n_informative] == 1)
691
+
692
+ if num_false_positives == 0:
693
+ return 0.0
694
+ false_discovery_rate = num_false_positives / (
695
+ num_true_positives + num_false_positives
696
+ )
697
+ return false_discovery_rate
698
+
699
+ # As per Benjamini-Hochberg, the expected false discovery rate
700
+ # should be lower than alpha:
701
+ # FDR = E(FP / (TP + FP)) <= alpha
702
+ false_discovery_rate = np.mean(
703
+ [single_fdr(alpha, n_informative, random_state) for random_state in range(100)]
704
+ )
705
+ assert alpha >= false_discovery_rate
706
+
707
+ # Make sure that the empirical false discovery rate increases
708
+ # with alpha:
709
+ if false_discovery_rate != 0:
710
+ assert false_discovery_rate > alpha / 10
711
+
712
+
713
+ def test_select_fwe_regression():
714
+ # Test whether the relative univariate feature selection
715
+ # gets the correct items in a simple regression problem
716
+ # with the fwe heuristic
717
+ X, y = make_regression(
718
+ n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
719
+ )
720
+
721
+ univariate_filter = SelectFwe(f_regression, alpha=0.01)
722
+ X_r = univariate_filter.fit(X, y).transform(X)
723
+ X_r2 = (
724
+ GenericUnivariateSelect(f_regression, mode="fwe", param=0.01)
725
+ .fit(X, y)
726
+ .transform(X)
727
+ )
728
+ assert_array_equal(X_r, X_r2)
729
+ support = univariate_filter.get_support()
730
+ gtruth = np.zeros(20)
731
+ gtruth[:5] = 1
732
+ assert_array_equal(support[:5], np.ones((5,), dtype=bool))
733
+ assert np.sum(support[5:] == 1) < 2
734
+
735
+
736
+ def test_selectkbest_tiebreaking():
737
+ # Test whether SelectKBest actually selects k features in case of ties.
738
+ # Prior to 0.11, SelectKBest would return more features than requested.
739
+ Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
740
+ y = [1]
741
+ dummy_score = lambda X, y: (X[0], X[0])
742
+ for X in Xs:
743
+ sel = SelectKBest(dummy_score, k=1)
744
+ X1 = ignore_warnings(sel.fit_transform)([X], y)
745
+ assert X1.shape[1] == 1
746
+ assert_best_scores_kept(sel)
747
+
748
+ sel = SelectKBest(dummy_score, k=2)
749
+ X2 = ignore_warnings(sel.fit_transform)([X], y)
750
+ assert X2.shape[1] == 2
751
+ assert_best_scores_kept(sel)
752
+
753
+
754
+ def test_selectpercentile_tiebreaking():
755
+ # Test if SelectPercentile selects the right n_features in case of ties.
756
+ Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
757
+ y = [1]
758
+ dummy_score = lambda X, y: (X[0], X[0])
759
+ for X in Xs:
760
+ sel = SelectPercentile(dummy_score, percentile=34)
761
+ X1 = ignore_warnings(sel.fit_transform)([X], y)
762
+ assert X1.shape[1] == 1
763
+ assert_best_scores_kept(sel)
764
+
765
+ sel = SelectPercentile(dummy_score, percentile=67)
766
+ X2 = ignore_warnings(sel.fit_transform)([X], y)
767
+ assert X2.shape[1] == 2
768
+ assert_best_scores_kept(sel)
769
+
770
+
771
+ def test_tied_pvalues():
772
+ # Test whether k-best and percentiles work with tied pvalues from chi2.
773
+ # chi2 will return the same p-values for the following features, but it
774
+ # will return different scores.
775
+ X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
776
+ y = [0, 1]
777
+
778
+ for perm in itertools.permutations((0, 1, 2)):
779
+ X = X0[:, perm]
780
+ Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
781
+ assert Xt.shape == (2, 2)
782
+ assert 9998 not in Xt
783
+
784
+ Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
785
+ assert Xt.shape == (2, 2)
786
+ assert 9998 not in Xt
787
+
788
+
789
+ def test_scorefunc_multilabel():
790
+ # Test whether the k-best and percentile selectors work with multilabels with chi2.
791
+
792
+ X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
793
+ y = [[1, 1], [0, 1], [1, 0]]
794
+
795
+ Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
796
+ assert Xt.shape == (3, 2)
797
+ assert 0 not in Xt
798
+
799
+ Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
800
+ assert Xt.shape == (3, 2)
801
+ assert 0 not in Xt
802
+
803
+
804
+ def test_tied_scores():
805
+ # Test for stable sorting in k-best with tied scores.
806
+ X_train = np.array([[0, 0, 0], [1, 1, 1]])
807
+ y_train = [0, 1]
808
+
809
+ for n_features in [1, 2, 3]:
810
+ sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
811
+ X_test = sel.transform([[0, 1, 2]])
812
+ assert_array_equal(X_test[0], np.arange(3)[-n_features:])
813
+
814
+
815
+ def test_nans():
816
+ # Assert that SelectKBest and SelectPercentile can handle NaNs.
817
+ # First feature has zero variance to confuse f_classif (ANOVA) and
818
+ # make it return a NaN.
819
+ X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]]
820
+ y = [1, 0, 1]
821
+
822
+ for select in (
823
+ SelectKBest(f_classif, k=2),
824
+ SelectPercentile(f_classif, percentile=67),
825
+ ):
826
+ ignore_warnings(select.fit)(X, y)
827
+ assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
828
+
829
+
830
+ def test_invalid_k():
831
+ X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]]
832
+ y = [1, 0, 1]
833
+
834
+ msg = "k=4 is greater than n_features=3. All the features will be returned."
835
+ with pytest.warns(UserWarning, match=msg):
836
+ SelectKBest(k=4).fit(X, y)
837
+ with pytest.warns(UserWarning, match=msg):
838
+ GenericUnivariateSelect(mode="k_best", param=4).fit(X, y)
839
+
840
+
841
+ def test_f_classif_constant_feature():
842
+ # Test that f_classif warns if a feature is constant throughout.
843
+
844
+ X, y = make_classification(n_samples=10, n_features=5)
845
+ X[:, 0] = 2.0
846
+ with pytest.warns(UserWarning):
847
+ f_classif(X, y)
848
+
849
+
850
+ def test_no_feature_selected():
851
+ rng = np.random.RandomState(0)
852
+
853
+ # Generate random uncorrelated data: a strict univariate test should
854
+ # reject all the features
855
+ X = rng.rand(40, 10)
856
+ y = rng.randint(0, 4, size=40)
857
+ strict_selectors = [
858
+ SelectFwe(alpha=0.01).fit(X, y),
859
+ SelectFdr(alpha=0.01).fit(X, y),
860
+ SelectFpr(alpha=0.01).fit(X, y),
861
+ SelectPercentile(percentile=0).fit(X, y),
862
+ SelectKBest(k=0).fit(X, y),
863
+ ]
864
+ for selector in strict_selectors:
865
+ assert_array_equal(selector.get_support(), np.zeros(10))
866
+ with pytest.warns(UserWarning, match="No features were selected"):
867
+ X_selected = selector.transform(X)
868
+ assert X_selected.shape == (40, 0)
869
+
870
+
871
+ def test_mutual_info_classif():
872
+ X, y = make_classification(
873
+ n_samples=100,
874
+ n_features=5,
875
+ n_informative=1,
876
+ n_redundant=1,
877
+ n_repeated=0,
878
+ n_classes=2,
879
+ n_clusters_per_class=1,
880
+ flip_y=0.0,
881
+ class_sep=10,
882
+ shuffle=False,
883
+ random_state=0,
884
+ )
885
+
886
+ # Test in KBest mode.
887
+ univariate_filter = SelectKBest(mutual_info_classif, k=2)
888
+ X_r = univariate_filter.fit(X, y).transform(X)
889
+ X_r2 = (
890
+ GenericUnivariateSelect(mutual_info_classif, mode="k_best", param=2)
891
+ .fit(X, y)
892
+ .transform(X)
893
+ )
894
+ assert_array_equal(X_r, X_r2)
895
+ support = univariate_filter.get_support()
896
+ gtruth = np.zeros(5)
897
+ gtruth[:2] = 1
898
+ assert_array_equal(support, gtruth)
899
+
900
+ # Test in Percentile mode.
901
+ univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
902
+ X_r = univariate_filter.fit(X, y).transform(X)
903
+ X_r2 = (
904
+ GenericUnivariateSelect(mutual_info_classif, mode="percentile", param=40)
905
+ .fit(X, y)
906
+ .transform(X)
907
+ )
908
+ assert_array_equal(X_r, X_r2)
909
+ support = univariate_filter.get_support()
910
+ gtruth = np.zeros(5)
911
+ gtruth[:2] = 1
912
+ assert_array_equal(support, gtruth)
913
+
914
+
915
+ def test_mutual_info_regression():
916
+ X, y = make_regression(
917
+ n_samples=100,
918
+ n_features=10,
919
+ n_informative=2,
920
+ shuffle=False,
921
+ random_state=0,
922
+ noise=10,
923
+ )
924
+
925
+ # Test in KBest mode.
926
+ univariate_filter = SelectKBest(mutual_info_regression, k=2)
927
+ X_r = univariate_filter.fit(X, y).transform(X)
928
+ assert_best_scores_kept(univariate_filter)
929
+ X_r2 = (
930
+ GenericUnivariateSelect(mutual_info_regression, mode="k_best", param=2)
931
+ .fit(X, y)
932
+ .transform(X)
933
+ )
934
+ assert_array_equal(X_r, X_r2)
935
+ support = univariate_filter.get_support()
936
+ gtruth = np.zeros(10)
937
+ gtruth[:2] = 1
938
+ assert_array_equal(support, gtruth)
939
+
940
+ # Test in Percentile mode.
941
+ univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
942
+ X_r = univariate_filter.fit(X, y).transform(X)
943
+ X_r2 = (
944
+ GenericUnivariateSelect(mutual_info_regression, mode="percentile", param=20)
945
+ .fit(X, y)
946
+ .transform(X)
947
+ )
948
+ assert_array_equal(X_r, X_r2)
949
+ support = univariate_filter.get_support()
950
+ gtruth = np.zeros(10)
951
+ gtruth[:2] = 1
952
+ assert_array_equal(support, gtruth)
953
+
954
+
955
+ def test_dataframe_output_dtypes():
956
+ """Check that the output datafarme dtypes are the same as the input.
957
+
958
+ Non-regression test for gh-24860.
959
+ """
960
+ pd = pytest.importorskip("pandas")
961
+
962
+ X, y = load_iris(return_X_y=True, as_frame=True)
963
+ X = X.astype(
964
+ {
965
+ "petal length (cm)": np.float32,
966
+ "petal width (cm)": np.float64,
967
+ }
968
+ )
969
+ X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10)
970
+
971
+ column_order = X.columns
972
+
973
+ def selector(X, y):
974
+ ranking = {
975
+ "sepal length (cm)": 1,
976
+ "sepal width (cm)": 2,
977
+ "petal length (cm)": 3,
978
+ "petal width (cm)": 4,
979
+ "petal_width_binned": 5,
980
+ }
981
+ return np.asarray([ranking[name] for name in column_order])
982
+
983
+ univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas")
984
+ output = univariate_filter.fit_transform(X, y)
985
+
986
+ assert_array_equal(
987
+ output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"]
988
+ )
989
+ for name, dtype in output.dtypes.items():
990
+ assert dtype == X.dtypes[name]
991
+
992
+
993
+ @pytest.mark.parametrize(
994
+ "selector",
995
+ [
996
+ SelectKBest(k=4),
997
+ SelectPercentile(percentile=80),
998
+ GenericUnivariateSelect(mode="k_best", param=4),
999
+ GenericUnivariateSelect(mode="percentile", param=80),
1000
+ ],
1001
+ )
1002
+ def test_unsupervised_filter(selector):
1003
+ """Check support for unsupervised feature selection for the filter that could
1004
+ may require only `X`.
1005
+ """
1006
+ rng = np.random.RandomState(0)
1007
+ X = rng.randn(10, 5)
1008
+
1009
+ def score_func(X, y=None):
1010
+ return np.array([1, 1, 1, 1, 0])
1011
+
1012
+ selector.set_params(score_func=score_func)
1013
+ selector.fit(X)
1014
+ X_trans = selector.transform(X)
1015
+ assert_allclose(X_trans, X[:, :4])
1016
+ X_trans = selector.fit_transform(X)
1017
+ assert_allclose(X_trans, X[:, :4])
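`test_unsupervised_filter` above shows that the filters accept a `score_func` that ignores `y` and can be fitted on `X` alone. A brief sketch along the same lines (the variance-based score function is illustrative, not part of the test file):

# Unsupervised univariate selection: keep the 4 highest-variance columns.
import numpy as np
from sklearn.feature_selection import SelectKBest

def variance_score(X, y=None):
    # score_func may ignore y entirely; higher variance -> higher score
    return np.var(X, axis=0)

rng = np.random.RandomState(0)
X = rng.randn(50, 6) * np.array([0.1, 5.0, 1.0, 3.0, 0.2, 2.0])
X_top = SelectKBest(variance_score, k=4).fit_transform(X)
print(X_top.shape)  # (50, 4)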
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py ADDED
@@ -0,0 +1,684 @@
1
+ import re
2
+ import warnings
3
+ from unittest.mock import Mock
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from sklearn import datasets
9
+ from sklearn.base import BaseEstimator
10
+ from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression
11
+ from sklearn.datasets import make_friedman1
12
+ from sklearn.decomposition import PCA
13
+ from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier
14
+ from sklearn.exceptions import NotFittedError
15
+ from sklearn.feature_selection import SelectFromModel
16
+ from sklearn.linear_model import (
17
+ ElasticNet,
18
+ ElasticNetCV,
19
+ Lasso,
20
+ LassoCV,
21
+ LinearRegression,
22
+ LogisticRegression,
23
+ PassiveAggressiveClassifier,
24
+ SGDClassifier,
25
+ )
26
+ from sklearn.pipeline import make_pipeline
27
+ from sklearn.svm import LinearSVC
28
+ from sklearn.utils._testing import (
29
+ MinimalClassifier,
30
+ assert_allclose,
31
+ assert_array_almost_equal,
32
+ assert_array_equal,
33
+ skip_if_32bit,
34
+ )
35
+
36
+
37
+ class NaNTag(BaseEstimator):
38
+ def _more_tags(self):
39
+ return {"allow_nan": True}
40
+
41
+
42
+ class NoNaNTag(BaseEstimator):
43
+ def _more_tags(self):
44
+ return {"allow_nan": False}
45
+
46
+
47
+ class NaNTagRandomForest(RandomForestClassifier):
48
+ def _more_tags(self):
49
+ return {"allow_nan": True}
50
+
51
+
52
+ iris = datasets.load_iris()
53
+ data, y = iris.data, iris.target
54
+ rng = np.random.RandomState(0)
55
+
56
+
57
+ def test_invalid_input():
58
+ clf = SGDClassifier(
59
+ alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None
60
+ )
61
+ for threshold in ["gobbledigook", ".5 * gobbledigook"]:
62
+ model = SelectFromModel(clf, threshold=threshold)
63
+ model.fit(data, y)
64
+ with pytest.raises(ValueError):
65
+ model.transform(data)
66
+
67
+
68
+ def test_input_estimator_unchanged():
69
+ # Test that SelectFromModel fits on a clone of the estimator.
70
+ est = RandomForestClassifier()
71
+ transformer = SelectFromModel(estimator=est)
72
+ transformer.fit(data, y)
73
+ assert transformer.estimator is est
74
+
75
+
76
+ @pytest.mark.parametrize(
77
+ "max_features, err_type, err_msg",
78
+ [
79
+ (
80
+ data.shape[1] + 1,
81
+ ValueError,
82
+ "max_features ==",
83
+ ),
84
+ (
85
+ lambda X: 1.5,
86
+ TypeError,
87
+ "max_features must be an instance of int, not float.",
88
+ ),
89
+ (
90
+ lambda X: data.shape[1] + 1,
91
+ ValueError,
92
+ "max_features ==",
93
+ ),
94
+ (
95
+ lambda X: -1,
96
+ ValueError,
97
+ "max_features ==",
98
+ ),
99
+ ],
100
+ )
101
+ def test_max_features_error(max_features, err_type, err_msg):
102
+ err_msg = re.escape(err_msg)
103
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
104
+
105
+ transformer = SelectFromModel(
106
+ estimator=clf, max_features=max_features, threshold=-np.inf
107
+ )
108
+ with pytest.raises(err_type, match=err_msg):
109
+ transformer.fit(data, y)
110
+
111
+
112
+ @pytest.mark.parametrize("max_features", [0, 2, data.shape[1], None])
113
+ def test_inferred_max_features_integer(max_features):
114
+ """Check max_features_ and output shape for integer max_features."""
115
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
116
+ transformer = SelectFromModel(
117
+ estimator=clf, max_features=max_features, threshold=-np.inf
118
+ )
119
+ X_trans = transformer.fit_transform(data, y)
120
+ if max_features is not None:
121
+ assert transformer.max_features_ == max_features
122
+ assert X_trans.shape[1] == transformer.max_features_
123
+ else:
124
+ assert not hasattr(transformer, "max_features_")
125
+ assert X_trans.shape[1] == data.shape[1]
126
+
127
+
128
+ @pytest.mark.parametrize(
129
+ "max_features",
130
+ [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)],
131
+ )
132
+ def test_inferred_max_features_callable(max_features):
133
+ """Check max_features_ and output shape for callable max_features."""
134
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
135
+ transformer = SelectFromModel(
136
+ estimator=clf, max_features=max_features, threshold=-np.inf
137
+ )
138
+ X_trans = transformer.fit_transform(data, y)
139
+ assert transformer.max_features_ == max_features(data)
140
+ assert X_trans.shape[1] == transformer.max_features_
141
+
142
+
143
+ @pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2])
144
+ def test_max_features_array_like(max_features):
145
+ X = [
146
+ [0.87, -1.34, 0.31],
147
+ [-2.79, -0.02, -0.85],
148
+ [-1.34, -0.48, -2.55],
149
+ [1.92, 1.48, 0.65],
150
+ ]
151
+ y = [0, 1, 0, 1]
152
+
153
+ clf = RandomForestClassifier(n_estimators=5, random_state=0)
154
+ transformer = SelectFromModel(
155
+ estimator=clf, max_features=max_features, threshold=-np.inf
156
+ )
157
+ X_trans = transformer.fit_transform(X, y)
158
+ assert X_trans.shape[1] == transformer.max_features_
159
+
160
+
161
+ @pytest.mark.parametrize(
162
+ "max_features",
163
+ [lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1],
164
+ )
165
+ def test_max_features_callable_data(max_features):
166
+ """Tests that the callable passed to `fit` is called on X."""
167
+ clf = RandomForestClassifier(n_estimators=50, random_state=0)
168
+ m = Mock(side_effect=max_features)
169
+ transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf)
170
+ transformer.fit_transform(data, y)
171
+ m.assert_called_with(data)
172
+
173
+
174
+ class FixedImportanceEstimator(BaseEstimator):
175
+ def __init__(self, importances):
176
+ self.importances = importances
177
+
178
+ def fit(self, X, y=None):
179
+ self.feature_importances_ = np.array(self.importances)
180
+
181
+
182
+ def test_max_features():
183
+ # Test max_features parameter using various values
184
+ X, y = datasets.make_classification(
185
+ n_samples=1000,
186
+ n_features=10,
187
+ n_informative=3,
188
+ n_redundant=0,
189
+ n_repeated=0,
190
+ shuffle=False,
191
+ random_state=0,
192
+ )
193
+ max_features = X.shape[1]
194
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
195
+
196
+ transformer1 = SelectFromModel(estimator=est, threshold=-np.inf)
197
+ transformer2 = SelectFromModel(
198
+ estimator=est, max_features=max_features, threshold=-np.inf
199
+ )
200
+ X_new1 = transformer1.fit_transform(X, y)
201
+ X_new2 = transformer2.fit_transform(X, y)
202
+ assert_allclose(X_new1, X_new2)
203
+
204
+ # Test max_features against actual model.
205
+ transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42))
206
+ X_new1 = transformer1.fit_transform(X, y)
207
+ scores1 = np.abs(transformer1.estimator_.coef_)
208
+ candidate_indices1 = np.argsort(-scores1, kind="mergesort")
209
+
210
+ for n_features in range(1, X_new1.shape[1] + 1):
211
+ transformer2 = SelectFromModel(
212
+ estimator=Lasso(alpha=0.025, random_state=42),
213
+ max_features=n_features,
214
+ threshold=-np.inf,
215
+ )
216
+ X_new2 = transformer2.fit_transform(X, y)
217
+ scores2 = np.abs(transformer2.estimator_.coef_)
218
+ candidate_indices2 = np.argsort(-scores2, kind="mergesort")
219
+ assert_allclose(
220
+ X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]]
221
+ )
222
+ assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_)
223
+
224
+
225
+ def test_max_features_tiebreak():
226
+ # Test if max_features can break ties among feature importances
227
+ X, y = datasets.make_classification(
228
+ n_samples=1000,
229
+ n_features=10,
230
+ n_informative=3,
231
+ n_redundant=0,
232
+ n_repeated=0,
233
+ shuffle=False,
234
+ random_state=0,
235
+ )
236
+ max_features = X.shape[1]
237
+
238
+ feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1])
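+ # With tied importances, ties are expected to be broken in favor of the
+ # lowest feature indices, so the selected features should be 0..n_features-1.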
239
+ for n_features in range(1, max_features + 1):
240
+ transformer = SelectFromModel(
241
+ FixedImportanceEstimator(feature_importances),
242
+ max_features=n_features,
243
+ threshold=-np.inf,
244
+ )
245
+ X_new = transformer.fit_transform(X, y)
246
+ selected_feature_indices = np.where(transformer._get_support_mask())[0]
247
+ assert_array_equal(selected_feature_indices, np.arange(n_features))
248
+ assert X_new.shape[1] == n_features
249
+
250
+
251
+ def test_threshold_and_max_features():
252
+ X, y = datasets.make_classification(
253
+ n_samples=1000,
254
+ n_features=10,
255
+ n_informative=3,
256
+ n_redundant=0,
257
+ n_repeated=0,
258
+ shuffle=False,
259
+ random_state=0,
260
+ )
261
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
262
+
263
+ transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf)
264
+ X_new1 = transformer1.fit_transform(X, y)
265
+
266
+ transformer2 = SelectFromModel(estimator=est, threshold=0.04)
267
+ X_new2 = transformer2.fit_transform(X, y)
268
+
269
+ transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04)
270
+ X_new3 = transformer3.fit_transform(X, y)
271
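+ # When both threshold and max_features are set, the more restrictive
+ # of the two constraints should apply.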
+ assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1])
272
+ selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :])
273
+ assert_allclose(X_new3, X[:, selected_indices[0]])
274
+
275
+
276
+ @skip_if_32bit
277
+ def test_feature_importances():
278
+ X, y = datasets.make_classification(
279
+ n_samples=1000,
280
+ n_features=10,
281
+ n_informative=3,
282
+ n_redundant=0,
283
+ n_repeated=0,
284
+ shuffle=False,
285
+ random_state=0,
286
+ )
287
+
288
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
289
+ for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
290
+ transformer = SelectFromModel(estimator=est, threshold=threshold)
291
+ transformer.fit(X, y)
292
+ assert hasattr(transformer.estimator_, "feature_importances_")
293
+
294
+ X_new = transformer.transform(X)
295
+ assert X_new.shape[1] < X.shape[1]
296
+ importances = transformer.estimator_.feature_importances_
297
+
298
+ feature_mask = np.abs(importances) > func(importances)
299
+ assert_array_almost_equal(X_new, X[:, feature_mask])
300
+
301
+
302
+ def test_sample_weight():
303
+ # Ensure sample weights are passed to underlying estimator
304
+ X, y = datasets.make_classification(
305
+ n_samples=100,
306
+ n_features=10,
307
+ n_informative=3,
308
+ n_redundant=0,
309
+ n_repeated=0,
310
+ shuffle=False,
311
+ random_state=0,
312
+ )
313
+
314
+ # Check with sample weights
315
+ sample_weight = np.ones(y.shape)
316
+ sample_weight[y == 1] *= 100
317
+
318
+ est = LogisticRegression(random_state=0, fit_intercept=False)
319
+ transformer = SelectFromModel(estimator=est)
320
+ transformer.fit(X, y, sample_weight=None)
321
+ mask = transformer._get_support_mask()
322
+ transformer.fit(X, y, sample_weight=sample_weight)
323
+ weighted_mask = transformer._get_support_mask()
324
+ assert not np.all(weighted_mask == mask)
325
+ transformer.fit(X, y, sample_weight=3 * sample_weight)
326
+ reweighted_mask = transformer._get_support_mask()
327
+ assert np.all(weighted_mask == reweighted_mask)
328
+
329
+
330
+ @pytest.mark.parametrize(
331
+ "estimator",
332
+ [
333
+ Lasso(alpha=0.1, random_state=42),
334
+ LassoCV(random_state=42),
335
+ ElasticNet(l1_ratio=1, random_state=42),
336
+ ElasticNetCV(l1_ratio=[1], random_state=42),
337
+ ],
338
+ )
339
+ def test_coef_default_threshold(estimator):
340
+ X, y = datasets.make_classification(
341
+ n_samples=100,
342
+ n_features=10,
343
+ n_informative=3,
344
+ n_redundant=0,
345
+ n_repeated=0,
346
+ shuffle=False,
347
+ random_state=0,
348
+ )
349
+
350
+ # For the Lasso and related models, the threshold defaults to 1e-5
351
+ transformer = SelectFromModel(estimator=estimator)
352
+ transformer.fit(X, y)
353
+ X_new = transformer.transform(X)
354
+ mask = np.abs(transformer.estimator_.coef_) > 1e-5
355
+ assert_array_almost_equal(X_new, X[:, mask])
356
+
357
+
358
+ @skip_if_32bit
359
+ def test_2d_coef():
360
+ X, y = datasets.make_classification(
361
+ n_samples=1000,
362
+ n_features=10,
363
+ n_informative=3,
364
+ n_redundant=0,
365
+ n_repeated=0,
366
+ shuffle=False,
367
+ random_state=0,
368
+ n_classes=4,
369
+ )
370
+
371
+ est = LogisticRegression()
372
+ for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
373
+ for order in [1, 2, np.inf]:
374
+ # Fit SelectFromModel on a multi-class problem
375
+ transformer = SelectFromModel(
376
+ estimator=LogisticRegression(), threshold=threshold, norm_order=order
377
+ )
378
+ transformer.fit(X, y)
379
+ assert hasattr(transformer.estimator_, "coef_")
380
+ X_new = transformer.transform(X)
381
+ assert X_new.shape[1] < X.shape[1]
382
+
383
+ # Manually check that the norm is correctly performed
384
+ est.fit(X, y)
385
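+ # For a multiclass problem coef_ has shape (n_classes, n_features); the
+ # per-feature importance is the norm of the corresponding column.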
+ importances = np.linalg.norm(est.coef_, axis=0, ord=order)
386
+ feature_mask = importances > func(importances)
387
+ assert_array_almost_equal(X_new, X[:, feature_mask])
388
+
389
+
390
+ def test_partial_fit():
391
+ est = PassiveAggressiveClassifier(
392
+ random_state=0, shuffle=False, max_iter=5, tol=None
393
+ )
394
+ transformer = SelectFromModel(estimator=est)
395
+ transformer.partial_fit(data, y, classes=np.unique(y))
396
+ old_model = transformer.estimator_
397
+ transformer.partial_fit(data, y, classes=np.unique(y))
398
+ new_model = transformer.estimator_
399
+ assert old_model is new_model
400
+
401
+ X_transform = transformer.transform(data)
402
+ transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
403
+ assert_array_almost_equal(X_transform, transformer.transform(data))
404
+
405
+ # check that if est doesn't have partial_fit, neither does SelectFromModel
406
+ transformer = SelectFromModel(estimator=RandomForestClassifier())
407
+ assert not hasattr(transformer, "partial_fit")
408
+
409
+
410
+ def test_calling_fit_reinitializes():
411
+ est = LinearSVC(dual="auto", random_state=0)
412
+ transformer = SelectFromModel(estimator=est)
413
+ transformer.fit(data, y)
414
+ transformer.set_params(estimator__C=100)
415
+ transformer.fit(data, y)
416
+ assert transformer.estimator_.C == 100
417
+
418
+
419
+ def test_prefit():
420
+ # Test all possible combinations of the prefit parameter.
421
+
422
+ # Passing a prefit parameter with the selected model
423
+ # and fitting an unfit model with prefit=False should give the same results.
424
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
425
+ model = SelectFromModel(clf)
426
+ model.fit(data, y)
427
+ X_transform = model.transform(data)
428
+ clf.fit(data, y)
429
+ model = SelectFromModel(clf, prefit=True)
430
+ assert_array_almost_equal(model.transform(data), X_transform)
431
+ model.fit(data, y)
432
+ assert model.estimator_ is not clf
433
+
434
+ # Check that the model is rewritten if prefit=False and a fitted model is
435
+ # passed
436
+ model = SelectFromModel(clf, prefit=False)
437
+ model.fit(data, y)
438
+ assert_array_almost_equal(model.transform(data), X_transform)
439
+
440
+ # Check that passing an unfitted estimator with `prefit=True` raises a
441
+ # `ValueError`
442
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
443
+ model = SelectFromModel(clf, prefit=True)
444
+ err_msg = "When `prefit=True`, `estimator` is expected to be a fitted estimator."
445
+ with pytest.raises(NotFittedError, match=err_msg):
446
+ model.fit(data, y)
447
+ with pytest.raises(NotFittedError, match=err_msg):
448
+ model.partial_fit(data, y)
449
+ with pytest.raises(NotFittedError, match=err_msg):
450
+ model.transform(data)
451
+
452
+ # Check that the internal parameters of the prefitted model are not changed
453
+ # when calling `fit` or `partial_fit` with `prefit=True`
454
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, tol=None).fit(data, y)
455
+ model = SelectFromModel(clf, prefit=True)
456
+ model.fit(data, y)
457
+ assert_allclose(model.estimator_.coef_, clf.coef_)
458
+ model.partial_fit(data, y)
459
+ assert_allclose(model.estimator_.coef_, clf.coef_)
460
+
461
+
462
+ def test_prefit_max_features():
463
+ """Check the interaction between `prefit` and `max_features`."""
464
+ # case 1: an error should be raised at `transform` if `fit` was not called to
465
+ # validate the attributes
466
+ estimator = RandomForestClassifier(n_estimators=5, random_state=0)
467
+ estimator.fit(data, y)
468
+ model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1])
469
+
470
+ err_msg = (
471
+ "When `prefit=True` and `max_features` is a callable, call `fit` "
472
+ "before calling `transform`."
473
+ )
474
+ with pytest.raises(NotFittedError, match=err_msg):
475
+ model.transform(data)
476
+
477
+ # case 2: `max_features` is not validated and different from an integer
478
+ # FIXME: we cannot validate the upper bound of the attribute at transform
479
+ # and we should force calling `fit` if we intend to force the attribute
480
+ # to have such an upper bound.
481
+ max_features = 2.5
482
+ model.set_params(max_features=max_features)
483
+ with pytest.raises(ValueError, match="`max_features` must be an integer"):
484
+ model.transform(data)
485
+
486
+
487
+ def test_prefit_get_feature_names_out():
488
+ """Check the interaction between prefit and the feature names."""
489
+ clf = RandomForestClassifier(n_estimators=2, random_state=0)
490
+ clf.fit(data, y)
491
+ model = SelectFromModel(clf, prefit=True, max_features=1)
492
+
493
+ name = type(model).__name__
494
+ err_msg = (
495
+ f"This {name} instance is not fitted yet. Call 'fit' with "
496
+ "appropriate arguments before using this estimator."
497
+ )
498
+ with pytest.raises(NotFittedError, match=err_msg):
499
+ model.get_feature_names_out()
500
+
501
+ model.fit(data, y)
502
+ feature_names = model.get_feature_names_out()
503
+ assert feature_names == ["x3"]
504
+
505
+
506
+ def test_threshold_string():
507
+ est = RandomForestClassifier(n_estimators=50, random_state=0)
508
+ model = SelectFromModel(est, threshold="0.5*mean")
509
+ model.fit(data, y)
510
+ X_transform = model.transform(data)
511
+
512
+ # Calculate the threshold from the estimator directly.
513
+ est.fit(data, y)
514
+ threshold = 0.5 * np.mean(est.feature_importances_)
515
+ mask = est.feature_importances_ > threshold
516
+ assert_array_almost_equal(X_transform, data[:, mask])
517
+
518
+
519
+ def test_threshold_without_refitting():
520
+ # Test that the threshold can be set without refitting the model.
521
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
522
+ model = SelectFromModel(clf, threshold="0.1 * mean")
523
+ model.fit(data, y)
524
+ X_transform = model.transform(data)
525
+
526
+ # Set a higher threshold to filter out more features.
527
+ model.threshold = "1.0 * mean"
528
+ assert X_transform.shape[1] > model.transform(data).shape[1]
529
+
530
+
531
+ def test_fit_accepts_nan_inf():
532
+ # Test that fit doesn't check for np.inf and np.nan values.
533
+ clf = HistGradientBoostingClassifier(random_state=0)
534
+
535
+ model = SelectFromModel(estimator=clf)
536
+
537
+ nan_data = data.copy()
538
+ nan_data[0] = np.nan
539
+ nan_data[1] = np.inf
540
+
541
+ model.fit(nan_data, y)
542
+
543
+
544
+ def test_transform_accepts_nan_inf():
545
+ # Test that transform doesn't check for np.inf and np.nan values.
546
+ clf = NaNTagRandomForest(n_estimators=100, random_state=0)
547
+ nan_data = data.copy()
548
+
549
+ model = SelectFromModel(estimator=clf)
550
+ model.fit(nan_data, y)
551
+
552
+ nan_data[0] = np.nan
553
+ nan_data[1] = np.inf
554
+
555
+ model.transform(nan_data)
556
+
557
+
558
+ def test_allow_nan_tag_comes_from_estimator():
559
+ allow_nan_est = NaNTag()
560
+ model = SelectFromModel(estimator=allow_nan_est)
561
+ assert model._get_tags()["allow_nan"] is True
562
+
563
+ no_nan_est = NoNaNTag()
564
+ model = SelectFromModel(estimator=no_nan_est)
565
+ assert model._get_tags()["allow_nan"] is False
566
+
567
+
568
+ def _pca_importances(pca_estimator):
569
+ return np.abs(pca_estimator.explained_variance_)
570
+
571
+
572
+ @pytest.mark.parametrize(
573
+ "estimator, importance_getter",
574
+ [
575
+ (
576
+ make_pipeline(PCA(random_state=0), LogisticRegression()),
577
+ "named_steps.logisticregression.coef_",
578
+ ),
579
+ (PCA(random_state=0), _pca_importances),
580
+ ],
581
+ )
582
+ def test_importance_getter(estimator, importance_getter):
583
+ selector = SelectFromModel(
584
+ estimator, threshold="mean", importance_getter=importance_getter
585
+ )
586
+ selector.fit(data, y)
587
+ assert selector.transform(data).shape[1] == 1
588
+
589
+
590
+ @pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
591
+ def test_select_from_model_pls(PLSEstimator):
592
+ """Check the behaviour of SelectFromModel with PLS estimators.
593
+
594
+ Non-regression test for:
595
+ https://github.com/scikit-learn/scikit-learn/issues/12410
596
+ """
597
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
598
+ estimator = PLSEstimator(n_components=1)
599
+ model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y)
600
+ assert model.score(X, y) > 0.5
601
+
602
+
603
+ def test_estimator_does_not_support_feature_names():
604
+ """SelectFromModel works with estimators that do not support feature_names_in_.
605
+
606
+ Non-regression test for #21949.
607
+ """
608
+ pytest.importorskip("pandas")
609
+ X, y = datasets.load_iris(as_frame=True, return_X_y=True)
610
+ all_feature_names = set(X.columns)
611
+
612
+ def importance_getter(estimator):
613
+ return np.arange(X.shape[1])
614
+
615
+ selector = SelectFromModel(
616
+ MinimalClassifier(), importance_getter=importance_getter
617
+ ).fit(X, y)
618
+
619
+ # selector learns the feature names itself
620
+ assert_array_equal(selector.feature_names_in_, X.columns)
621
+
622
+ feature_names_out = set(selector.get_feature_names_out())
623
+ assert feature_names_out < all_feature_names
624
+
625
+ with warnings.catch_warnings():
626
+ warnings.simplefilter("error", UserWarning)
627
+
628
+ selector.transform(X.iloc[1:3])
629
+
630
+
631
+ @pytest.mark.parametrize(
632
+ "error, err_msg, max_features",
633
+ (
634
+ [ValueError, "max_features == 10, must be <= 4", 10],
635
+ [ValueError, "max_features == 5, must be <= 4", lambda x: x.shape[1] + 1],
636
+ ),
637
+ )
638
+ def test_partial_fit_validate_max_features(error, err_msg, max_features):
639
+ """Test that partial_fit from SelectFromModel validates `max_features`."""
640
+ X, y = datasets.make_classification(
641
+ n_samples=100,
642
+ n_features=4,
643
+ random_state=0,
644
+ )
645
+
646
+ with pytest.raises(error, match=err_msg):
647
+ SelectFromModel(
648
+ estimator=SGDClassifier(), max_features=max_features
649
+ ).partial_fit(X, y, classes=[0, 1])
650
+
651
+
652
+ @pytest.mark.parametrize("as_frame", [True, False])
653
+ def test_partial_fit_validate_feature_names(as_frame):
654
+ """Test that partial_fit from SelectFromModel validates `feature_names_in_`."""
655
+ pytest.importorskip("pandas")
656
+ X, y = datasets.load_iris(as_frame=as_frame, return_X_y=True)
657
+
658
+ selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit(
659
+ X, y, classes=[0, 1, 2]
660
+ )
661
+ if as_frame:
662
+ assert_array_equal(selector.feature_names_in_, X.columns)
663
+ else:
664
+ assert not hasattr(selector, "feature_names_in_")
665
+
666
+
667
+ def test_from_model_estimator_attribute_error():
668
+ """Check that we raise the proper AttributeError when the estimator
669
+ does not implement the `partial_fit` method, which is decorated with
670
+ `available_if`.
671
+
672
+ Non-regression test for:
673
+ https://github.com/scikit-learn/scikit-learn/issues/28108
674
+ """
675
+ # `LinearRegression` does not implement 'partial_fit' and should raise an
676
+ # AttributeError
677
+ from_model = SelectFromModel(estimator=LinearRegression())
678
+
679
+ outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'"
680
+ inner_msg = "'LinearRegression' object has no attribute 'partial_fit'"
681
+ with pytest.raises(AttributeError, match=outer_msg) as exec_info:
682
+ from_model.fit(data, y).partial_fit(data)
683
+ assert isinstance(exec_info.value.__cause__, AttributeError)
684
+ assert inner_msg in str(exec_info.value.__cause__)
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py ADDED
@@ -0,0 +1,254 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
5
+ from sklearn.feature_selection._mutual_info import _compute_mi
6
+ from sklearn.utils import check_random_state
7
+ from sklearn.utils._testing import (
8
+ assert_allclose,
9
+ assert_array_equal,
10
+ )
11
+ from sklearn.utils.fixes import CSR_CONTAINERS
12
+
13
+
14
+ def test_compute_mi_dd():
15
+ # In the discrete case computations are straightforward and can be done
16
+ # by hand on given vectors.
17
+ x = np.array([0, 1, 1, 0, 0])
18
+ y = np.array([1, 0, 0, 0, 1])
19
+
20
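+ # Marginal entropies use the observed frequencies 3/5 and 2/5; the joint
+ # entropy uses the joint frequencies 1/5, 2/5 and 2/5, and
+ # I(X; Y) = H(X) + H(Y) - H(X, Y).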
+ H_x = H_y = -(3 / 5) * np.log(3 / 5) - (2 / 5) * np.log(2 / 5)
21
+ H_xy = -1 / 5 * np.log(1 / 5) - 2 / 5 * np.log(2 / 5) - 2 / 5 * np.log(2 / 5)
22
+ I_xy = H_x + H_y - H_xy
23
+
24
+ assert_allclose(_compute_mi(x, y, x_discrete=True, y_discrete=True), I_xy)
25
+
26
+
27
+ def test_compute_mi_cc(global_dtype):
28
+ # For two continuous variables, a good approach is to test on a bivariate
29
+ # normal distribution, where mutual information is known.
30
+
31
+ # Mean of the distribution, irrelevant for mutual information.
32
+ mean = np.zeros(2)
33
+
34
+ # Setup covariance matrix with correlation coeff. equal 0.5.
35
+ sigma_1 = 1
36
+ sigma_2 = 10
37
+ corr = 0.5
38
+ cov = np.array(
39
+ [
40
+ [sigma_1**2, corr * sigma_1 * sigma_2],
41
+ [corr * sigma_1 * sigma_2, sigma_2**2],
42
+ ]
43
+ )
44
+
45
+ # True theoretical mutual information.
46
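+ # Equivalent to -0.5 * log(1 - corr**2), since
+ # det(cov) = sigma_1**2 * sigma_2**2 * (1 - corr**2).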
+ I_theory = np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov))
47
+
48
+ rng = check_random_state(0)
49
+ Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False)
50
+
51
+ x, y = Z[:, 0], Z[:, 1]
52
+
53
+ # Theory and computed values won't be very close
54
+ # Here we check with a large relative tolerance
55
+ for n_neighbors in [3, 5, 7]:
56
+ I_computed = _compute_mi(
57
+ x, y, x_discrete=False, y_discrete=False, n_neighbors=n_neighbors
58
+ )
59
+ assert_allclose(I_computed, I_theory, rtol=1e-1)
60
+
61
+
62
+ def test_compute_mi_cd(global_dtype):
63
+ # To test define a joint distribution as follows:
64
+ # p(x, y) = p(x) p(y | x)
65
+ # X ~ Bernoulli(p)
66
+ # (Y | x = 0) ~ Uniform(-1, 1)
67
+ # (Y | x = 1) ~ Uniform(0, 2)
68
+
69
+ # Use the following formula for mutual information:
70
+ # I(X; Y) = H(Y) - H(Y | X)
71
+ # Two entropies can be computed by hand:
72
+ # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2)
73
+ # H(Y | X) = ln(2)
74
+
75
+ # Now we need to implement sampling from our distribution, which is
76
+ # done easily using conditional distribution logic.
77
+
78
+ n_samples = 1000
79
+ rng = check_random_state(0)
80
+
81
+ for p in [0.3, 0.5, 0.7]:
82
+ x = rng.uniform(size=n_samples) > p
83
+
84
+ y = np.empty(n_samples, global_dtype)
85
+ mask = x == 0
86
+ y[mask] = rng.uniform(-1, 1, size=np.sum(mask))
87
+ y[~mask] = rng.uniform(0, 2, size=np.sum(~mask))
88
+
89
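+ # I(X; Y) = H(Y) - H(Y | X), using the entropies given above.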
+ I_theory = -0.5 * (
90
+ (1 - p) * np.log(0.5 * (1 - p)) + p * np.log(0.5 * p) + np.log(0.5)
91
+ ) - np.log(2)
92
+
93
+ # Check with the same relative tolerance as in the continuous case.
94
+ for n_neighbors in [3, 5, 7]:
95
+ I_computed = _compute_mi(
96
+ x, y, x_discrete=True, y_discrete=False, n_neighbors=n_neighbors
97
+ )
98
+ assert_allclose(I_computed, I_theory, rtol=1e-1)
99
+
100
+
101
+ def test_compute_mi_cd_unique_label(global_dtype):
102
+ # Test that adding a unique label doesn't change MI.
103
+ n_samples = 100
104
+ x = np.random.uniform(size=n_samples) > 0.5
105
+
106
+ y = np.empty(n_samples, global_dtype)
107
+ mask = x == 0
108
+ y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
109
+ y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
110
+
111
+ mi_1 = _compute_mi(x, y, x_discrete=True, y_discrete=False)
112
+
113
+ x = np.hstack((x, 2))
114
+ y = np.hstack((y, 10))
115
+ mi_2 = _compute_mi(x, y, x_discrete=True, y_discrete=False)
116
+
117
+ assert_allclose(mi_1, mi_2)
118
+
119
+
120
+ # We are going to test that feature ordering by MI matches our expectations.
121
+ def test_mutual_info_classif_discrete(global_dtype):
122
+ X = np.array(
123
+ [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype
124
+ )
125
+ y = np.array([0, 1, 2, 2, 1])
126
+
127
+ # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
128
+ # informative.
129
+ mi = mutual_info_classif(X, y, discrete_features=True)
130
+ assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))
131
+
132
+
133
+ def test_mutual_info_regression(global_dtype):
134
+ # We generate samples from a multivariate normal distribution, using a
+ # transformation from initially uncorrelated variables. The zeroth
+ # variable after transformation is selected as the target vector; it has
+ # the strongest correlation with variable 2 and the weakest correlation
+ # with variable 1.
139
+ T = np.array([[1, 0.5, 2, 1], [0, 1, 0.1, 0.0], [0, 0.1, 1, 0.1], [0, 0.1, 0.1, 1]])
140
+ cov = T.dot(T.T)
141
+ mean = np.zeros(4)
142
+
143
+ rng = check_random_state(0)
144
+ Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False)
145
+ X = Z[:, 1:]
146
+ y = Z[:, 0]
147
+
148
+ mi = mutual_info_regression(X, y, random_state=0)
149
+ assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
150
+ # XXX: should mutual_info_regression be fixed to avoid
151
+ # up-casting float32 inputs to float64?
152
+ assert mi.dtype == np.float64
153
+
154
+
155
+ def test_mutual_info_classif_mixed(global_dtype):
156
+ # Here the target is discrete and there are two continuous and one
157
+ # discrete feature. The idea of this test is clear from the code.
158
+ rng = check_random_state(0)
159
+ X = rng.rand(1000, 3).astype(global_dtype, copy=False)
160
+ X[:, 1] += X[:, 0]
161
+ y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
162
+ X[:, 2] = X[:, 2] > 0.5
163
+
164
+ mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3, random_state=0)
165
+ assert_array_equal(np.argsort(-mi), [2, 0, 1])
166
+ for n_neighbors in [5, 7, 9]:
167
+ mi_nn = mutual_info_classif(
168
+ X, y, discrete_features=[2], n_neighbors=n_neighbors, random_state=0
169
+ )
170
+ # Check that the continuous values have a higher MI with greater
171
+ # n_neighbors
172
+ assert mi_nn[0] > mi[0]
173
+ assert mi_nn[1] > mi[1]
174
+ # The n_neighbors should not have any effect on the discrete value
175
+ # The MI should be the same
176
+ assert mi_nn[2] == mi[2]
177
+
178
+
179
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
180
+ def test_mutual_info_options(global_dtype, csr_container):
181
+ X = np.array(
182
+ [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype
183
+ )
184
+ y = np.array([0, 1, 2, 2, 1], dtype=global_dtype)
185
+ X_csr = csr_container(X)
186
+
187
+ for mutual_info in (mutual_info_regression, mutual_info_classif):
188
+ with pytest.raises(ValueError):
189
+ mutual_info(X_csr, y, discrete_features=False)
190
+ with pytest.raises(ValueError):
191
+ mutual_info(X, y, discrete_features="manual")
192
+ with pytest.raises(ValueError):
193
+ mutual_info(X_csr, y, discrete_features=[True, False, True])
194
+ with pytest.raises(IndexError):
195
+ mutual_info(X, y, discrete_features=[True, False, True, False])
196
+ with pytest.raises(IndexError):
197
+ mutual_info(X, y, discrete_features=[1, 4])
198
+
199
+ mi_1 = mutual_info(X, y, discrete_features="auto", random_state=0)
200
+ mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)
201
+ mi_3 = mutual_info(X_csr, y, discrete_features="auto", random_state=0)
202
+ mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0)
203
+ mi_5 = mutual_info(X, y, discrete_features=[True, False, True], random_state=0)
204
+ mi_6 = mutual_info(X, y, discrete_features=[0, 2], random_state=0)
205
+
206
+ assert_allclose(mi_1, mi_2)
207
+ assert_allclose(mi_3, mi_4)
208
+ assert_allclose(mi_5, mi_6)
209
+
210
+ assert not np.allclose(mi_1, mi_3)
211
+
212
+
213
+ @pytest.mark.parametrize("correlated", [True, False])
214
+ def test_mutual_information_symmetry_classif_regression(correlated, global_random_seed):
215
+ """Check that `mutual_info_classif` and `mutual_info_regression` are
216
+ symmetric by switching the target `y` as `feature` in `X` and vice
217
+ versa.
218
+
219
+ Non-regression test for:
220
+ https://github.com/scikit-learn/scikit-learn/issues/23720
221
+ """
222
+ rng = np.random.RandomState(global_random_seed)
223
+ n = 100
224
+ d = rng.randint(10, size=n)
225
+
226
+ if correlated:
227
+ c = d.astype(np.float64)
228
+ else:
229
+ c = rng.normal(0, 1, size=n)
230
+
231
+ mi_classif = mutual_info_classif(
232
+ c[:, None], d, discrete_features=[False], random_state=global_random_seed
233
+ )
234
+
235
+ mi_regression = mutual_info_regression(
236
+ d[:, None], c, discrete_features=[True], random_state=global_random_seed
237
+ )
238
+
239
+ assert mi_classif == pytest.approx(mi_regression)
240
+
241
+
242
+ def test_mutual_info_regression_X_int_dtype(global_random_seed):
243
+ """Check that results agree when X is integer dtype and float dtype.
244
+
245
+ Non-regression test for Issue #26696.
246
+ """
247
+ rng = np.random.RandomState(global_random_seed)
248
+ X = rng.randint(100, size=(100, 10))
249
+ X_float = X.astype(np.float64, copy=True)
250
+ y = rng.randint(100, size=100)
251
+
252
+ expected = mutual_info_regression(X_float, y, random_state=global_random_seed)
253
+ result = mutual_info_regression(X, y, random_state=global_random_seed)
254
+ assert_allclose(result, expected)
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py ADDED
@@ -0,0 +1,615 @@
1
+ """
2
+ Testing Recursive feature elimination
3
+ """
4
+
5
+ from operator import attrgetter
6
+
7
+ import numpy as np
8
+ import pytest
9
+ from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
10
+
11
+ from sklearn.base import BaseEstimator, ClassifierMixin
12
+ from sklearn.compose import TransformedTargetRegressor
13
+ from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression
14
+ from sklearn.datasets import load_iris, make_friedman1
15
+ from sklearn.ensemble import RandomForestClassifier
16
+ from sklearn.feature_selection import RFE, RFECV
17
+ from sklearn.impute import SimpleImputer
18
+ from sklearn.linear_model import LinearRegression, LogisticRegression
19
+ from sklearn.metrics import get_scorer, make_scorer, zero_one_loss
20
+ from sklearn.model_selection import GroupKFold, cross_val_score
21
+ from sklearn.pipeline import make_pipeline
22
+ from sklearn.preprocessing import StandardScaler
23
+ from sklearn.svm import SVC, SVR, LinearSVR
24
+ from sklearn.utils import check_random_state
25
+ from sklearn.utils._testing import ignore_warnings
26
+ from sklearn.utils.fixes import CSR_CONTAINERS
27
+
28
+
29
+ class MockClassifier:
30
+ """
31
+ Dummy classifier to test recursive feature elimination
32
+ """
33
+
34
+ def __init__(self, foo_param=0):
35
+ self.foo_param = foo_param
36
+
37
+ def fit(self, X, y):
38
+ assert len(X) == len(y)
39
+ self.coef_ = np.ones(X.shape[1], dtype=np.float64)
40
+ return self
41
+
42
+ def predict(self, T):
43
+ return T.shape[0]
44
+
45
+ predict_proba = predict
46
+ decision_function = predict
47
+ transform = predict
48
+
49
+ def score(self, X=None, y=None):
50
+ return 0.0
51
+
52
+ def get_params(self, deep=True):
53
+ return {"foo_param": self.foo_param}
54
+
55
+ def set_params(self, **params):
56
+ return self
57
+
58
+ def _more_tags(self):
59
+ return {"allow_nan": True}
60
+
61
+
62
+ def test_rfe_features_importance():
63
+ generator = check_random_state(0)
64
+ iris = load_iris()
65
+ # Add some irrelevant features. Random seed is set to make sure that
66
+ # irrelevant features are always irrelevant.
67
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
68
+ y = iris.target
69
+
70
+ clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2)
71
+ rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
72
+ rfe.fit(X, y)
73
+ assert len(rfe.ranking_) == X.shape[1]
74
+
75
+ clf_svc = SVC(kernel="linear")
76
+ rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
77
+ rfe_svc.fit(X, y)
78
+
79
+ # Check if the supports are equal
80
+ assert_array_equal(rfe.get_support(), rfe_svc.get_support())
81
+
82
+
83
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
84
+ def test_rfe(csr_container):
85
+ generator = check_random_state(0)
86
+ iris = load_iris()
87
+ # Add some irrelevant features. Random seed is set to make sure that
88
+ # irrelevant features are always irrelevant.
89
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
90
+ X_sparse = csr_container(X)
91
+ y = iris.target
92
+
93
+ # dense model
94
+ clf = SVC(kernel="linear")
95
+ rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
96
+ rfe.fit(X, y)
97
+ X_r = rfe.transform(X)
98
+ clf.fit(X_r, y)
99
+ assert len(rfe.ranking_) == X.shape[1]
100
+
101
+ # sparse model
102
+ clf_sparse = SVC(kernel="linear")
103
+ rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
104
+ rfe_sparse.fit(X_sparse, y)
105
+ X_r_sparse = rfe_sparse.transform(X_sparse)
106
+
107
+ assert X_r.shape == iris.data.shape
108
+ assert_array_almost_equal(X_r[:10], iris.data[:10])
109
+
110
+ assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
111
+ assert rfe.score(X, y) == clf.score(iris.data, iris.target)
112
+ assert_array_almost_equal(X_r, X_r_sparse.toarray())
113
+
114
+
115
+ def test_RFE_fit_score_params():
116
+ # Make sure RFE passes the metadata down to fit and score methods of the
117
+ # underlying estimator
118
+ class TestEstimator(BaseEstimator, ClassifierMixin):
119
+ def fit(self, X, y, prop=None):
120
+ if prop is None:
121
+ raise ValueError("fit: prop cannot be None")
122
+ self.svc_ = SVC(kernel="linear").fit(X, y)
123
+ self.coef_ = self.svc_.coef_
124
+ return self
125
+
126
+ def score(self, X, y, prop=None):
127
+ if prop is None:
128
+ raise ValueError("score: prop cannot be None")
129
+ return self.svc_.score(X, y)
130
+
131
+ X, y = load_iris(return_X_y=True)
132
+ with pytest.raises(ValueError, match="fit: prop cannot be None"):
133
+ RFE(estimator=TestEstimator()).fit(X, y)
134
+ with pytest.raises(ValueError, match="score: prop cannot be None"):
135
+ RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y)
136
+
137
+ RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y, prop="foo")
138
+
139
+
140
+ def test_rfe_percent_n_features():
141
+ # test that the results are the same
142
+ generator = check_random_state(0)
143
+ iris = load_iris()
144
+ # Add some irrelevant features. Random seed is set to make sure that
145
+ # irrelevant features are always irrelevant.
146
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
147
+ y = iris.target
148
+ # there are 10 features in the data. We select 40%.
149
+ clf = SVC(kernel="linear")
150
+ rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1)
151
+ rfe_num.fit(X, y)
152
+
153
+ rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1)
154
+ rfe_perc.fit(X, y)
155
+
156
+ assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_)
157
+ assert_array_equal(rfe_perc.support_, rfe_num.support_)
158
+
159
+
160
+ def test_rfe_mockclassifier():
161
+ generator = check_random_state(0)
162
+ iris = load_iris()
163
+ # Add some irrelevant features. Random seed is set to make sure that
164
+ # irrelevant features are always irrelevant.
165
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
166
+ y = iris.target
167
+
168
+ # dense model
169
+ clf = MockClassifier()
170
+ rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
171
+ rfe.fit(X, y)
172
+ X_r = rfe.transform(X)
173
+ clf.fit(X_r, y)
174
+ assert len(rfe.ranking_) == X.shape[1]
175
+ assert X_r.shape == iris.data.shape
176
+
177
+
178
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
179
+ def test_rfecv(csr_container):
180
+ generator = check_random_state(0)
181
+ iris = load_iris()
182
+ # Add some irrelevant features. Random seed is set to make sure that
183
+ # irrelevant features are always irrelevant.
184
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
185
+ y = list(iris.target) # regression test: list should be supported
186
+
187
+ # Test using the score function
188
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1)
189
+ rfecv.fit(X, y)
190
+ # non-regression test for missing worst feature:
191
+
192
+ for key in rfecv.cv_results_.keys():
193
+ assert len(rfecv.cv_results_[key]) == X.shape[1]
194
+
195
+ assert len(rfecv.ranking_) == X.shape[1]
196
+ X_r = rfecv.transform(X)
197
+
198
+ # All the noisy variables were filtered out
199
+ assert_array_equal(X_r, iris.data)
200
+
201
+ # same in sparse
202
+ rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1)
203
+ X_sparse = csr_container(X)
204
+ rfecv_sparse.fit(X_sparse, y)
205
+ X_r_sparse = rfecv_sparse.transform(X_sparse)
206
+ assert_array_equal(X_r_sparse.toarray(), iris.data)
207
+
208
+ # Test using a customized loss function
209
+ scoring = make_scorer(zero_one_loss, greater_is_better=False)
210
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring)
211
+ ignore_warnings(rfecv.fit)(X, y)
212
+ X_r = rfecv.transform(X)
213
+ assert_array_equal(X_r, iris.data)
214
+
215
+ # Test using a scorer
216
+ scorer = get_scorer("accuracy")
217
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer)
218
+ rfecv.fit(X, y)
219
+ X_r = rfecv.transform(X)
220
+ assert_array_equal(X_r, iris.data)
221
+
222
+ # Test fix on cv_results_
223
+ def test_scorer(estimator, X, y):
224
+ return 1.0
225
+
226
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer)
227
+ rfecv.fit(X, y)
228
+
229
+ # In the event of cross validation score ties, the expected behavior of
230
+ # RFECV is to return the FEWEST features that maximize the CV score.
231
+ # Because test_scorer always returns 1.0 in this example, RFECV should
232
+ # reduce the dimensionality to a single feature (i.e. n_features_ = 1)
233
+ assert rfecv.n_features_ == 1
234
+
235
+ # Same as the first two tests, but with step=2
236
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=2)
237
+ rfecv.fit(X, y)
238
+
239
+ for key in rfecv.cv_results_.keys():
240
+ assert len(rfecv.cv_results_[key]) == 6
241
+
242
+ assert len(rfecv.ranking_) == X.shape[1]
243
+ X_r = rfecv.transform(X)
244
+ assert_array_equal(X_r, iris.data)
245
+
246
+ rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2)
247
+ X_sparse = csr_container(X)
248
+ rfecv_sparse.fit(X_sparse, y)
249
+ X_r_sparse = rfecv_sparse.transform(X_sparse)
250
+ assert_array_equal(X_r_sparse.toarray(), iris.data)
251
+
252
+ # Verifying that steps < 1 don't blow up.
253
+ rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=0.2)
254
+ X_sparse = csr_container(X)
255
+ rfecv_sparse.fit(X_sparse, y)
256
+ X_r_sparse = rfecv_sparse.transform(X_sparse)
257
+ assert_array_equal(X_r_sparse.toarray(), iris.data)
258
+
259
+
260
+ def test_rfecv_mockclassifier():
261
+ generator = check_random_state(0)
262
+ iris = load_iris()
263
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
264
+ y = list(iris.target) # regression test: list should be supported
265
+
266
+ # Test using the score function
267
+ rfecv = RFECV(estimator=MockClassifier(), step=1)
268
+ rfecv.fit(X, y)
269
+ # non-regression test for missing worst feature:
270
+
271
+ for key in rfecv.cv_results_.keys():
272
+ assert len(rfecv.cv_results_[key]) == X.shape[1]
273
+
274
+ assert len(rfecv.ranking_) == X.shape[1]
275
+
276
+
277
+ def test_rfecv_verbose_output():
278
+ # Check verbose=1 is producing an output.
279
+ import sys
280
+ from io import StringIO
281
+
282
+ sys.stdout = StringIO()
283
+
284
+ generator = check_random_state(0)
285
+ iris = load_iris()
286
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
287
+ y = list(iris.target)
288
+
289
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1)
290
+ rfecv.fit(X, y)
291
+
292
+ verbose_output = sys.stdout
293
+ verbose_output.seek(0)
294
+ assert len(verbose_output.readline()) > 0
295
+
296
+
297
+ def test_rfecv_cv_results_size(global_random_seed):
298
+ generator = check_random_state(global_random_seed)
299
+ iris = load_iris()
300
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
301
+ y = list(iris.target) # regression test: list should be supported
302
+
303
+ # Non-regression test for varying combinations of step and
304
+ # min_features_to_select.
305
+ for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]:
306
+ rfecv = RFECV(
307
+ estimator=MockClassifier(),
308
+ step=step,
309
+ min_features_to_select=min_features_to_select,
310
+ )
311
+ rfecv.fit(X, y)
312
+
313
+ score_len = np.ceil((X.shape[1] - min_features_to_select) / step) + 1
314
+
315
+ for key in rfecv.cv_results_.keys():
316
+ assert len(rfecv.cv_results_[key]) == score_len
317
+
318
+ assert len(rfecv.ranking_) == X.shape[1]
319
+ assert rfecv.n_features_ >= min_features_to_select
320
+
321
+
322
+ def test_rfe_estimator_tags():
323
+ rfe = RFE(SVC(kernel="linear"))
324
+ assert rfe._estimator_type == "classifier"
325
+ # make sure that cross-validation is stratified
326
+ iris = load_iris()
327
+ score = cross_val_score(rfe, iris.data, iris.target)
328
+ assert score.min() > 0.7
329
+
330
+
331
+ def test_rfe_min_step(global_random_seed):
332
+ n_features = 10
333
+ X, y = make_friedman1(
334
+ n_samples=50, n_features=n_features, random_state=global_random_seed
335
+ )
336
+ n_samples, n_features = X.shape
337
+ estimator = SVR(kernel="linear")
338
+
339
+ # Test when floor(step * n_features) <= 0
340
+ selector = RFE(estimator, step=0.01)
341
+ sel = selector.fit(X, y)
342
+ assert sel.support_.sum() == n_features // 2
343
+
344
+ # Test when step is between (0,1) and floor(step * n_features) > 0
345
+ selector = RFE(estimator, step=0.20)
346
+ sel = selector.fit(X, y)
347
+ assert sel.support_.sum() == n_features // 2
348
+
349
+ # Test when step is an integer
350
+ selector = RFE(estimator, step=5)
351
+ sel = selector.fit(X, y)
352
+ assert sel.support_.sum() == n_features // 2
353
+
354
+
355
+ def test_number_of_subsets_of_features(global_random_seed):
356
+ # In RFE, 'number_of_subsets_of_features'
357
+ # = the number of iterations in '_fit'
358
+ # = max(ranking_)
359
+ # = 1 + (n_features + step - n_features_to_select - 1) // step
360
+ # After optimization #4534, this number
361
+ # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
362
+ # This test case is to test their equivalence, refer to #4534 and #3824
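+ # (they agree because (a + s - 1) // s is ceiling division, i.e.
+ # ceil(a / s) for positive integers a and s)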
363
+
364
+ def formula1(n_features, n_features_to_select, step):
365
+ return 1 + ((n_features + step - n_features_to_select - 1) // step)
366
+
367
+ def formula2(n_features, n_features_to_select, step):
368
+ return 1 + np.ceil((n_features - n_features_to_select) / float(step))
369
+
370
+ # RFE
371
+ # Case 1, n_features - n_features_to_select is divisible by step
372
+ # Case 2, n_features - n_features_to_select is not divisible by step
373
+ n_features_list = [11, 11]
374
+ n_features_to_select_list = [3, 3]
375
+ step_list = [2, 3]
376
+ for n_features, n_features_to_select, step in zip(
377
+ n_features_list, n_features_to_select_list, step_list
378
+ ):
379
+ generator = check_random_state(global_random_seed)
380
+ X = generator.normal(size=(100, n_features))
381
+ y = generator.rand(100).round()
382
+ rfe = RFE(
383
+ estimator=SVC(kernel="linear"),
384
+ n_features_to_select=n_features_to_select,
385
+ step=step,
386
+ )
387
+ rfe.fit(X, y)
388
+ # this number is also equal to the maximum of ranking_
389
+ assert np.max(rfe.ranking_) == formula1(n_features, n_features_to_select, step)
390
+ assert np.max(rfe.ranking_) == formula2(n_features, n_features_to_select, step)
391
+
392
+ # In RFECV, 'fit' calls 'RFE._fit'
393
+ # 'number_of_subsets_of_features' of RFE
394
+ # = the size of each score in 'cv_results_' of RFECV
395
+ # = the number of iterations of the for loop before optimization #4534
396
+
397
+ # RFECV, n_features_to_select = 1
398
+ # Case 1, n_features - 1 is divisible by step
399
+ # Case 2, n_features - 1 is not divisible by step
400
+
401
+ n_features_to_select = 1
402
+ n_features_list = [11, 10]
403
+ step_list = [2, 2]
404
+ for n_features, step in zip(n_features_list, step_list):
405
+ generator = check_random_state(global_random_seed)
406
+ X = generator.normal(size=(100, n_features))
407
+ y = generator.rand(100).round()
408
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=step)
409
+ rfecv.fit(X, y)
410
+
411
+ for key in rfecv.cv_results_.keys():
412
+ assert len(rfecv.cv_results_[key]) == formula1(
413
+ n_features, n_features_to_select, step
414
+ )
415
+ assert len(rfecv.cv_results_[key]) == formula2(
416
+ n_features, n_features_to_select, step
417
+ )
418
+
419
+
420
+ def test_rfe_cv_n_jobs(global_random_seed):
421
+ generator = check_random_state(global_random_seed)
422
+ iris = load_iris()
423
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
424
+ y = iris.target
425
+
426
+ rfecv = RFECV(estimator=SVC(kernel="linear"))
427
+ rfecv.fit(X, y)
428
+ rfecv_ranking = rfecv.ranking_
429
+
430
+ rfecv_cv_results_ = rfecv.cv_results_
431
+
432
+ rfecv.set_params(n_jobs=2)
433
+ rfecv.fit(X, y)
434
+ assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
435
+
436
+ assert rfecv_cv_results_.keys() == rfecv.cv_results_.keys()
437
+ for key in rfecv_cv_results_.keys():
438
+ assert rfecv_cv_results_[key] == pytest.approx(rfecv.cv_results_[key])
439
+
440
+
441
+ def test_rfe_cv_groups():
442
+ generator = check_random_state(0)
443
+ iris = load_iris()
444
+ number_groups = 4
445
+ groups = np.floor(np.linspace(0, number_groups, len(iris.target)))
446
+ X = iris.data
447
+ y = (iris.target > 0).astype(int)
448
+
449
+ est_groups = RFECV(
450
+ estimator=RandomForestClassifier(random_state=generator),
451
+ step=1,
452
+ scoring="accuracy",
453
+ cv=GroupKFold(n_splits=2),
454
+ )
455
+ est_groups.fit(X, y, groups=groups)
456
+ assert est_groups.n_features_ > 0
457
+
458
+
459
+ @pytest.mark.parametrize(
460
+ "importance_getter", [attrgetter("regressor_.coef_"), "regressor_.coef_"]
461
+ )
462
+ @pytest.mark.parametrize("selector, expected_n_features", [(RFE, 5), (RFECV, 4)])
463
+ def test_rfe_wrapped_estimator(importance_getter, selector, expected_n_features):
464
+ # Non-regression test for
465
+ # https://github.com/scikit-learn/scikit-learn/issues/15312
466
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
467
+ estimator = LinearSVR(dual="auto", random_state=0)
468
+
469
+ log_estimator = TransformedTargetRegressor(
470
+ regressor=estimator, func=np.log, inverse_func=np.exp
471
+ )
472
+
473
+ selector = selector(log_estimator, importance_getter=importance_getter)
474
+ sel = selector.fit(X, y)
475
+ assert sel.support_.sum() == expected_n_features
476
+
477
+
478
+ @pytest.mark.parametrize(
479
+ "importance_getter, err_type",
480
+ [
481
+ ("auto", ValueError),
482
+ ("random", AttributeError),
483
+ (lambda x: x.importance, AttributeError),
484
+ ],
485
+ )
486
+ @pytest.mark.parametrize("Selector", [RFE, RFECV])
487
+ def test_rfe_importance_getter_validation(importance_getter, err_type, Selector):
488
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=42)
489
+ estimator = LinearSVR(dual="auto")
490
+ log_estimator = TransformedTargetRegressor(
491
+ regressor=estimator, func=np.log, inverse_func=np.exp
492
+ )
493
+
494
+ with pytest.raises(err_type):
495
+ model = Selector(log_estimator, importance_getter=importance_getter)
496
+ model.fit(X, y)
497
+
498
+
499
+ @pytest.mark.parametrize("cv", [None, 5])
500
+ def test_rfe_allow_nan_inf_in_x(cv):
501
+ iris = load_iris()
502
+ X = iris.data
503
+ y = iris.target
504
+
505
+ # add nan and inf value to X
506
+ X[0][0] = np.nan
507
+ X[0][1] = np.inf
508
+
509
+ clf = MockClassifier()
510
+ if cv is not None:
511
+ rfe = RFECV(estimator=clf, cv=cv)
512
+ else:
513
+ rfe = RFE(estimator=clf)
514
+ rfe.fit(X, y)
515
+ rfe.transform(X)
516
+
517
+
518
+ def test_w_pipeline_2d_coef_():
519
+ pipeline = make_pipeline(StandardScaler(), LogisticRegression())
520
+
521
+ data, y = load_iris(return_X_y=True)
522
+ sfm = RFE(
523
+ pipeline,
524
+ n_features_to_select=2,
525
+ importance_getter="named_steps.logisticregression.coef_",
526
+ )
527
+
528
+ sfm.fit(data, y)
529
+ assert sfm.transform(data).shape[1] == 2
530
+
531
+
532
+ def test_rfecv_std_and_mean(global_random_seed):
533
+ generator = check_random_state(global_random_seed)
534
+ iris = load_iris()
535
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
536
+ y = iris.target
537
+
538
+ rfecv = RFECV(estimator=SVC(kernel="linear"))
539
+ rfecv.fit(X, y)
540
+ n_split_keys = len(rfecv.cv_results_) - 2
541
+ split_keys = [f"split{i}_test_score" for i in range(n_split_keys)]
542
+
543
+ cv_scores = np.asarray([rfecv.cv_results_[key] for key in split_keys])
544
+ expected_mean = np.mean(cv_scores, axis=0)
545
+ expected_std = np.std(cv_scores, axis=0)
546
+
547
+ assert_allclose(rfecv.cv_results_["mean_test_score"], expected_mean)
548
+ assert_allclose(rfecv.cv_results_["std_test_score"], expected_std)
549
+
550
+
551
+ @pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
552
+ def test_multioutput(ClsRFE):
553
+ X = np.random.normal(size=(10, 3))
554
+ y = np.random.randint(2, size=(10, 2))
555
+ clf = RandomForestClassifier(n_estimators=5)
556
+ rfe_test = ClsRFE(clf)
557
+ rfe_test.fit(X, y)
558
+
559
+
560
+ @pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
561
+ def test_pipeline_with_nans(ClsRFE):
562
+ """Check that RFE works with pipeline that accept nans.
563
+
564
+ Non-regression test for gh-21743.
565
+ """
566
+ X, y = load_iris(return_X_y=True)
567
+ X[0, 0] = np.nan
568
+
569
+ pipe = make_pipeline(
570
+ SimpleImputer(),
571
+ StandardScaler(),
572
+ LogisticRegression(),
573
+ )
574
+
575
+ fs = ClsRFE(
576
+ estimator=pipe,
577
+ importance_getter="named_steps.logisticregression.coef_",
578
+ )
579
+ fs.fit(X, y)
580
+
581
+
582
+ @pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
583
+ @pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
584
+ def test_rfe_pls(ClsRFE, PLSEstimator):
585
+ """Check the behaviour of RFE with PLS estimators.
586
+
587
+ Non-regression test for:
588
+ https://github.com/scikit-learn/scikit-learn/issues/12410
589
+ """
590
+ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
591
+ estimator = PLSEstimator(n_components=1)
592
+ selector = ClsRFE(estimator, step=1).fit(X, y)
593
+ assert selector.score(X, y) > 0.5
594
+
595
+
596
+ def test_rfe_estimator_attribute_error():
597
+ """Check that we raise the proper AttributeError when the estimator
598
+ does not implement the `decision_function` method, which is decorated with
599
+ `available_if`.
600
+
601
+ Non-regression test for:
602
+ https://github.com/scikit-learn/scikit-learn/issues/28108
603
+ """
604
+ iris = load_iris()
605
+
606
+ # `LinearRegression` does not implement 'decision_function' and should raise an
607
+ # AttributeError
608
+ rfe = RFE(estimator=LinearRegression())
609
+
610
+ outer_msg = "This 'RFE' has no attribute 'decision_function'"
611
+ inner_msg = "'LinearRegression' object has no attribute 'decision_function'"
612
+ with pytest.raises(AttributeError, match=outer_msg) as exec_info:
613
+ rfe.fit(iris.data, iris.target).decision_function(iris.data)
614
+ assert isinstance(exec_info.value.__cause__, AttributeError)
615
+ assert inner_msg in str(exec_info.value.__cause__)
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py ADDED
@@ -0,0 +1,323 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_array_equal
4
+
5
+ from sklearn.cluster import KMeans
6
+ from sklearn.datasets import make_blobs, make_classification, make_regression
7
+ from sklearn.ensemble import HistGradientBoostingRegressor
8
+ from sklearn.feature_selection import SequentialFeatureSelector
9
+ from sklearn.linear_model import LinearRegression
10
+ from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
11
+ from sklearn.neighbors import KNeighborsClassifier
12
+ from sklearn.pipeline import make_pipeline
13
+ from sklearn.preprocessing import StandardScaler
14
+ from sklearn.utils.fixes import CSR_CONTAINERS
15
+
16
+
17
+ def test_bad_n_features_to_select():
18
+ n_features = 5
19
+ X, y = make_regression(n_features=n_features)
20
+ sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=n_features)
21
+ with pytest.raises(ValueError, match="n_features_to_select must be < n_features"):
22
+ sfs.fit(X, y)
23
+
24
+
25
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
26
+ @pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto"))
27
+ def test_n_features_to_select(direction, n_features_to_select):
28
+ # Make sure n_features_to_select is respected
29
+
30
+ n_features = 10
31
+ X, y = make_regression(n_features=n_features, random_state=0)
32
+ sfs = SequentialFeatureSelector(
33
+ LinearRegression(),
34
+ n_features_to_select=n_features_to_select,
35
+ direction=direction,
36
+ cv=2,
37
+ )
38
+ sfs.fit(X, y)
39
+
40
+ if n_features_to_select == "auto":
41
+ n_features_to_select = n_features // 2
42
+
43
+ assert sfs.get_support(indices=True).shape[0] == n_features_to_select
44
+ assert sfs.n_features_to_select_ == n_features_to_select
45
+ assert sfs.transform(X).shape[1] == n_features_to_select
46
+
47
+
48
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
49
+ def test_n_features_to_select_auto(direction):
50
+ """Check the behaviour of `n_features_to_select="auto"` with different
51
+ values for the parameter `tol`.
52
+ """
53
+
54
+ n_features = 10
55
+ tol = 1e-3
56
+ X, y = make_regression(n_features=n_features, random_state=0)
57
+ sfs = SequentialFeatureSelector(
58
+ LinearRegression(),
59
+ n_features_to_select="auto",
60
+ tol=tol,
61
+ direction=direction,
62
+ cv=2,
63
+ )
64
+ sfs.fit(X, y)
65
+
66
+ max_features_to_select = n_features - 1
67
+
68
+ assert sfs.get_support(indices=True).shape[0] <= max_features_to_select
69
+ assert sfs.n_features_to_select_ <= max_features_to_select
70
+ assert sfs.transform(X).shape[1] <= max_features_to_select
71
+ assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_
72
+
73
+
74
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
75
+ def test_n_features_to_select_stopping_criterion(direction):
76
+ """Check the behaviour stopping criterion for feature selection
77
+ depending on the values of `n_features_to_select` and `tol`.
78
+
79
+ When `direction` is `'forward'`, select a new features at random
80
+ among those not currently selected in selector.support_,
81
+ build a new version of the data that includes all the features
82
+ in selector.support_ + this newly selected feature.
83
+ And check that the cross-validation score of the model trained on
84
+ this new dataset variant is lower than the model with
85
+ the selected forward selected features or at least does not improve
86
+ by more than the tol margin.
87
+
88
+ When `direction` is `'backward'`, instead of adding a new feature
89
+ to selector.support_, try to remove one of those selected features at random
90
+ And check that the cross-validation score is either decreasing or
91
+ not improving by more than the tol margin.
92
+ """
93
+
94
+ X, y = make_regression(n_features=50, n_informative=10, random_state=0)
95
+
96
+ tol = 1e-3
97
+
98
+ sfs = SequentialFeatureSelector(
99
+ LinearRegression(),
100
+ n_features_to_select="auto",
101
+ tol=tol,
102
+ direction=direction,
103
+ cv=2,
104
+ )
105
+ sfs.fit(X, y)
106
+ selected_X = sfs.transform(X)
107
+
108
+ rng = np.random.RandomState(0)
109
+
110
+ added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True)))
111
+ added_X = np.hstack(
112
+ [
113
+ selected_X,
114
+ (X[:, rng.choice(added_candidates)])[:, np.newaxis],
115
+ ]
116
+ )
117
+
118
+ removed_candidate = rng.choice(list(range(sfs.n_features_to_select_)))
119
+ removed_X = np.delete(selected_X, removed_candidate, axis=1)
120
+
121
+ plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean()
122
+ sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean()
123
+ added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean()
124
+ removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean()
125
+
126
+ assert sfs_cv_score >= plain_cv_score
127
+
128
+ if direction == "forward":
129
+ assert (sfs_cv_score - added_cv_score) <= tol
130
+ assert (sfs_cv_score - removed_cv_score) >= tol
131
+ else:
132
+ assert (added_cv_score - sfs_cv_score) <= tol
133
+ assert (removed_cv_score - sfs_cv_score) <= tol
134
+
135
+
136
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
137
+ @pytest.mark.parametrize(
138
+ "n_features_to_select, expected",
139
+ (
140
+ (0.1, 1),
141
+ (1.0, 10),
142
+ (0.5, 5),
143
+ ),
144
+ )
145
+ def test_n_features_to_select_float(direction, n_features_to_select, expected):
146
+ # Test passing a float as n_features_to_select
147
+ X, y = make_regression(n_features=10)
148
+ sfs = SequentialFeatureSelector(
149
+ LinearRegression(),
150
+ n_features_to_select=n_features_to_select,
151
+ direction=direction,
152
+ cv=2,
153
+ )
154
+ sfs.fit(X, y)
155
+ assert sfs.n_features_to_select_ == expected
156
+
157
+
158
+ @pytest.mark.parametrize("seed", range(10))
159
+ @pytest.mark.parametrize("direction", ("forward", "backward"))
160
+ @pytest.mark.parametrize(
161
+ "n_features_to_select, expected_selected_features",
162
+ [
163
+ (2, [0, 2]), # f1 is dropped since it has no predictive power
164
+ (1, [2]), # f2 is more predictive than f0 so it's kept
165
+ ],
166
+ )
167
+ def test_sanity(seed, direction, n_features_to_select, expected_selected_features):
168
+ # Basic sanity check: 3 features, only f0 and f2 are correlated with the
169
+ # target, f2 having a stronger correlation than f0. We expect f1 to be
170
+ # dropped, and f2 to always be selected.
171
+
172
+ rng = np.random.RandomState(seed)
173
+ n_samples = 100
174
+ X = rng.randn(n_samples, 3)
175
+ y = 3 * X[:, 0] - 10 * X[:, 2]
176
+
177
+ sfs = SequentialFeatureSelector(
178
+ LinearRegression(),
179
+ n_features_to_select=n_features_to_select,
180
+ direction=direction,
181
+ cv=2,
182
+ )
183
+ sfs.fit(X, y)
184
+ assert_array_equal(sfs.get_support(indices=True), expected_selected_features)
185
+
186
+
187
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
188
+ def test_sparse_support(csr_container):
189
+ # Make sure sparse data is supported
190
+
191
+ X, y = make_regression(n_features=10)
192
+ X = csr_container(X)
193
+ sfs = SequentialFeatureSelector(
194
+ LinearRegression(), n_features_to_select="auto", cv=2
195
+ )
196
+ sfs.fit(X, y)
197
+ sfs.transform(X)
198
+
199
+
200
+ def test_nan_support():
201
+ # Make sure nans are OK if the underlying estimator supports nans
202
+
203
+ rng = np.random.RandomState(0)
204
+ n_samples, n_features = 40, 4
205
+ X, y = make_regression(n_samples, n_features, random_state=0)
206
+ nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool)
207
+ X[nan_mask] = np.nan
208
+ sfs = SequentialFeatureSelector(
209
+ HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2
210
+ )
211
+ sfs.fit(X, y)
212
+ sfs.transform(X)
213
+
214
+ with pytest.raises(ValueError, match="Input X contains NaN"):
215
+ # LinearRegression does not support nans
216
+ SequentialFeatureSelector(
217
+ LinearRegression(), n_features_to_select="auto", cv=2
218
+ ).fit(X, y)
219
+
220
+
221
+ def test_pipeline_support():
222
+ # Make sure that pipelines can be passed into SFS and that SFS can be
223
+ # passed into a pipeline
224
+
225
+ n_samples, n_features = 50, 3
226
+ X, y = make_regression(n_samples, n_features, random_state=0)
227
+
228
+ # pipeline in SFS
229
+ pipe = make_pipeline(StandardScaler(), LinearRegression())
230
+ sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2)
231
+ sfs.fit(X, y)
232
+ sfs.transform(X)
233
+
234
+ # SFS in pipeline
235
+ sfs = SequentialFeatureSelector(
236
+ LinearRegression(), n_features_to_select="auto", cv=2
237
+ )
238
+ pipe = make_pipeline(StandardScaler(), sfs)
239
+ pipe.fit(X, y)
240
+ pipe.transform(X)
241
+
242
+
243
+ @pytest.mark.parametrize("n_features_to_select", (2, 3))
244
+ def test_unsupervised_model_fit(n_features_to_select):
245
+ # Make sure that models without classification labels are not being
246
+ # validated
247
+
248
+ X, y = make_blobs(n_features=4)
249
+ sfs = SequentialFeatureSelector(
250
+ KMeans(n_init=1),
251
+ n_features_to_select=n_features_to_select,
252
+ )
253
+ sfs.fit(X)
254
+ assert sfs.transform(X).shape[1] == n_features_to_select
255
+
256
+
257
+ @pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3))
258
+ def test_no_y_validation_model_fit(y):
259
+ # Make sure that other non-conventional y labels are not accepted
260
+
261
+ X, clusters = make_blobs(n_features=6)
262
+ sfs = SequentialFeatureSelector(
263
+ KMeans(),
264
+ n_features_to_select=3,
265
+ )
266
+
267
+ with pytest.raises((TypeError, ValueError)):
268
+ sfs.fit(X, y)
269
+
270
+
271
+ def test_forward_neg_tol_error():
272
+ """Check that we raise an error when tol<0 and direction='forward'"""
273
+ X, y = make_regression(n_features=10, random_state=0)
274
+ sfs = SequentialFeatureSelector(
275
+ LinearRegression(),
276
+ n_features_to_select="auto",
277
+ direction="forward",
278
+ tol=-1e-3,
279
+ )
280
+
281
+ with pytest.raises(ValueError, match="tol must be positive"):
282
+ sfs.fit(X, y)
283
+
284
+
285
+ def test_backward_neg_tol():
286
+ """Check that SequentialFeatureSelector works negative tol
287
+
288
+ non-regression test for #25525
289
+ """
290
+ X, y = make_regression(n_features=10, random_state=0)
291
+ lr = LinearRegression()
292
+ initial_score = lr.fit(X, y).score(X, y)
293
+
294
+ sfs = SequentialFeatureSelector(
295
+ lr,
296
+ n_features_to_select="auto",
297
+ direction="backward",
298
+ tol=-1e-3,
299
+ )
300
+ Xr = sfs.fit_transform(X, y)
301
+ new_score = lr.fit(Xr, y).score(Xr, y)
302
+
303
+ assert 0 < sfs.get_support().sum() < X.shape[1]
304
+ assert new_score < initial_score
305
+
306
+
307
+ def test_cv_generator_support():
308
+ """Check that no exception raised when cv is generator
309
+
310
+ non-regression test for #25957
311
+ """
312
+ X, y = make_classification(random_state=0)
313
+
314
+ groups = np.zeros_like(y, dtype=int)
315
+ groups[y.size // 2 :] = 1
316
+
317
+ cv = LeaveOneGroupOut()
318
+ splits = cv.split(X, y, groups=groups)
319
+
320
+ knc = KNeighborsClassifier(n_neighbors=5)
321
+
322
+ sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits)
323
+ sfs.fit(X, y)
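# A minimal usage sketch of SequentialFeatureSelector as exercised by the
# tests above (the dataset and tol value here are illustrative, not taken
# from the test suite):
import numpy as np
from sklearn.datasets import make_regression
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_features=10, n_informative=4, random_state=0)

# With n_features_to_select="auto", forward selection stops once adding a
# feature no longer improves the cross-validated score by more than `tol`.
sfs = SequentialFeatureSelector(
    LinearRegression(),
    n_features_to_select="auto",
    tol=1e-3,
    direction="forward",
    cv=2,
)
X_selected = sfs.fit_transform(X, y)
print(sfs.n_features_to_select_, sfs.get_support(indices=True))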
llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py ADDED
@@ -0,0 +1,72 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.feature_selection import VarianceThreshold
5
+ from sklearn.utils._testing import assert_array_equal
6
+ from sklearn.utils.fixes import BSR_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
7
+
8
+ data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]]
9
+
10
+ data2 = [[-0.13725701]] * 10
11
+
12
+
13
+ @pytest.mark.parametrize(
14
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
15
+ )
16
+ def test_zero_variance(sparse_container):
17
+ # Test VarianceThreshold with default setting, zero variance.
18
+ X = data if sparse_container is None else sparse_container(data)
19
+ sel = VarianceThreshold().fit(X)
20
+ assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True))
21
+
22
+
23
+ def test_zero_variance_value_error():
24
+ # Test VarianceThreshold with default setting, zero variance, error cases.
25
+ with pytest.raises(ValueError):
26
+ VarianceThreshold().fit([[0, 1, 2, 3]])
27
+ with pytest.raises(ValueError):
28
+ VarianceThreshold().fit([[0, 1], [0, 1]])
29
+
30
+
31
+ @pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
32
+ def test_variance_threshold(sparse_container):
33
+ # Test VarianceThreshold with custom variance.
34
+ X = data if sparse_container is None else sparse_container(data)
35
+ X = VarianceThreshold(threshold=0.4).fit_transform(X)
36
+ assert (len(data), 1) == X.shape
37
+
38
+
39
+ @pytest.mark.skipif(
40
+ np.var(data2) == 0,
41
+ reason=(
42
+ "This test is not valid for this platform, "
43
+ "as it relies on numerical instabilities."
44
+ ),
45
+ )
46
+ @pytest.mark.parametrize(
47
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
48
+ )
49
+ def test_zero_variance_floating_point_error(sparse_container):
50
+ # Test that VarianceThreshold(0.0).fit eliminates features that have
51
+ # the same value in every sample, even when floating point errors
52
+ # cause np.var not to be 0 for the feature.
53
+ # See #13691
54
+ X = data2 if sparse_container is None else sparse_container(data2)
55
+ msg = "No feature in X meets the variance threshold 0.00000"
56
+ with pytest.raises(ValueError, match=msg):
57
+ VarianceThreshold().fit(X)
58
+
59
+
60
+ @pytest.mark.parametrize(
61
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
62
+ )
63
+ def test_variance_nan(sparse_container):
64
+ arr = np.array(data, dtype=np.float64)
65
+ # add single NaN and feature should still be included
66
+ arr[0, 0] = np.nan
67
+ # make all values in feature NaN and feature should be rejected
68
+ arr[:, 1] = np.nan
69
+
70
+ X = arr if sparse_container is None else sparse_container(arr)
71
+ sel = VarianceThreshold().fit(X)
72
+ assert_array_equal([0, 3, 4], sel.get_support(indices=True))
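# A short illustrative sketch of VarianceThreshold on the same `data` used by
# the tests above; with the default threshold=0.0 the constant column
# (index 2) is dropped:
import numpy as np
from sklearn.feature_selection import VarianceThreshold

data = np.array([[0, 1, 2, 3, 4],
                 [0, 2, 2, 3, 5],
                 [1, 1, 2, 4, 0]], dtype=float)

sel = VarianceThreshold()
X_reduced = sel.fit_transform(data)
print(sel.get_support(indices=True))  # [0 1 3 4]; column 2 has zero variance
print(X_reduced.shape)                # (3, 4)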
llmeval-env/lib/python3.10/site-packages/sklearn/impute/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ """Transformers for missing value imputation"""
2
+ import typing
3
+
4
+ from ._base import MissingIndicator, SimpleImputer
5
+ from ._knn import KNNImputer
6
+
7
+ if typing.TYPE_CHECKING:
8
+ # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
9
+ # TODO: remove this check once the estimator is no longer experimental.
10
+ from ._iterative import IterativeImputer # noqa
11
+
12
+ __all__ = ["MissingIndicator", "SimpleImputer", "KNNImputer"]
13
+
14
+
15
+ # TODO: remove this check once the estimator is no longer experimental.
16
+ def __getattr__(name):
17
+ if name == "IterativeImputer":
18
+ raise ImportError(
19
+ f"{name} is experimental and the API might change without any "
20
+ "deprecation cycle. To use it, you need to explicitly import "
21
+ "enable_iterative_imputer:\n"
22
+ "from sklearn.experimental import enable_iterative_imputer"
23
+ )
24
+ raise AttributeError(f"module {__name__} has no attribute {name}")
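# A hedged sketch of the import pattern that the __getattr__ guard above
# enforces: IterativeImputer is experimental, so it must be enabled
# explicitly before it can be imported from sklearn.impute.
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0], [3.0, np.nan], [np.nan, 6.0]])

# Without the enable_iterative_imputer import, the module-level __getattr__
# above raises an ImportError instead of exposing IterativeImputer.
imputer = IterativeImputer(max_iter=10, random_state=0)
print(imputer.fit_transform(X))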
llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (911 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc ADDED
Binary file (29.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc ADDED
Binary file (28.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/impute/_base.py ADDED
@@ -0,0 +1,1075 @@
1
+ # Authors: Nicolas Tresegnie <[email protected]>
2
+ # Sergey Feldman <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import numbers
6
+ import warnings
7
+ from collections import Counter
8
+ from functools import partial
9
+
10
+ import numpy as np
11
+ import numpy.ma as ma
12
+ from scipy import sparse as sp
13
+
14
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
15
+ from ..utils import _is_pandas_na, is_scalar_nan
16
+ from ..utils._mask import _get_mask
17
+ from ..utils._param_validation import MissingValues, StrOptions
18
+ from ..utils.fixes import _mode
19
+ from ..utils.sparsefuncs import _get_median
20
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
21
+
22
+
23
+ def _check_inputs_dtype(X, missing_values):
24
+ if _is_pandas_na(missing_values):
25
+ # Allow using `pd.NA` as missing values to impute numerical arrays.
26
+ return
27
+ if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
28
+ raise ValueError(
29
+ "'X' and 'missing_values' types are expected to be"
30
+ " both numerical. Got X.dtype={} and "
31
+ " type(missing_values)={}.".format(X.dtype, type(missing_values))
32
+ )
33
+
34
+
35
+ def _most_frequent(array, extra_value, n_repeat):
36
+ """Compute the most frequent value in a 1d array extended with
37
+ [extra_value] * n_repeat, where extra_value is assumed not to be part
38
+ of the array."""
39
+ # Compute the most frequent value in array only
40
+ if array.size > 0:
41
+ if array.dtype == object:
42
+ # scipy.stats.mode is slow with object dtype array.
43
+ # Python Counter is more efficient
44
+ counter = Counter(array)
45
+ most_frequent_count = counter.most_common(1)[0][1]
46
+ # tie breaking similarly to scipy.stats.mode
47
+ most_frequent_value = min(
48
+ value
49
+ for value, count in counter.items()
50
+ if count == most_frequent_count
51
+ )
52
+ else:
53
+ mode = _mode(array)
54
+ most_frequent_value = mode[0][0]
55
+ most_frequent_count = mode[1][0]
56
+ else:
57
+ most_frequent_value = 0
58
+ most_frequent_count = 0
59
+
60
+ # Compare to array + [extra_value] * n_repeat
61
+ if most_frequent_count == 0 and n_repeat == 0:
62
+ return np.nan
63
+ elif most_frequent_count < n_repeat:
64
+ return extra_value
65
+ elif most_frequent_count > n_repeat:
66
+ return most_frequent_value
67
+ elif most_frequent_count == n_repeat:
68
+ # tie breaking similarly to scipy.stats.mode
69
+ return min(most_frequent_value, extra_value)
70
+
71
+
72
+ class _BaseImputer(TransformerMixin, BaseEstimator):
73
+ """Base class for all imputers.
74
+
75
+ It automatically adds support for `add_indicator`.
76
+ """
77
+
78
+ _parameter_constraints: dict = {
79
+ "missing_values": [MissingValues()],
80
+ "add_indicator": ["boolean"],
81
+ "keep_empty_features": ["boolean"],
82
+ }
83
+
84
+ def __init__(
85
+ self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False
86
+ ):
87
+ self.missing_values = missing_values
88
+ self.add_indicator = add_indicator
89
+ self.keep_empty_features = keep_empty_features
90
+
91
+ def _fit_indicator(self, X):
92
+ """Fit a MissingIndicator."""
93
+ if self.add_indicator:
94
+ self.indicator_ = MissingIndicator(
95
+ missing_values=self.missing_values, error_on_new=False
96
+ )
97
+ self.indicator_._fit(X, precomputed=True)
98
+ else:
99
+ self.indicator_ = None
100
+
101
+ def _transform_indicator(self, X):
102
+ """Compute the indicator mask.'
103
+
104
+ Note that X must be the original data as passed to the imputer before
105
+ any imputation, since imputation may be done inplace in some cases.
106
+ """
107
+ if self.add_indicator:
108
+ if not hasattr(self, "indicator_"):
109
+ raise ValueError(
110
+ "Make sure to call _fit_indicator before _transform_indicator"
111
+ )
112
+ return self.indicator_.transform(X)
113
+
114
+ def _concatenate_indicator(self, X_imputed, X_indicator):
115
+ """Concatenate indicator mask with the imputed data."""
116
+ if not self.add_indicator:
117
+ return X_imputed
118
+
119
+ if sp.issparse(X_imputed):
120
+ # sp.hstack may result in different formats between sparse arrays and
121
+ # matrices; specify the format to keep consistent behavior
122
+ hstack = partial(sp.hstack, format=X_imputed.format)
123
+ else:
124
+ hstack = np.hstack
125
+
126
+ if X_indicator is None:
127
+ raise ValueError(
128
+ "Data from the missing indicator are not provided. Call "
129
+ "_fit_indicator and _transform_indicator in the imputer "
130
+ "implementation."
131
+ )
132
+
133
+ return hstack((X_imputed, X_indicator))
134
+
135
+ def _concatenate_indicator_feature_names_out(self, names, input_features):
136
+ if not self.add_indicator:
137
+ return names
138
+
139
+ indicator_names = self.indicator_.get_feature_names_out(input_features)
140
+ return np.concatenate([names, indicator_names])
141
+
142
+ def _more_tags(self):
143
+ return {"allow_nan": is_scalar_nan(self.missing_values)}
144
+
145
+
146
+ class SimpleImputer(_BaseImputer):
147
+ """Univariate imputer for completing missing values with simple strategies.
148
+
149
+ Replace missing values using a descriptive statistic (e.g. mean, median, or
150
+ most frequent) along each column, or using a constant value.
151
+
152
+ Read more in the :ref:`User Guide <impute>`.
153
+
154
+ .. versionadded:: 0.20
155
+ `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
156
+ estimator which is now removed.
157
+
158
+ Parameters
159
+ ----------
160
+ missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan
161
+ The placeholder for the missing values. All occurrences of
162
+ `missing_values` will be imputed. For pandas' dataframes with
163
+ nullable integer dtypes with missing values, `missing_values`
164
+ can be set to either `np.nan` or `pd.NA`.
165
+
166
+ strategy : str, default='mean'
167
+ The imputation strategy.
168
+
169
+ - If "mean", then replace missing values using the mean along
170
+ each column. Can only be used with numeric data.
171
+ - If "median", then replace missing values using the median along
172
+ each column. Can only be used with numeric data.
173
+ - If "most_frequent", then replace missing using the most frequent
174
+ value along each column. Can be used with strings or numeric data.
175
+ If there is more than one such value, only the smallest is returned.
176
+ - If "constant", then replace missing values with fill_value. Can be
177
+ used with strings or numeric data.
178
+
179
+ .. versionadded:: 0.20
180
+ strategy="constant" for fixed value imputation.
181
+
182
+ fill_value : str or numerical value, default=None
183
+ When strategy == "constant", `fill_value` is used to replace all
184
+ occurrences of missing_values. For string or object data types,
185
+ `fill_value` must be a string.
186
+ If `None`, `fill_value` will be 0 when imputing numerical
187
+ data and "missing_value" for strings or object data types.
188
+
189
+ copy : bool, default=True
190
+ If True, a copy of X will be created. If False, imputation will
191
+ be done in-place whenever possible. Note that, in the following cases,
192
+ a new copy will always be made, even if `copy=False`:
193
+
194
+ - If `X` is not an array of floating values;
195
+ - If `X` is encoded as a CSR matrix;
196
+ - If `add_indicator=True`.
197
+
198
+ add_indicator : bool, default=False
199
+ If True, a :class:`MissingIndicator` transform will stack onto output
200
+ of the imputer's transform. This allows a predictive estimator
201
+ to account for missingness despite imputation. If a feature has no
202
+ missing values at fit/train time, the feature won't appear on
203
+ the missing indicator even if there are missing values at
204
+ transform/test time.
205
+
206
+ keep_empty_features : bool, default=False
207
+ If True, features that consist exclusively of missing values when
208
+ `fit` is called are returned in results when `transform` is called.
209
+ The imputed value is always `0` except when `strategy="constant"`
210
+ in which case `fill_value` will be used instead.
211
+
212
+ .. versionadded:: 1.2
213
+
214
+ Attributes
215
+ ----------
216
+ statistics_ : array of shape (n_features,)
217
+ The imputation fill value for each feature.
218
+ Computing statistics can result in `np.nan` values.
219
+ During :meth:`transform`, features corresponding to `np.nan`
220
+ statistics will be discarded.
221
+
222
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
223
+ Indicator used to add binary indicators for missing values.
224
+ `None` if `add_indicator=False`.
225
+
226
+ n_features_in_ : int
227
+ Number of features seen during :term:`fit`.
228
+
229
+ .. versionadded:: 0.24
230
+
231
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
232
+ Names of features seen during :term:`fit`. Defined only when `X`
233
+ has feature names that are all strings.
234
+
235
+ .. versionadded:: 1.0
236
+
237
+ See Also
238
+ --------
239
+ IterativeImputer : Multivariate imputer that estimates values to impute for
240
+ each feature with missing values from all the others.
241
+ KNNImputer : Multivariate imputer that estimates missing features using
242
+ nearest samples.
243
+
244
+ Notes
245
+ -----
246
+ Columns which only contained missing values at :meth:`fit` are discarded
247
+ upon :meth:`transform` if strategy is not `"constant"`.
248
+
249
+ In a prediction context, simple imputation usually performs poorly when
250
+ associated with a weak learner. However, with a powerful learner, it can
251
+ lead to as good or better performance than complex imputation such as
252
+ :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`.
253
+
254
+ Examples
255
+ --------
256
+ >>> import numpy as np
257
+ >>> from sklearn.impute import SimpleImputer
258
+ >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
259
+ >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
260
+ SimpleImputer()
261
+ >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
262
+ >>> print(imp_mean.transform(X))
263
+ [[ 7. 2. 3. ]
264
+ [ 4. 3.5 6. ]
265
+ [10. 3.5 9. ]]
266
+
267
+ For a more detailed example see
268
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`.
269
+ """
270
+
271
+ _parameter_constraints: dict = {
272
+ **_BaseImputer._parameter_constraints,
273
+ "strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})],
274
+ "fill_value": "no_validation", # any object is valid
275
+ "copy": ["boolean"],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ *,
281
+ missing_values=np.nan,
282
+ strategy="mean",
283
+ fill_value=None,
284
+ copy=True,
285
+ add_indicator=False,
286
+ keep_empty_features=False,
287
+ ):
288
+ super().__init__(
289
+ missing_values=missing_values,
290
+ add_indicator=add_indicator,
291
+ keep_empty_features=keep_empty_features,
292
+ )
293
+ self.strategy = strategy
294
+ self.fill_value = fill_value
295
+ self.copy = copy
296
+
297
+ def _validate_input(self, X, in_fit):
298
+ if self.strategy in ("most_frequent", "constant"):
299
+ # If input is a list of strings, dtype = object.
300
+ # Otherwise ValueError is raised in SimpleImputer
301
+ # with strategy='most_frequent' or 'constant'
302
+ # because the list is converted to Unicode numpy array
303
+ if isinstance(X, list) and any(
304
+ isinstance(elem, str) for row in X for elem in row
305
+ ):
306
+ dtype = object
307
+ else:
308
+ dtype = None
309
+ else:
310
+ dtype = FLOAT_DTYPES
311
+
312
+ if not in_fit and self._fit_dtype.kind == "O":
313
+ # Use object dtype if fitted on object dtypes
314
+ dtype = self._fit_dtype
315
+
316
+ if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
317
+ force_all_finite = "allow-nan"
318
+ else:
319
+ force_all_finite = True
320
+
321
+ try:
322
+ X = self._validate_data(
323
+ X,
324
+ reset=in_fit,
325
+ accept_sparse="csc",
326
+ dtype=dtype,
327
+ force_all_finite=force_all_finite,
328
+ copy=self.copy,
329
+ )
330
+ except ValueError as ve:
331
+ if "could not convert" in str(ve):
332
+ new_ve = ValueError(
333
+ "Cannot use {} strategy with non-numeric data:\n{}".format(
334
+ self.strategy, ve
335
+ )
336
+ )
337
+ raise new_ve from None
338
+ else:
339
+ raise ve
340
+
341
+ if in_fit:
342
+ # Use the dtype seen in `fit` for non-`fit` conversion
343
+ self._fit_dtype = X.dtype
344
+
345
+ _check_inputs_dtype(X, self.missing_values)
346
+ if X.dtype.kind not in ("i", "u", "f", "O"):
347
+ raise ValueError(
348
+ "SimpleImputer does not support data with dtype "
349
+ "{0}. Please provide either a numeric array (with"
350
+ " a floating point or integer dtype) or "
351
+ "categorical data represented either as an array "
352
+ "with integer dtype or an array of string values "
353
+ "with an object dtype.".format(X.dtype)
354
+ )
355
+
356
+ if sp.issparse(X) and self.missing_values == 0:
357
+ # missing_values = 0 not allowed with sparse data as it would
358
+ # force densification
359
+ raise ValueError(
360
+ "Imputation not possible when missing_values "
361
+ "== 0 and input is sparse. Provide a dense "
362
+ "array instead."
363
+ )
364
+
365
+ if self.strategy == "constant":
366
+ if in_fit and self.fill_value is not None:
367
+ fill_value_dtype = type(self.fill_value)
368
+ err_msg = (
369
+ f"fill_value={self.fill_value!r} (of type {fill_value_dtype!r}) "
370
+ f"cannot be cast to the input data that is {X.dtype!r}. Make sure "
371
+ "that both dtypes are of the same kind."
372
+ )
373
+ elif not in_fit:
374
+ fill_value_dtype = self.statistics_.dtype
375
+ err_msg = (
376
+ f"The dtype of the filling value (i.e. {fill_value_dtype!r}) "
377
+ f"cannot be cast to the input data that is {X.dtype!r}. Make sure "
378
+ "that the dtypes of the input data is of the same kind between "
379
+ "fit and transform."
380
+ )
381
+ else:
382
+ # By default, fill_value=None, and the replacement is always
383
+ # compatible with the input data
384
+ fill_value_dtype = X.dtype
385
+
386
+ # Make sure we can safely cast fill_value dtype to the input data dtype
387
+ if not np.can_cast(fill_value_dtype, X.dtype, casting="same_kind"):
388
+ raise ValueError(err_msg)
389
+
390
+ return X
391
+
392
+ @_fit_context(prefer_skip_nested_validation=True)
393
+ def fit(self, X, y=None):
394
+ """Fit the imputer on `X`.
395
+
396
+ Parameters
397
+ ----------
398
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
399
+ Input data, where `n_samples` is the number of samples and
400
+ `n_features` is the number of features.
401
+
402
+ y : Ignored
403
+ Not used, present here for API consistency by convention.
404
+
405
+ Returns
406
+ -------
407
+ self : object
408
+ Fitted estimator.
409
+ """
410
+ X = self._validate_input(X, in_fit=True)
411
+
412
+ # default fill_value is 0 for numerical input and "missing_value"
413
+ # otherwise
414
+ if self.fill_value is None:
415
+ if X.dtype.kind in ("i", "u", "f"):
416
+ fill_value = 0
417
+ else:
418
+ fill_value = "missing_value"
419
+ else:
420
+ fill_value = self.fill_value
421
+
422
+ if sp.issparse(X):
423
+ self.statistics_ = self._sparse_fit(
424
+ X, self.strategy, self.missing_values, fill_value
425
+ )
426
+ else:
427
+ self.statistics_ = self._dense_fit(
428
+ X, self.strategy, self.missing_values, fill_value
429
+ )
430
+
431
+ return self
432
+
433
+ def _sparse_fit(self, X, strategy, missing_values, fill_value):
434
+ """Fit the transformer on sparse data."""
435
+ missing_mask = _get_mask(X, missing_values)
436
+ mask_data = missing_mask.data
437
+ n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
438
+
439
+ statistics = np.empty(X.shape[1])
440
+
441
+ if strategy == "constant":
442
+ # for constant strategy, self.statistics_ is used to store
443
+ # fill_value in each column
444
+ statistics.fill(fill_value)
445
+ else:
446
+ for i in range(X.shape[1]):
447
+ column = X.data[X.indptr[i] : X.indptr[i + 1]]
448
+ mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
449
+ column = column[~mask_column]
450
+
451
+ # combine explicit and implicit zeros
452
+ mask_zeros = _get_mask(column, 0)
453
+ column = column[~mask_zeros]
454
+ n_explicit_zeros = mask_zeros.sum()
455
+ n_zeros = n_implicit_zeros[i] + n_explicit_zeros
456
+
457
+ if len(column) == 0 and self.keep_empty_features:
458
+ # in case we want to keep columns with only missing values.
459
+ statistics[i] = 0
460
+ else:
461
+ if strategy == "mean":
462
+ s = column.size + n_zeros
463
+ statistics[i] = np.nan if s == 0 else column.sum() / s
464
+
465
+ elif strategy == "median":
466
+ statistics[i] = _get_median(column, n_zeros)
467
+
468
+ elif strategy == "most_frequent":
469
+ statistics[i] = _most_frequent(column, 0, n_zeros)
470
+
471
+ super()._fit_indicator(missing_mask)
472
+
473
+ return statistics
474
+
475
+ def _dense_fit(self, X, strategy, missing_values, fill_value):
476
+ """Fit the transformer on dense data."""
477
+ missing_mask = _get_mask(X, missing_values)
478
+ masked_X = ma.masked_array(X, mask=missing_mask)
479
+
480
+ super()._fit_indicator(missing_mask)
481
+
482
+ # Mean
483
+ if strategy == "mean":
484
+ mean_masked = np.ma.mean(masked_X, axis=0)
485
+ # Avoid the warning "Warning: converting a masked element to nan."
486
+ mean = np.ma.getdata(mean_masked)
487
+ mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
488
+
489
+ return mean
490
+
491
+ # Median
492
+ elif strategy == "median":
493
+ median_masked = np.ma.median(masked_X, axis=0)
494
+ # Avoid the warning "Warning: converting a masked element to nan."
495
+ median = np.ma.getdata(median_masked)
496
+ median[np.ma.getmaskarray(median_masked)] = (
497
+ 0 if self.keep_empty_features else np.nan
498
+ )
499
+
500
+ return median
501
+
502
+ # Most frequent
503
+ elif strategy == "most_frequent":
504
+ # Avoid use of scipy.stats.mstats.mode due to the required
505
+ # additional overhead and slow benchmarking performance.
506
+ # See Issue 14325 and PR 14399 for full discussion.
507
+
508
+ # To be able to access the elements by columns
509
+ X = X.transpose()
510
+ mask = missing_mask.transpose()
511
+
512
+ if X.dtype.kind == "O":
513
+ most_frequent = np.empty(X.shape[0], dtype=object)
514
+ else:
515
+ most_frequent = np.empty(X.shape[0])
516
+
517
+ for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
518
+ row_mask = np.logical_not(row_mask).astype(bool)
519
+ row = row[row_mask]
520
+ if len(row) == 0 and self.keep_empty_features:
521
+ most_frequent[i] = 0
522
+ else:
523
+ most_frequent[i] = _most_frequent(row, np.nan, 0)
524
+
525
+ return most_frequent
526
+
527
+ # Constant
528
+ elif strategy == "constant":
529
+ # for constant strategy, self.statistics_ is used to store
530
+ # fill_value in each column
531
+ return np.full(X.shape[1], fill_value, dtype=X.dtype)
532
+
533
+ def transform(self, X):
534
+ """Impute all missing values in `X`.
535
+
536
+ Parameters
537
+ ----------
538
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
539
+ The input data to complete.
540
+
541
+ Returns
542
+ -------
543
+ X_imputed : {ndarray, sparse matrix} of shape \
544
+ (n_samples, n_features_out)
545
+ `X` with imputed values.
546
+ """
547
+ check_is_fitted(self)
548
+
549
+ X = self._validate_input(X, in_fit=False)
550
+ statistics = self.statistics_
551
+
552
+ if X.shape[1] != statistics.shape[0]:
553
+ raise ValueError(
554
+ "X has %d features per sample, expected %d"
555
+ % (X.shape[1], self.statistics_.shape[0])
556
+ )
557
+
558
+ # compute mask before eliminating invalid features
559
+ missing_mask = _get_mask(X, self.missing_values)
560
+
561
+ # Decide whether to keep missing features
562
+ if self.strategy == "constant" or self.keep_empty_features:
563
+ valid_statistics = statistics
564
+ valid_statistics_indexes = None
565
+ else:
566
+ # same as np.isnan but also works for object dtypes
567
+ invalid_mask = _get_mask(statistics, np.nan)
568
+ valid_mask = np.logical_not(invalid_mask)
569
+ valid_statistics = statistics[valid_mask]
570
+ valid_statistics_indexes = np.flatnonzero(valid_mask)
571
+
572
+ if invalid_mask.any():
573
+ invalid_features = np.arange(X.shape[1])[invalid_mask]
574
+ # use feature names warning if features are provided
575
+ if hasattr(self, "feature_names_in_"):
576
+ invalid_features = self.feature_names_in_[invalid_features]
577
+ warnings.warn(
578
+ "Skipping features without any observed values:"
579
+ f" {invalid_features}. At least one non-missing value is needed"
580
+ f" for imputation with strategy='{self.strategy}'."
581
+ )
582
+ X = X[:, valid_statistics_indexes]
583
+
584
+ # Do actual imputation
585
+ if sp.issparse(X):
586
+ if self.missing_values == 0:
587
+ raise ValueError(
588
+ "Imputation not possible when missing_values "
589
+ "== 0 and input is sparse. Provide a dense "
590
+ "array instead."
591
+ )
592
+ else:
593
+ # if no invalid statistics are found, use the mask computed
594
+ # before, else recompute mask
595
+ if valid_statistics_indexes is None:
596
+ mask = missing_mask.data
597
+ else:
598
+ mask = _get_mask(X.data, self.missing_values)
599
+ indexes = np.repeat(
600
+ np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
601
+ )[mask]
602
+
603
+ X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
604
+ else:
605
+ # use the mask computed before eliminating invalid features
606
+ if valid_statistics_indexes is None:
607
+ mask_valid_features = missing_mask
608
+ else:
609
+ mask_valid_features = missing_mask[:, valid_statistics_indexes]
610
+ n_missing = np.sum(mask_valid_features, axis=0)
611
+ values = np.repeat(valid_statistics, n_missing)
612
+ coordinates = np.where(mask_valid_features.transpose())[::-1]
613
+
614
+ X[coordinates] = values
615
+
616
+ X_indicator = super()._transform_indicator(missing_mask)
617
+
618
+ return super()._concatenate_indicator(X, X_indicator)
619
+
620
+ def inverse_transform(self, X):
621
+ """Convert the data back to the original representation.
622
+
623
+ Inverts the `transform` operation performed on an array.
624
+ This operation can only be performed after :class:`SimpleImputer` is
625
+ instantiated with `add_indicator=True`.
626
+
627
+ Note that `inverse_transform` can only invert the transform in
628
+ features that have binary indicators for missing values. If a feature
629
+ has no missing values at `fit` time, the feature won't have a binary
630
+ indicator, and the imputation done at `transform` time won't be
631
+ inverted.
632
+
633
+ .. versionadded:: 0.24
634
+
635
+ Parameters
636
+ ----------
637
+ X : array-like of shape \
638
+ (n_samples, n_features + n_features_missing_indicator)
639
+ The imputed data to be reverted to original data. It has to be
640
+ an augmented array of imputed data and the missing indicator mask.
641
+
642
+ Returns
643
+ -------
644
+ X_original : ndarray of shape (n_samples, n_features)
645
+ The original `X` with missing values as it was prior
646
+ to imputation.
647
+ """
648
+ check_is_fitted(self)
649
+
650
+ if not self.add_indicator:
651
+ raise ValueError(
652
+ "'inverse_transform' works only when "
653
+ "'SimpleImputer' is instantiated with "
654
+ "'add_indicator=True'. "
655
+ f"Got 'add_indicator={self.add_indicator}' "
656
+ "instead."
657
+ )
658
+
659
+ n_features_missing = len(self.indicator_.features_)
660
+ non_empty_feature_count = X.shape[1] - n_features_missing
661
+ array_imputed = X[:, :non_empty_feature_count].copy()
662
+ missing_mask = X[:, non_empty_feature_count:].astype(bool)
663
+
664
+ n_features_original = len(self.statistics_)
665
+ shape_original = (X.shape[0], n_features_original)
666
+ X_original = np.zeros(shape_original)
667
+ X_original[:, self.indicator_.features_] = missing_mask
668
+ full_mask = X_original.astype(bool)
669
+
670
+ imputed_idx, original_idx = 0, 0
671
+ while imputed_idx < len(array_imputed.T):
672
+ if not np.all(X_original[:, original_idx]):
673
+ X_original[:, original_idx] = array_imputed.T[imputed_idx]
674
+ imputed_idx += 1
675
+ original_idx += 1
676
+ else:
677
+ original_idx += 1
678
+
679
+ X_original[full_mask] = self.missing_values
680
+ return X_original
681
+
682
+ def _more_tags(self):
683
+ return {
684
+ "allow_nan": _is_pandas_na(self.missing_values) or is_scalar_nan(
685
+ self.missing_values
686
+ )
687
+ }
688
+
689
+ def get_feature_names_out(self, input_features=None):
690
+ """Get output feature names for transformation.
691
+
692
+ Parameters
693
+ ----------
694
+ input_features : array-like of str or None, default=None
695
+ Input features.
696
+
697
+ - If `input_features` is `None`, then `feature_names_in_` is
698
+ used as feature names in. If `feature_names_in_` is not defined,
699
+ then the following input feature names are generated:
700
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
701
+ - If `input_features` is an array-like, then `input_features` must
702
+ match `feature_names_in_` if `feature_names_in_` is defined.
703
+
704
+ Returns
705
+ -------
706
+ feature_names_out : ndarray of str objects
707
+ Transformed feature names.
708
+ """
709
+ check_is_fitted(self, "n_features_in_")
710
+ input_features = _check_feature_names_in(self, input_features)
711
+ non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))
712
+ names = input_features[non_missing_mask]
713
+ return self._concatenate_indicator_feature_names_out(names, input_features)
714
+
715
+
716
+ class MissingIndicator(TransformerMixin, BaseEstimator):
717
+ """Binary indicators for missing values.
718
+
719
+ Note that this component typically should not be used in a vanilla
720
+ :class:`~sklearn.pipeline.Pipeline` consisting of transformers and a
721
+ classifier, but rather could be added using a
722
+ :class:`~sklearn.pipeline.FeatureUnion` or
723
+ :class:`~sklearn.compose.ColumnTransformer`.
724
+
725
+ Read more in the :ref:`User Guide <impute>`.
726
+
727
+ .. versionadded:: 0.20
728
+
729
+ Parameters
730
+ ----------
731
+ missing_values : int, float, str, np.nan or None, default=np.nan
732
+ The placeholder for the missing values. All occurrences of
733
+ `missing_values` will be imputed. For pandas' dataframes with
734
+ nullable integer dtypes with missing values, `missing_values`
735
+ should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
736
+
737
+ features : {'missing-only', 'all'}, default='missing-only'
738
+ Whether the imputer mask should represent all or a subset of
739
+ features.
740
+
741
+ - If `'missing-only'` (default), the imputer mask will only represent
742
+ features containing missing values during fit time.
743
+ - If `'all'`, the imputer mask will represent all features.
744
+
745
+ sparse : bool or 'auto', default='auto'
746
+ Whether the imputer mask format should be sparse or dense.
747
+
748
+ - If `'auto'` (default), the imputer mask will be of same type as
749
+ input.
750
+ - If `True`, the imputer mask will be a sparse matrix.
751
+ - If `False`, the imputer mask will be a numpy array.
752
+
753
+ error_on_new : bool, default=True
754
+ If `True`, :meth:`transform` will raise an error when there are
755
+ features with missing values that have no missing values in
756
+ :meth:`fit`. This is applicable only when `features='missing-only'`.
757
+
758
+ Attributes
759
+ ----------
760
+ features_ : ndarray of shape (n_missing_features,) or (n_features,)
761
+ The features indices which will be returned when calling
762
+ :meth:`transform`. They are computed during :meth:`fit`. If
763
+ `features='all'`, `features_` is equal to `range(n_features)`.
764
+
765
+ n_features_in_ : int
766
+ Number of features seen during :term:`fit`.
767
+
768
+ .. versionadded:: 0.24
769
+
770
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
771
+ Names of features seen during :term:`fit`. Defined only when `X`
772
+ has feature names that are all strings.
773
+
774
+ .. versionadded:: 1.0
775
+
776
+ See Also
777
+ --------
778
+ SimpleImputer : Univariate imputation of missing values.
779
+ IterativeImputer : Multivariate imputation of missing values.
780
+
781
+ Examples
782
+ --------
783
+ >>> import numpy as np
784
+ >>> from sklearn.impute import MissingIndicator
785
+ >>> X1 = np.array([[np.nan, 1, 3],
786
+ ... [4, 0, np.nan],
787
+ ... [8, 1, 0]])
788
+ >>> X2 = np.array([[5, 1, np.nan],
789
+ ... [np.nan, 2, 3],
790
+ ... [2, 4, 0]])
791
+ >>> indicator = MissingIndicator()
792
+ >>> indicator.fit(X1)
793
+ MissingIndicator()
794
+ >>> X2_tr = indicator.transform(X2)
795
+ >>> X2_tr
796
+ array([[False, True],
797
+ [ True, False],
798
+ [False, False]])
799
+ """
800
+
801
+ _parameter_constraints: dict = {
802
+ "missing_values": [MissingValues()],
803
+ "features": [StrOptions({"missing-only", "all"})],
804
+ "sparse": ["boolean", StrOptions({"auto"})],
805
+ "error_on_new": ["boolean"],
806
+ }
807
+
808
+ def __init__(
809
+ self,
810
+ *,
811
+ missing_values=np.nan,
812
+ features="missing-only",
813
+ sparse="auto",
814
+ error_on_new=True,
815
+ ):
816
+ self.missing_values = missing_values
817
+ self.features = features
818
+ self.sparse = sparse
819
+ self.error_on_new = error_on_new
820
+
821
+ def _get_missing_features_info(self, X):
822
+ """Compute the imputer mask and the indices of the features
823
+ containing missing values.
824
+
825
+ Parameters
826
+ ----------
827
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
828
+ The input data with missing values. Note that `X` has been
829
+ checked in :meth:`fit` and :meth:`transform` before calling this
830
+ function.
831
+
832
+ Returns
833
+ -------
834
+ imputer_mask : {ndarray, sparse matrix} of shape \
835
+ (n_samples, n_features)
836
+ The imputer mask of the original data.
837
+
838
+ features_with_missing : ndarray of shape (n_features_with_missing)
839
+ The features containing missing values.
840
+ """
841
+ if not self._precomputed:
842
+ imputer_mask = _get_mask(X, self.missing_values)
843
+ else:
844
+ imputer_mask = X
845
+
846
+ if sp.issparse(X):
847
+ imputer_mask.eliminate_zeros()
848
+
849
+ if self.features == "missing-only":
850
+ n_missing = imputer_mask.getnnz(axis=0)
851
+
852
+ if self.sparse is False:
853
+ imputer_mask = imputer_mask.toarray()
854
+ elif imputer_mask.format == "csr":
855
+ imputer_mask = imputer_mask.tocsc()
856
+ else:
857
+ if not self._precomputed:
858
+ imputer_mask = _get_mask(X, self.missing_values)
859
+ else:
860
+ imputer_mask = X
861
+
862
+ if self.features == "missing-only":
863
+ n_missing = imputer_mask.sum(axis=0)
864
+
865
+ if self.sparse is True:
866
+ imputer_mask = sp.csc_matrix(imputer_mask)
867
+
868
+ if self.features == "all":
869
+ features_indices = np.arange(X.shape[1])
870
+ else:
871
+ features_indices = np.flatnonzero(n_missing)
872
+
873
+ return imputer_mask, features_indices
874
+
875
+ def _validate_input(self, X, in_fit):
876
+ if not is_scalar_nan(self.missing_values):
877
+ force_all_finite = True
878
+ else:
879
+ force_all_finite = "allow-nan"
880
+ X = self._validate_data(
881
+ X,
882
+ reset=in_fit,
883
+ accept_sparse=("csc", "csr"),
884
+ dtype=None,
885
+ force_all_finite=force_all_finite,
886
+ )
887
+ _check_inputs_dtype(X, self.missing_values)
888
+ if X.dtype.kind not in ("i", "u", "f", "O"):
889
+ raise ValueError(
890
+ "MissingIndicator does not support data with "
891
+ "dtype {0}. Please provide either a numeric array"
892
+ " (with a floating point or integer dtype) or "
893
+ "categorical data represented either as an array "
894
+ "with integer dtype or an array of string values "
895
+ "with an object dtype.".format(X.dtype)
896
+ )
897
+
898
+ if sp.issparse(X) and self.missing_values == 0:
899
+ # missing_values = 0 not allowed with sparse data as it would
900
+ # force densification
901
+ raise ValueError(
902
+ "Sparse input with missing_values=0 is "
903
+ "not supported. Provide a dense "
904
+ "array instead."
905
+ )
906
+
907
+ return X
908
+
909
+ def _fit(self, X, y=None, precomputed=False):
910
+ """Fit the transformer on `X`.
911
+
912
+ Parameters
913
+ ----------
914
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
915
+ Input data, where `n_samples` is the number of samples and
916
+ `n_features` is the number of features.
917
+ If `precomputed=True`, then `X` is a mask of the input data.
918
+
919
+ precomputed : bool
920
+ Whether the input data is a mask.
921
+
922
+ Returns
923
+ -------
924
+ imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
925
+ n_features)
926
+ The imputer mask of the original data.
927
+ """
928
+ if precomputed:
929
+ if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
930
+ raise ValueError("precomputed is True but the input data is not a mask")
931
+ self._precomputed = True
932
+ else:
933
+ self._precomputed = False
934
+
935
+ # Need not validate X again as it would have already been validated
936
+ # in the Imputer calling MissingIndicator
937
+ if not self._precomputed:
938
+ X = self._validate_input(X, in_fit=True)
939
+ else:
940
+ # only create `n_features_in_` in the precomputed case
941
+ self._check_n_features(X, reset=True)
942
+
943
+ self._n_features = X.shape[1]
944
+
945
+ missing_features_info = self._get_missing_features_info(X)
946
+ self.features_ = missing_features_info[1]
947
+
948
+ return missing_features_info[0]
949
+
950
+ @_fit_context(prefer_skip_nested_validation=True)
951
+ def fit(self, X, y=None):
952
+ """Fit the transformer on `X`.
953
+
954
+ Parameters
955
+ ----------
956
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
957
+ Input data, where `n_samples` is the number of samples and
958
+ `n_features` is the number of features.
959
+
960
+ y : Ignored
961
+ Not used, present for API consistency by convention.
962
+
963
+ Returns
964
+ -------
965
+ self : object
966
+ Fitted estimator.
967
+ """
968
+ self._fit(X, y)
969
+
970
+ return self
971
+
972
+ def transform(self, X):
973
+ """Generate missing values indicator for `X`.
974
+
975
+ Parameters
976
+ ----------
977
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
978
+ The input data to complete.
979
+
980
+ Returns
981
+ -------
982
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
983
+ or (n_samples, n_features_with_missing)
984
+ The missing indicator for input data. The data type of `Xt`
985
+ will be boolean.
986
+ """
987
+ check_is_fitted(self)
988
+
989
+ # Need not validate X again as it would have already been validated
990
+ # in the Imputer calling MissingIndicator
991
+ if not self._precomputed:
992
+ X = self._validate_input(X, in_fit=False)
993
+ else:
994
+ if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
995
+ raise ValueError("precomputed is True but the input data is not a mask")
996
+
997
+ imputer_mask, features = self._get_missing_features_info(X)
998
+
999
+ if self.features == "missing-only":
1000
+ features_diff_fit_trans = np.setdiff1d(features, self.features_)
1001
+ if self.error_on_new and features_diff_fit_trans.size > 0:
1002
+ raise ValueError(
1003
+ "The features {} have missing values "
1004
+ "in transform but have no missing values "
1005
+ "in fit.".format(features_diff_fit_trans)
1006
+ )
1007
+
1008
+ if self.features_.size < self._n_features:
1009
+ imputer_mask = imputer_mask[:, self.features_]
1010
+
1011
+ return imputer_mask
1012
+
1013
+ @_fit_context(prefer_skip_nested_validation=True)
1014
+ def fit_transform(self, X, y=None):
1015
+ """Generate missing values indicator for `X`.
1016
+
1017
+ Parameters
1018
+ ----------
1019
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1020
+ The input data to complete.
1021
+
1022
+ y : Ignored
1023
+ Not used, present for API consistency by convention.
1024
+
1025
+ Returns
1026
+ -------
1027
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
1028
+ or (n_samples, n_features_with_missing)
1029
+ The missing indicator for input data. The data type of `Xt`
1030
+ will be boolean.
1031
+ """
1032
+ imputer_mask = self._fit(X, y)
1033
+
1034
+ if self.features_.size < self._n_features:
1035
+ imputer_mask = imputer_mask[:, self.features_]
1036
+
1037
+ return imputer_mask
1038
+
1039
+ def get_feature_names_out(self, input_features=None):
1040
+ """Get output feature names for transformation.
1041
+
1042
+ Parameters
1043
+ ----------
1044
+ input_features : array-like of str or None, default=None
1045
+ Input features.
1046
+
1047
+ - If `input_features` is `None`, then `feature_names_in_` is
1048
+ used as feature names in. If `feature_names_in_` is not defined,
1049
+ then the following input feature names are generated:
1050
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
1051
+ - If `input_features` is an array-like, then `input_features` must
1052
+ match `feature_names_in_` if `feature_names_in_` is defined.
1053
+
1054
+ Returns
1055
+ -------
1056
+ feature_names_out : ndarray of str objects
1057
+ Transformed feature names.
1058
+ """
1059
+ check_is_fitted(self, "n_features_in_")
1060
+ input_features = _check_feature_names_in(self, input_features)
1061
+ prefix = self.__class__.__name__.lower()
1062
+ return np.asarray(
1063
+ [
1064
+ f"{prefix}_{feature_name}"
1065
+ for feature_name in input_features[self.features_]
1066
+ ],
1067
+ dtype=object,
1068
+ )
1069
+
1070
+ def _more_tags(self):
1071
+ return {
1072
+ "allow_nan": True,
1073
+ "X_types": ["2darray", "string"],
1074
+ "preserves_dtype": [],
1075
+ }
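# A minimal sketch combining SimpleImputer(add_indicator=True) with
# inverse_transform, as documented in the classes above (the toy array is
# illustrative only):
import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[7.0, 2.0, np.nan],
              [4.0, np.nan, 6.0],
              [10.0, 5.0, 9.0]])

imp = SimpleImputer(strategy="mean", add_indicator=True)
Xt = imp.fit_transform(X)           # 3 imputed columns + 2 indicator columns
print(Xt.shape)                     # (3, 5)

# inverse_transform is only available with add_indicator=True; it restores
# np.nan at the positions flagged by the indicator columns.
X_back = imp.inverse_transform(Xt)
print(np.isnan(X_back).sum())       # 2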
llmeval-env/lib/python3.10/site-packages/sklearn/impute/_iterative.py ADDED
@@ -0,0 +1,906 @@
1
+ import warnings
2
+ from collections import namedtuple
3
+ from numbers import Integral, Real
4
+ from time import time
5
+
6
+ import numpy as np
7
+ from scipy import stats
8
+
9
+ from ..base import _fit_context, clone
10
+ from ..exceptions import ConvergenceWarning
11
+ from ..preprocessing import normalize
12
+ from ..utils import (
13
+ _safe_assign,
14
+ _safe_indexing,
15
+ check_array,
16
+ check_random_state,
17
+ is_scalar_nan,
18
+ )
19
+ from ..utils._mask import _get_mask
20
+ from ..utils._param_validation import HasMethods, Interval, StrOptions
21
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
22
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
23
+ from ._base import SimpleImputer, _BaseImputer, _check_inputs_dtype
24
+
25
+ _ImputerTriplet = namedtuple(
26
+ "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"]
27
+ )
28
+
29
+
30
+ def _assign_where(X1, X2, cond):
31
+ """Assign X2 to X1 where cond is True.
32
+
33
+ Parameters
34
+ ----------
35
+ X1 : ndarray or dataframe of shape (n_samples, n_features)
36
+ Data.
37
+
38
+ X2 : ndarray of shape (n_samples, n_features)
39
+ Data to be assigned.
40
+
41
+ cond : ndarray of shape (n_samples, n_features)
42
+ Boolean mask to assign data.
43
+ """
44
+ if hasattr(X1, "mask"): # pandas dataframes
45
+ X1.mask(cond=cond, other=X2, inplace=True)
46
+ else: # ndarrays
47
+ X1[cond] = X2[cond]
48
+
49
+
50
+ class IterativeImputer(_RoutingNotSupportedMixin, _BaseImputer):
51
+ """Multivariate imputer that estimates each feature from all the others.
52
+
53
+ A strategy for imputing missing values by modeling each feature with
54
+ missing values as a function of other features in a round-robin fashion.
55
+
56
+ Read more in the :ref:`User Guide <iterative_imputer>`.
57
+
58
+ .. versionadded:: 0.21
59
+
60
+ .. note::
61
+
62
+ This estimator is still **experimental** for now: the predictions
63
+ and the API might change without any deprecation cycle. To use it,
64
+ you need to explicitly import `enable_iterative_imputer`::
65
+
66
+ >>> # explicitly require this experimental feature
67
+ >>> from sklearn.experimental import enable_iterative_imputer # noqa
68
+ >>> # now you can import normally from sklearn.impute
69
+ >>> from sklearn.impute import IterativeImputer
70
+
71
+ Parameters
72
+ ----------
73
+ estimator : estimator object, default=BayesianRidge()
74
+ The estimator to use at each step of the round-robin imputation.
75
+ If `sample_posterior=True`, the estimator must support
76
+ `return_std` in its `predict` method.
77
+
78
+ missing_values : int or np.nan, default=np.nan
79
+ The placeholder for the missing values. All occurrences of
80
+ `missing_values` will be imputed. For pandas' dataframes with
81
+ nullable integer dtypes with missing values, `missing_values`
82
+ should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
83
+
84
+ sample_posterior : bool, default=False
85
+ Whether to sample from the (Gaussian) predictive posterior of the
86
+ fitted estimator for each imputation. Estimator must support
87
+ `return_std` in its `predict` method if set to `True`. Set to
88
+ `True` if using `IterativeImputer` for multiple imputations.
89
+
90
+ max_iter : int, default=10
91
+ Maximum number of imputation rounds to perform before returning the
92
+ imputations computed during the final round. A round is a single
93
+ imputation of each feature with missing values. The stopping criterion
94
+ is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`,
95
+ where `X_t` is `X` at iteration `t`. Note that early stopping is only
96
+ applied if `sample_posterior=False`.
97
+
98
+ tol : float, default=1e-3
99
+ Tolerance of the stopping condition.
100
+
101
+ n_nearest_features : int, default=None
102
+ Number of other features to use to estimate the missing values of
103
+ each feature column. Nearness between features is measured using
104
+ the absolute correlation coefficient between each feature pair (after
105
+ initial imputation). To ensure coverage of features throughout the
106
+ imputation process, the neighbor features are not necessarily nearest,
107
+ but are drawn with probability proportional to correlation for each
108
+ imputed target feature. Can provide significant speed-up when the
109
+ number of features is huge. If `None`, all features will be used.
110
+
111
+ initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \
112
+ default='mean'
113
+ Which strategy to use to initialize the missing values. Same as the
114
+ `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`.
115
+
116
+ fill_value : str or numerical value, default=None
117
+ When `strategy="constant"`, `fill_value` is used to replace all
118
+ occurrences of missing_values. For string or object data types,
119
+ `fill_value` must be a string.
120
+ If `None`, `fill_value` will be 0 when imputing numerical
121
+ data and "missing_value" for strings or object data types.
122
+
123
+ .. versionadded:: 1.3
124
+
125
+ imputation_order : {'ascending', 'descending', 'roman', 'arabic', \
126
+ 'random'}, default='ascending'
127
+ The order in which the features will be imputed. Possible values:
128
+
129
+ - `'ascending'`: From features with fewest missing values to most.
130
+ - `'descending'`: From features with most missing values to fewest.
131
+ - `'roman'`: Left to right.
132
+ - `'arabic'`: Right to left.
133
+ - `'random'`: A random order for each round.
134
+
135
+ skip_complete : bool, default=False
136
+ If `True` then features with missing values during :meth:`transform`
137
+ which did not have any missing values during :meth:`fit` will be
138
+ imputed with the initial imputation method only. Set to `True` if you
139
+ have many features with no missing values at both :meth:`fit` and
140
+ :meth:`transform` time to save compute.
141
+
142
+ min_value : float or array-like of shape (n_features,), default=-np.inf
143
+ Minimum possible imputed value. Broadcast to shape `(n_features,)` if
144
+ scalar. If array-like, expects shape `(n_features,)`, one min value for
145
+ each feature. The default is `-np.inf`.
146
+
147
+ .. versionchanged:: 0.23
148
+ Added support for array-like.
149
+
150
+ max_value : float or array-like of shape (n_features,), default=np.inf
151
+ Maximum possible imputed value. Broadcast to shape `(n_features,)` if
152
+ scalar. If array-like, expects shape `(n_features,)`, one max value for
153
+ each feature. The default is `np.inf`.
154
+
155
+ .. versionchanged:: 0.23
156
+ Added support for array-like.
157
+
158
+ verbose : int, default=0
159
+ Verbosity flag, controls the debug messages that are issued
160
+ as functions are evaluated. The higher, the more verbose. Can be 0, 1,
161
+ or 2.
162
+
163
+ random_state : int, RandomState instance or None, default=None
164
+ The seed of the pseudo random number generator to use. Randomizes
165
+ selection of estimator features if `n_nearest_features` is not `None`,
166
+ the `imputation_order` if `random`, and the sampling from posterior if
167
+ `sample_posterior=True`. Use an integer for determinism.
168
+ See :term:`the Glossary <random_state>`.
169
+
170
+ add_indicator : bool, default=False
171
+ If `True`, a :class:`MissingIndicator` transform will stack onto output
172
+ of the imputer's transform. This allows a predictive estimator
173
+ to account for missingness despite imputation. If a feature has no
174
+ missing values at fit/train time, the feature won't appear on
175
+ the missing indicator even if there are missing values at
176
+ transform/test time.
177
+
178
+ keep_empty_features : bool, default=False
179
+ If True, features that consist exclusively of missing values when
180
+ `fit` is called are returned in results when `transform` is called.
181
+ The imputed value is always `0` except when
182
+ `initial_strategy="constant"` in which case `fill_value` will be
183
+ used instead.
184
+
185
+ .. versionadded:: 1.2
186
+
187
+ Attributes
188
+ ----------
189
+ initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer`
190
+ Imputer used to initialize the missing values.
191
+
192
+ imputation_sequence_ : list of tuples
193
+ Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where
194
+ `feat_idx` is the current feature to be imputed,
195
+ `neighbor_feat_idx` is the array of other features used to impute the
196
+ current feature, and `estimator` is the trained estimator used for
197
+ the imputation. Length is `self.n_features_with_missing_ *
198
+ self.n_iter_`.
199
+
200
+ n_iter_ : int
201
+ Number of iteration rounds that occurred. Will be less than
202
+ `self.max_iter` if early stopping criterion was reached.
203
+
204
+ n_features_in_ : int
205
+ Number of features seen during :term:`fit`.
206
+
207
+ .. versionadded:: 0.24
208
+
209
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
210
+ Names of features seen during :term:`fit`. Defined only when `X`
211
+ has feature names that are all strings.
212
+
213
+ .. versionadded:: 1.0
214
+
215
+ n_features_with_missing_ : int
216
+ Number of features with missing values.
217
+
218
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
219
+ Indicator used to add binary indicators for missing values.
220
+ `None` if `add_indicator=False`.
221
+
222
+ random_state_ : RandomState instance
223
+ RandomState instance that is generated either from a seed, the random
224
+ number generator or by `np.random`.
225
+
226
+ See Also
227
+ --------
228
+ SimpleImputer : Univariate imputer for completing missing values
229
+ with simple strategies.
230
+ KNNImputer : Multivariate imputer that estimates missing features using
231
+ nearest samples.
232
+
233
+ Notes
234
+ -----
235
+ To support imputation in inductive mode we store each feature's estimator
236
+ during the :meth:`fit` phase, and predict without refitting (in order)
237
+ during the :meth:`transform` phase.
238
+
239
+ Features which contain all missing values at :meth:`fit` are discarded upon
240
+ :meth:`transform`.
241
+
242
+ Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))`
243
+ where :math:`k` = `max_iter`, :math:`n` the number of samples and
244
+ :math:`p` the number of features. It thus becomes prohibitively costly when
245
+ the number of features increases. Setting
246
+ `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol`
247
+ can help to reduce its computational cost.
248
+
249
+ Depending on the nature of missing values, simple imputers can be
250
+ preferable in a prediction context.
251
+
252
+ References
253
+ ----------
254
+ .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
255
+ Multivariate Imputation by Chained Equations in R". Journal of
256
+ Statistical Software 45: 1-67.
257
+ <https://www.jstatsoft.org/article/view/v045i03>`_
258
+
259
+ .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in
260
+ Multivariate Data Suitable for use with an Electronic Computer".
261
+ Journal of the Royal Statistical Society 22(2): 302-306.
262
+ <https://www.jstor.org/stable/2984099>`_
263
+
264
+ Examples
265
+ --------
266
+ >>> import numpy as np
267
+ >>> from sklearn.experimental import enable_iterative_imputer
268
+ >>> from sklearn.impute import IterativeImputer
269
+ >>> imp_mean = IterativeImputer(random_state=0)
270
+ >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
271
+ IterativeImputer(random_state=0)
272
+ >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
273
+ >>> imp_mean.transform(X)
274
+ array([[ 6.9584..., 2. , 3. ],
275
+ [ 4. , 2.6000..., 6. ],
276
+ [10. , 4.9999..., 9. ]])
277
+
278
+ For a more detailed example see
279
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py` or
280
+ :ref:`sphx_glr_auto_examples_impute_plot_iterative_imputer_variants_comparison.py`.
281
+ """
282
+
283
+ _parameter_constraints: dict = {
284
+ **_BaseImputer._parameter_constraints,
285
+ "estimator": [None, HasMethods(["fit", "predict"])],
286
+ "sample_posterior": ["boolean"],
287
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
288
+ "tol": [Interval(Real, 0, None, closed="left")],
289
+ "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")],
290
+ "initial_strategy": [
291
+ StrOptions({"mean", "median", "most_frequent", "constant"})
292
+ ],
293
+ "fill_value": "no_validation", # any object is valid
294
+ "imputation_order": [
295
+ StrOptions({"ascending", "descending", "roman", "arabic", "random"})
296
+ ],
297
+ "skip_complete": ["boolean"],
298
+ "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
299
+ "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
300
+ "verbose": ["verbose"],
301
+ "random_state": ["random_state"],
302
+ }
303
+
304
+ def __init__(
305
+ self,
306
+ estimator=None,
307
+ *,
308
+ missing_values=np.nan,
309
+ sample_posterior=False,
310
+ max_iter=10,
311
+ tol=1e-3,
312
+ n_nearest_features=None,
313
+ initial_strategy="mean",
314
+ fill_value=None,
315
+ imputation_order="ascending",
316
+ skip_complete=False,
317
+ min_value=-np.inf,
318
+ max_value=np.inf,
319
+ verbose=0,
320
+ random_state=None,
321
+ add_indicator=False,
322
+ keep_empty_features=False,
323
+ ):
324
+ super().__init__(
325
+ missing_values=missing_values,
326
+ add_indicator=add_indicator,
327
+ keep_empty_features=keep_empty_features,
328
+ )
329
+
330
+ self.estimator = estimator
331
+ self.sample_posterior = sample_posterior
332
+ self.max_iter = max_iter
333
+ self.tol = tol
334
+ self.n_nearest_features = n_nearest_features
335
+ self.initial_strategy = initial_strategy
336
+ self.fill_value = fill_value
337
+ self.imputation_order = imputation_order
338
+ self.skip_complete = skip_complete
339
+ self.min_value = min_value
340
+ self.max_value = max_value
341
+ self.verbose = verbose
342
+ self.random_state = random_state
343
+
344
+ def _impute_one_feature(
345
+ self,
346
+ X_filled,
347
+ mask_missing_values,
348
+ feat_idx,
349
+ neighbor_feat_idx,
350
+ estimator=None,
351
+ fit_mode=True,
352
+ ):
353
+ """Impute a single feature from the others provided.
354
+
355
+ This function predicts the missing values of one of the features using
356
+ the current estimates of all the other features. The `estimator` must
357
+ support `return_std=True` in its `predict` method for this function
358
+ to work.
359
+
360
+ Parameters
361
+ ----------
362
+ X_filled : ndarray
363
+ Input data with the most recent imputations.
364
+
365
+ mask_missing_values : ndarray
366
+ Input data's missing indicator matrix.
367
+
368
+ feat_idx : int
369
+ Index of the feature currently being imputed.
370
+
371
+ neighbor_feat_idx : ndarray
372
+ Indices of the features to be used in imputing `feat_idx`.
373
+
374
+ estimator : object
375
+ The estimator to use at this step of the round-robin imputation.
376
+ If `sample_posterior=True`, the estimator must support
377
+ `return_std` in its `predict` method.
378
+ If None, it will be cloned from self._estimator.
379
+
380
+ fit_mode : boolean, default=True
381
+ Whether to fit and predict with the estimator or just predict.
382
+
383
+ Returns
384
+ -------
385
+ X_filled : ndarray
386
+ Input data with `X_filled[missing_row_mask, feat_idx]` updated.
387
+
388
+ estimator : estimator with sklearn API
389
+ The fitted estimator used to impute
390
+ `X_filled[missing_row_mask, feat_idx]`.
391
+ """
392
+ if estimator is None and fit_mode is False:
393
+ raise ValueError(
394
+ "If fit_mode is False, then an already-fitted "
395
+ "estimator should be passed in."
396
+ )
397
+
398
+ if estimator is None:
399
+ estimator = clone(self._estimator)
400
+
401
+ missing_row_mask = mask_missing_values[:, feat_idx]
402
+ if fit_mode:
403
+ X_train = _safe_indexing(
404
+ _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
405
+ ~missing_row_mask,
406
+ axis=0,
407
+ )
408
+ y_train = _safe_indexing(
409
+ _safe_indexing(X_filled, feat_idx, axis=1),
410
+ ~missing_row_mask,
411
+ axis=0,
412
+ )
413
+ estimator.fit(X_train, y_train)
414
+
415
+ # if no missing values, don't predict
416
+ if np.sum(missing_row_mask) == 0:
417
+ return X_filled, estimator
418
+
419
+ # get posterior samples if there is at least one missing value
420
+ X_test = _safe_indexing(
421
+ _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
422
+ missing_row_mask,
423
+ axis=0,
424
+ )
425
+ if self.sample_posterior:
426
+ mus, sigmas = estimator.predict(X_test, return_std=True)
427
+ imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
428
+ # two types of problems: (1) non-positive sigmas
429
+ # (2) mus outside legal range of min_value and max_value
430
+ # (results in inf sample)
431
+ positive_sigmas = sigmas > 0
432
+ imputed_values[~positive_sigmas] = mus[~positive_sigmas]
433
+ mus_too_low = mus < self._min_value[feat_idx]
434
+ imputed_values[mus_too_low] = self._min_value[feat_idx]
435
+ mus_too_high = mus > self._max_value[feat_idx]
436
+ imputed_values[mus_too_high] = self._max_value[feat_idx]
437
+ # the rest can be sampled without statistical issues
438
+ inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
439
+ mus = mus[inrange_mask]
440
+ sigmas = sigmas[inrange_mask]
441
+ a = (self._min_value[feat_idx] - mus) / sigmas
442
+ b = (self._max_value[feat_idx] - mus) / sigmas
443
+
444
+ truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
445
+ imputed_values[inrange_mask] = truncated_normal.rvs(
446
+ random_state=self.random_state_
447
+ )
448
+ else:
449
+ imputed_values = estimator.predict(X_test)
450
+ imputed_values = np.clip(
451
+ imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]
452
+ )
453
+
454
+ # update the feature
455
+ _safe_assign(
456
+ X_filled,
457
+ imputed_values,
458
+ row_indexer=missing_row_mask,
459
+ column_indexer=feat_idx,
460
+ )
461
+ return X_filled, estimator
462
+
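+ # Editor's note: an illustrative sketch (not library code) of the bounded
+ # posterior sampling used above when `sample_posterior=True`. Predictions
+ # with a positive standard deviation are redrawn from a normal distribution
+ # truncated to [min_value, max_value]; the bounds and numbers below are
+ # assumed values for the example.
+ #
+ # >>> import numpy as np
+ # >>> from scipy import stats
+ # >>> mus, sigmas = np.array([0.2, 0.7]), np.array([0.1, 0.3])
+ # >>> lo, hi = 0.0, 1.0          # assumed per-feature min_value / max_value
+ # >>> a, b = (lo - mus) / sigmas, (hi - mus) / sigmas
+ # >>> rng = np.random.RandomState(0)
+ # >>> stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas).rvs(random_state=rng)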
463
+ def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):
464
+ """Get a list of other features to predict `feat_idx`.
465
+
466
+ If `self.n_nearest_features` is less than the total
467
+ number of features, then use a probability proportional to the absolute
468
+ correlation between `feat_idx` and each other feature to randomly
469
+ choose a subsample of the other features (without replacement).
470
+
471
+ Parameters
472
+ ----------
473
+ n_features : int
474
+ Number of features in `X`.
475
+
476
+ feat_idx : int
477
+ Index of the feature currently being imputed.
478
+
479
+ abs_corr_mat : ndarray, shape (n_features, n_features)
480
+ Absolute correlation matrix of `X`. The diagonal has been zeroed
481
+ out and each feature has been normalized to sum to 1. Can be None.
482
+
483
+ Returns
484
+ -------
485
+ neighbor_feat_idx : array-like
486
+ The features to use to impute `feat_idx`.
487
+ """
488
+ if self.n_nearest_features is not None and self.n_nearest_features < n_features:
489
+ p = abs_corr_mat[:, feat_idx]
490
+ neighbor_feat_idx = self.random_state_.choice(
491
+ np.arange(n_features), self.n_nearest_features, replace=False, p=p
492
+ )
493
+ else:
494
+ inds_left = np.arange(feat_idx)
495
+ inds_right = np.arange(feat_idx + 1, n_features)
496
+ neighbor_feat_idx = np.concatenate((inds_left, inds_right))
497
+ return neighbor_feat_idx
498
+
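+ # Editor's note: illustrative sketch (not library code) of the weighted
+ # draw performed above when `n_nearest_features` is set. The column of the
+ # absolute-correlation matrix for the target feature serves as a
+ # probability vector; its own entry is zero, so a feature never picks
+ # itself. The numbers below are made up for the example.
+ #
+ # >>> import numpy as np
+ # >>> rng = np.random.RandomState(0)
+ # >>> p = np.array([0.0, 0.5, 0.3, 0.2])   # column of abs_corr_mat, sums to 1
+ # >>> rng.choice(np.arange(4), 2, replace=False, p=p)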
499
+ def _get_ordered_idx(self, mask_missing_values):
500
+ """Decide in what order we will update the features.
501
+
502
+ As a homage to the MICE R package, we will have 4 main options of
503
+ how to order the updates, and use a random order if anything else
504
+ is specified.
505
+
506
+ Also, this function skips features which have no missing values.
507
+
508
+ Parameters
509
+ ----------
510
+ mask_missing_values : array-like, shape (n_samples, n_features)
511
+ Input data's missing indicator matrix, where `n_samples` is the
512
+ number of samples and `n_features` is the number of features.
513
+
514
+ Returns
515
+ -------
516
+ ordered_idx : ndarray, shape (n_features,)
517
+ The order in which to impute the features.
518
+ """
519
+ frac_of_missing_values = mask_missing_values.mean(axis=0)
520
+ if self.skip_complete:
521
+ missing_values_idx = np.flatnonzero(frac_of_missing_values)
522
+ else:
523
+ missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
524
+ if self.imputation_order == "roman":
525
+ ordered_idx = missing_values_idx
526
+ elif self.imputation_order == "arabic":
527
+ ordered_idx = missing_values_idx[::-1]
528
+ elif self.imputation_order == "ascending":
529
+ n = len(frac_of_missing_values) - len(missing_values_idx)
530
+ ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:]
531
+ elif self.imputation_order == "descending":
532
+ n = len(frac_of_missing_values) - len(missing_values_idx)
533
+ ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1]
534
+ elif self.imputation_order == "random":
535
+ ordered_idx = missing_values_idx
536
+ self.random_state_.shuffle(ordered_idx)
537
+ return ordered_idx
538
+
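+ # Editor's note: a small worked example (not library code) of the
+ # 'ascending' branch above. With per-feature missing fractions
+ # [0.0, 0.4, 0.1] and `skip_complete=True`, the one complete feature is
+ # skipped and the rest are ordered from fewest to most missing values.
+ #
+ # >>> import numpy as np
+ # >>> frac = np.array([0.0, 0.4, 0.1])
+ # >>> n = len(frac) - len(np.flatnonzero(frac))   # features to skip
+ # >>> np.argsort(frac, kind="mergesort")[n:]
+ # array([2, 1])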
539
+ def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
540
+ """Get absolute correlation matrix between features.
541
+
542
+ Parameters
543
+ ----------
544
+ X_filled : ndarray, shape (n_samples, n_features)
545
+ Input data with the most recent imputations.
546
+
547
+ tolerance : float, default=1e-6
548
+ `abs_corr_mat` can have nans, which will be replaced
549
+ with `tolerance`.
550
+
551
+ Returns
552
+ -------
553
+ abs_corr_mat : ndarray, shape (n_features, n_features)
554
+ Absolute correlation matrix of `X` at the beginning of the
555
+ current round. The diagonal has been zeroed out and each feature's
556
+ absolute correlations with all others have been normalized to sum
557
+ to 1.
558
+ """
559
+ n_features = X_filled.shape[1]
560
+ if self.n_nearest_features is None or self.n_nearest_features >= n_features:
561
+ return None
562
+ with np.errstate(invalid="ignore"):
563
+ # if a feature in the neighborhood has only a single value
564
+ # (e.g., categorical feature), the std. dev. will be null and
565
+ # np.corrcoef will raise a warning due to a division by zero
566
+ abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
567
+ # np.corrcoef is not defined for features with zero std
568
+ abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
569
+ # ensures exploration, i.e. at least some probability of sampling
570
+ np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
571
+ # features are not their own neighbors
572
+ np.fill_diagonal(abs_corr_mat, 0)
573
+ # needs to sum to 1 for np.random.choice sampling
574
+ abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False)
575
+ return abs_corr_mat
576
+
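+ # Editor's note: illustrative sketch (not library code) of how the sampling
+ # matrix above is built: absolute correlations, NaNs replaced, diagonal
+ # zeroed, then each column L1-normalised so it can feed `np.random.choice`.
+ #
+ # >>> import numpy as np
+ # >>> from sklearn.preprocessing import normalize
+ # >>> X_filled = np.array([[1., 2., 3.], [2., 4., 1.], [3., 6., 7.]])
+ # >>> A = np.abs(np.corrcoef(X_filled.T))
+ # >>> A[np.isnan(A)] = 1e-6
+ # >>> A = np.clip(A, 1e-6, None)
+ # >>> np.fill_diagonal(A, 0)
+ # >>> A = normalize(A, norm="l1", axis=0)   # each column now sums to 1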
577
+ def _initial_imputation(self, X, in_fit=False):
578
+ """Perform initial imputation for input `X`.
579
+
580
+ Parameters
581
+ ----------
582
+ X : ndarray of shape (n_samples, n_features)
583
+ Input data, where `n_samples` is the number of samples and
584
+ `n_features` is the number of features.
585
+
586
+ in_fit : bool, default=False
587
+ Whether function is called in :meth:`fit`.
588
+
589
+ Returns
590
+ -------
591
+ Xt : ndarray of shape (n_samples, n_features)
592
+ Input data, where `n_samples` is the number of samples and
593
+ `n_features` is the number of features.
594
+
595
+ X_filled : ndarray of shape (n_samples, n_features)
596
+ Input data with the most recent imputations.
597
+
598
+ mask_missing_values : ndarray of shape (n_samples, n_features)
599
+ Input data's missing indicator matrix, where `n_samples` is the
600
+ number of samples and `n_features` is the number of features,
601
+ masked by non-missing features.
602
+
603
+ X_missing_mask : ndarray, shape (n_samples, n_features)
604
+ Input data's mask matrix indicating missing datapoints, where
605
+ `n_samples` is the number of samples and `n_features` is the
606
+ number of features.
607
+ """
608
+ if is_scalar_nan(self.missing_values):
609
+ force_all_finite = "allow-nan"
610
+ else:
611
+ force_all_finite = True
612
+
613
+ X = self._validate_data(
614
+ X,
615
+ dtype=FLOAT_DTYPES,
616
+ order="F",
617
+ reset=in_fit,
618
+ force_all_finite=force_all_finite,
619
+ )
620
+ _check_inputs_dtype(X, self.missing_values)
621
+
622
+ X_missing_mask = _get_mask(X, self.missing_values)
623
+ mask_missing_values = X_missing_mask.copy()
624
+ if self.initial_imputer_ is None:
625
+ self.initial_imputer_ = SimpleImputer(
626
+ missing_values=self.missing_values,
627
+ strategy=self.initial_strategy,
628
+ fill_value=self.fill_value,
629
+ keep_empty_features=self.keep_empty_features,
630
+ ).set_output(transform="default")
631
+ X_filled = self.initial_imputer_.fit_transform(X)
632
+ else:
633
+ X_filled = self.initial_imputer_.transform(X)
634
+
635
+ valid_mask = np.flatnonzero(
636
+ np.logical_not(np.isnan(self.initial_imputer_.statistics_))
637
+ )
638
+
639
+ if not self.keep_empty_features:
640
+ # drop empty features
641
+ Xt = X[:, valid_mask]
642
+ mask_missing_values = mask_missing_values[:, valid_mask]
643
+ else:
644
+ # mark empty features as not missing and keep the original
645
+ # imputation
646
+ mask_missing_values[:, valid_mask] = True
647
+ Xt = X
648
+
649
+ return Xt, X_filled, mask_missing_values, X_missing_mask
650
+
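+ # Editor's note: a minimal sketch (not library code) of the initial fill
+ # performed above with the default `initial_strategy="mean"`; the iterative
+ # rounds then refine only the entries that were originally missing.
+ #
+ # >>> import numpy as np
+ # >>> from sklearn.impute import SimpleImputer
+ # >>> X = np.array([[1.0, np.nan], [np.nan, 4.0], [7.0, 10.0]])
+ # >>> SimpleImputer(strategy="mean").fit_transform(X)
+ # array([[ 1.,  7.],
+ #        [ 4.,  4.],
+ #        [ 7., 10.]])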
651
+ @staticmethod
652
+ def _validate_limit(limit, limit_type, n_features):
653
+ """Validate the limits (min/max) of the feature values.
654
+
655
+ Converts scalar min/max limits to vectors of shape `(n_features,)`.
656
+
657
+ Parameters
658
+ ----------
659
+ limit : scalar or array-like
660
+ The user-specified limit (i.e., min_value or max_value).
661
+ limit_type : {'max', 'min'}
662
+ Type of limit to validate.
663
+ n_features : int
664
+ Number of features in the dataset.
665
+
666
+ Returns
667
+ -------
668
+ limit : ndarray of shape (n_features,)
669
+ Array of limits, one for each feature.
670
+ """
671
+ limit_bound = np.inf if limit_type == "max" else -np.inf
672
+ limit = limit_bound if limit is None else limit
673
+ if np.isscalar(limit):
674
+ limit = np.full(n_features, limit)
675
+ limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False)
676
+ if not limit.shape[0] == n_features:
677
+ raise ValueError(
678
+ f"'{limit_type}_value' should be of "
679
+ f"shape ({n_features},) when an array-like "
680
+ f"is provided. Got {limit.shape}, instead."
681
+ )
682
+ return limit
683
+
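+ # Editor's note: illustrative only. A scalar `min_value`/`max_value` is
+ # broadcast to one limit per feature, so the clipping during imputation is
+ # always feature-wise; an array-like limit must already have shape
+ # (n_features,).
+ #
+ # >>> IterativeImputer._validate_limit(5, "max", 3)
+ # array([5, 5, 5])
+ # >>> IterativeImputer._validate_limit(None, "min", 2)
+ # array([-inf, -inf])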
684
+ @_fit_context(
685
+ # IterativeImputer.estimator is not validated yet
686
+ prefer_skip_nested_validation=False
687
+ )
688
+ def fit_transform(self, X, y=None):
689
+ """Fit the imputer on `X` and return the transformed `X`.
690
+
691
+ Parameters
692
+ ----------
693
+ X : array-like, shape (n_samples, n_features)
694
+ Input data, where `n_samples` is the number of samples and
695
+ `n_features` is the number of features.
696
+
697
+ y : Ignored
698
+ Not used, present for API consistency by convention.
699
+
700
+ Returns
701
+ -------
702
+ Xt : array-like, shape (n_samples, n_features)
703
+ The imputed input data.
704
+ """
705
+ self.random_state_ = getattr(
706
+ self, "random_state_", check_random_state(self.random_state)
707
+ )
708
+
709
+ if self.estimator is None:
710
+ from ..linear_model import BayesianRidge
711
+
712
+ self._estimator = BayesianRidge()
713
+ else:
714
+ self._estimator = clone(self.estimator)
715
+
716
+ self.imputation_sequence_ = []
717
+
718
+ self.initial_imputer_ = None
719
+
720
+ X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
721
+ X, in_fit=True
722
+ )
723
+
724
+ super()._fit_indicator(complete_mask)
725
+ X_indicator = super()._transform_indicator(complete_mask)
726
+
727
+ if self.max_iter == 0 or np.all(mask_missing_values):
728
+ self.n_iter_ = 0
729
+ return super()._concatenate_indicator(Xt, X_indicator)
730
+
731
+ # Edge case: a single feature. We return the initial imputation.
732
+ if Xt.shape[1] == 1:
733
+ self.n_iter_ = 0
734
+ return super()._concatenate_indicator(Xt, X_indicator)
735
+
736
+ self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
737
+ self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
738
+
739
+ if not np.all(np.greater(self._max_value, self._min_value)):
740
+ raise ValueError("One (or more) features have min_value >= max_value.")
741
+
742
+ # order in which to impute
743
+ # note this is probably too slow for large feature data (d > 100000)
744
+ # and a better way would be good.
745
+ # see: https://goo.gl/KyCNwj and subsequent comments
746
+ ordered_idx = self._get_ordered_idx(mask_missing_values)
747
+ self.n_features_with_missing_ = len(ordered_idx)
748
+
749
+ abs_corr_mat = self._get_abs_corr_mat(Xt)
750
+
751
+ n_samples, n_features = Xt.shape
752
+ if self.verbose > 0:
753
+ print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
754
+ start_t = time()
755
+ if not self.sample_posterior:
756
+ Xt_previous = Xt.copy()
757
+ normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
758
+ for self.n_iter_ in range(1, self.max_iter + 1):
759
+ if self.imputation_order == "random":
760
+ ordered_idx = self._get_ordered_idx(mask_missing_values)
761
+
762
+ for feat_idx in ordered_idx:
763
+ neighbor_feat_idx = self._get_neighbor_feat_idx(
764
+ n_features, feat_idx, abs_corr_mat
765
+ )
766
+ Xt, estimator = self._impute_one_feature(
767
+ Xt,
768
+ mask_missing_values,
769
+ feat_idx,
770
+ neighbor_feat_idx,
771
+ estimator=None,
772
+ fit_mode=True,
773
+ )
774
+ estimator_triplet = _ImputerTriplet(
775
+ feat_idx, neighbor_feat_idx, estimator
776
+ )
777
+ self.imputation_sequence_.append(estimator_triplet)
778
+
779
+ if self.verbose > 1:
780
+ print(
781
+ "[IterativeImputer] Ending imputation round "
782
+ "%d/%d, elapsed time %0.2f"
783
+ % (self.n_iter_, self.max_iter, time() - start_t)
784
+ )
785
+
786
+ if not self.sample_posterior:
787
+ inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
788
+ if self.verbose > 0:
789
+ print(
790
+ "[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
791
+ inf_norm, normalized_tol
792
+ )
793
+ )
794
+ if inf_norm < normalized_tol:
795
+ if self.verbose > 0:
796
+ print("[IterativeImputer] Early stopping criterion reached.")
797
+ break
798
+ Xt_previous = Xt.copy()
799
+ else:
800
+ if not self.sample_posterior:
801
+ warnings.warn(
802
+ "[IterativeImputer] Early stopping criterion not reached.",
803
+ ConvergenceWarning,
804
+ )
805
+ _assign_where(Xt, X, cond=~mask_missing_values)
806
+
807
+ return super()._concatenate_indicator(Xt, X_indicator)
808
+
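+ # Editor's note: a hedged usage sketch (not library code). With
+ # `sample_posterior=True`, repeated fits with different seeds yield
+ # different completed matrices, which is the intended building block for
+ # multiple imputation.
+ #
+ # >>> import numpy as np
+ # >>> from sklearn.experimental import enable_iterative_imputer  # noqa
+ # >>> from sklearn.impute import IterativeImputer
+ # >>> X = [[1, 2], [3, 6], [4, 8], [np.nan, 3], [7, np.nan]]
+ # >>> imputations = [
+ # ...     IterativeImputer(sample_posterior=True, random_state=s).fit_transform(X)
+ # ...     for s in range(3)
+ # ... ]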
809
+ def transform(self, X):
810
+ """Impute all missing values in `X`.
811
+
812
+ Note that this is stochastic: if `random_state` is not fixed,
813
+ repeated calls or permuted input will yield different results.
814
+
815
+ Parameters
816
+ ----------
817
+ X : array-like of shape (n_samples, n_features)
818
+ The input data to complete.
819
+
820
+ Returns
821
+ -------
822
+ Xt : array-like, shape (n_samples, n_features)
823
+ The imputed input data.
824
+ """
825
+ check_is_fitted(self)
826
+
827
+ X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
828
+ X, in_fit=False
829
+ )
830
+
831
+ X_indicator = super()._transform_indicator(complete_mask)
832
+
833
+ if self.n_iter_ == 0 or np.all(mask_missing_values):
834
+ return super()._concatenate_indicator(Xt, X_indicator)
835
+
836
+ imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
837
+ i_rnd = 0
838
+ if self.verbose > 0:
839
+ print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
840
+ start_t = time()
841
+ for it, estimator_triplet in enumerate(self.imputation_sequence_):
842
+ Xt, _ = self._impute_one_feature(
843
+ Xt,
844
+ mask_missing_values,
845
+ estimator_triplet.feat_idx,
846
+ estimator_triplet.neighbor_feat_idx,
847
+ estimator=estimator_triplet.estimator,
848
+ fit_mode=False,
849
+ )
850
+ if not (it + 1) % imputations_per_round:
851
+ if self.verbose > 1:
852
+ print(
853
+ "[IterativeImputer] Ending imputation round "
854
+ "%d/%d, elapsed time %0.2f"
855
+ % (i_rnd + 1, self.n_iter_, time() - start_t)
856
+ )
857
+ i_rnd += 1
858
+
859
+ _assign_where(Xt, X, cond=~mask_missing_values)
860
+
861
+ return super()._concatenate_indicator(Xt, X_indicator)
862
+
863
+ def fit(self, X, y=None):
864
+ """Fit the imputer on `X` and return self.
865
+
866
+ Parameters
867
+ ----------
868
+ X : array-like, shape (n_samples, n_features)
869
+ Input data, where `n_samples` is the number of samples and
870
+ `n_features` is the number of features.
871
+
872
+ y : Ignored
873
+ Not used, present for API consistency by convention.
874
+
875
+ Returns
876
+ -------
877
+ self : object
878
+ Fitted estimator.
879
+ """
880
+ self.fit_transform(X)
881
+ return self
882
+
883
+ def get_feature_names_out(self, input_features=None):
884
+ """Get output feature names for transformation.
885
+
886
+ Parameters
887
+ ----------
888
+ input_features : array-like of str or None, default=None
889
+ Input features.
890
+
891
+ - If `input_features` is `None`, then `feature_names_in_` is
892
+ used as feature names in. If `feature_names_in_` is not defined,
893
+ then the following input feature names are generated:
894
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
895
+ - If `input_features` is an array-like, then `input_features` must
896
+ match `feature_names_in_` if `feature_names_in_` is defined.
897
+
898
+ Returns
899
+ -------
900
+ feature_names_out : ndarray of str objects
901
+ Transformed feature names.
902
+ """
903
+ check_is_fitted(self, "n_features_in_")
904
+ input_features = _check_feature_names_in(self, input_features)
905
+ names = self.initial_imputer_.get_feature_names_out(input_features)
906
+ return self._concatenate_indicator_feature_names_out(names, input_features)
llmeval-env/lib/python3.10/site-packages/sklearn/impute/_knn.py ADDED
@@ -0,0 +1,401 @@
1
+ # Authors: Ashim Bhattarai <[email protected]>
2
+ # Thomas J Fan <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Integral
6
+
7
+ import numpy as np
8
+
9
+ from ..base import _fit_context
10
+ from ..metrics import pairwise_distances_chunked
11
+ from ..metrics.pairwise import _NAN_METRICS
12
+ from ..neighbors._base import _get_weights
13
+ from ..utils import is_scalar_nan
14
+ from ..utils._mask import _get_mask
15
+ from ..utils._param_validation import Hidden, Interval, StrOptions
16
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
17
+ from ._base import _BaseImputer
18
+
19
+
20
+ class KNNImputer(_BaseImputer):
21
+ """Imputation for completing missing values using k-Nearest Neighbors.
22
+
23
+ Each sample's missing values are imputed using the mean value from
24
+ `n_neighbors` nearest neighbors found in the training set. Two samples are
25
+ close if the features that neither of them is missing are close.
26
+
27
+ Read more in the :ref:`User Guide <knnimpute>`.
28
+
29
+ .. versionadded:: 0.22
30
+
31
+ Parameters
32
+ ----------
33
+ missing_values : int, float, str, np.nan or None, default=np.nan
34
+ The placeholder for the missing values. All occurrences of
35
+ `missing_values` will be imputed. For pandas' dataframes with
36
+ nullable integer dtypes with missing values, `missing_values`
37
+ should be set to np.nan, since `pd.NA` will be converted to np.nan.
38
+
39
+ n_neighbors : int, default=5
40
+ Number of neighboring samples to use for imputation.
41
+
42
+ weights : {'uniform', 'distance'} or callable, default='uniform'
43
+ Weight function used in prediction. Possible values:
44
+
45
+ - 'uniform' : uniform weights. All points in each neighborhood are
46
+ weighted equally.
47
+ - 'distance' : weight points by the inverse of their distance.
48
+ In this case, closer neighbors of a query point will have a
49
+ greater influence than neighbors which are further away.
50
+ - callable : a user-defined function which accepts an
51
+ array of distances, and returns an array of the same shape
52
+ containing the weights.
53
+
54
+ metric : {'nan_euclidean'} or callable, default='nan_euclidean'
55
+ Distance metric for searching neighbors. Possible values:
56
+
57
+ - 'nan_euclidean'
58
+ - callable : a user-defined function which conforms to the definition
59
+ of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
60
+ accepts two arrays, X and Y, and a `missing_values` keyword in
61
+ `kwds` and returns a scalar distance value.
62
+
63
+ copy : bool, default=True
64
+ If True, a copy of X will be created. If False, imputation will
65
+ be done in-place whenever possible.
66
+
67
+ add_indicator : bool, default=False
68
+ If True, a :class:`MissingIndicator` transform will stack onto the
69
+ output of the imputer's transform. This allows a predictive estimator
70
+ to account for missingness despite imputation. If a feature has no
71
+ missing values at fit/train time, the feature won't appear on the
72
+ missing indicator even if there are missing values at transform/test
73
+ time.
74
+
75
+ keep_empty_features : bool, default=False
76
+ If True, features that consist exclusively of missing values when
77
+ `fit` is called are returned in results when `transform` is called.
78
+ The imputed value is always `0`.
79
+
80
+ .. versionadded:: 1.2
81
+
82
+ Attributes
83
+ ----------
84
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
85
+ Indicator used to add binary indicators for missing values.
86
+ ``None`` if add_indicator is False.
87
+
88
+ n_features_in_ : int
89
+ Number of features seen during :term:`fit`.
90
+
91
+ .. versionadded:: 0.24
92
+
93
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
94
+ Names of features seen during :term:`fit`. Defined only when `X`
95
+ has feature names that are all strings.
96
+
97
+ .. versionadded:: 1.0
98
+
99
+ See Also
100
+ --------
101
+ SimpleImputer : Univariate imputer for completing missing values
102
+ with simple strategies.
103
+ IterativeImputer : Multivariate imputer that estimates values to impute for
104
+ each feature with missing values from all the others.
105
+
106
+ References
107
+ ----------
108
+ * `Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
109
+ Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
110
+ value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
111
+ no. 6, 2001 Pages 520-525.
112
+ <https://academic.oup.com/bioinformatics/article/17/6/520/272365>`_
113
+
114
+ Examples
115
+ --------
116
+ >>> import numpy as np
117
+ >>> from sklearn.impute import KNNImputer
118
+ >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
119
+ >>> imputer = KNNImputer(n_neighbors=2)
120
+ >>> imputer.fit_transform(X)
121
+ array([[1. , 2. , 4. ],
122
+ [3. , 4. , 3. ],
123
+ [5.5, 6. , 5. ],
124
+ [8. , 8. , 7. ]])
125
+
126
+ For a more detailed example see
127
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`.
128
+ """
129
+
130
+ _parameter_constraints: dict = {
131
+ **_BaseImputer._parameter_constraints,
132
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
133
+ "weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)],
134
+ "metric": [StrOptions(set(_NAN_METRICS)), callable],
135
+ "copy": ["boolean"],
136
+ }
137
+
138
+ def __init__(
139
+ self,
140
+ *,
141
+ missing_values=np.nan,
142
+ n_neighbors=5,
143
+ weights="uniform",
144
+ metric="nan_euclidean",
145
+ copy=True,
146
+ add_indicator=False,
147
+ keep_empty_features=False,
148
+ ):
149
+ super().__init__(
150
+ missing_values=missing_values,
151
+ add_indicator=add_indicator,
152
+ keep_empty_features=keep_empty_features,
153
+ )
154
+ self.n_neighbors = n_neighbors
155
+ self.weights = weights
156
+ self.metric = metric
157
+ self.copy = copy
158
+
159
+ def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):
160
+ """Helper function to impute a single column.
161
+
162
+ Parameters
163
+ ----------
164
+ dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
165
+ Distance matrix between the receivers and potential donors from
166
+ training set. There must be at least one non-nan distance between
167
+ a receiver and a potential donor.
168
+
169
+ n_neighbors : int
170
+ Number of neighbors to consider.
171
+
172
+ fit_X_col : ndarray of shape (n_potential_donors,)
173
+ Column of potential donors from training set.
174
+
175
+ mask_fit_X_col : ndarray of shape (n_potential_donors,)
176
+ Missing mask for fit_X_col.
177
+
178
+ Returns
179
+ -------
180
+ imputed_values : ndarray of shape (n_receivers,)
181
+ Imputed values for receiver.
182
+ """
183
+ # Get donors
184
+ donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[
185
+ :, :n_neighbors
186
+ ]
187
+
188
+ # Get weight matrix from distance matrix
189
+ donors_dist = dist_pot_donors[
190
+ np.arange(donors_idx.shape[0])[:, None], donors_idx
191
+ ]
192
+
193
+ weight_matrix = _get_weights(donors_dist, self.weights)
194
+
195
+ # fill nans with zeros
196
+ if weight_matrix is not None:
197
+ weight_matrix[np.isnan(weight_matrix)] = 0.0
198
+
199
+ # Retrieve donor values and calculate kNN average
200
+ donors = fit_X_col.take(donors_idx)
201
+ donors_mask = mask_fit_X_col.take(donors_idx)
202
+ donors = np.ma.array(donors, mask=donors_mask)
203
+
204
+ return np.ma.average(donors, axis=1, weights=weight_matrix).data
205
+
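+ # Editor's note: illustrative sketch (not library code) of the donor step
+ # above: `argpartition` picks the `n_neighbors` closest donors per receiver
+ # and a masked average ignores donors whose own value is missing.
+ #
+ # >>> import numpy as np
+ # >>> dist = np.array([[1.0, 3.0, 2.0]])        # one receiver, three donors
+ # >>> donors_idx = np.argpartition(dist, 1, axis=1)[:, :2]
+ # >>> donors_idx
+ # array([[0, 2]])
+ # >>> values = np.array([10.0, 0.0, 20.0])      # donor column values
+ # >>> mask = np.array([False, True, False])     # donor 1 is itself missing
+ # >>> donors = np.ma.array(values.take(donors_idx), mask=mask.take(donors_idx))
+ # >>> np.ma.average(donors, axis=1).data
+ # array([15.])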
206
+ @_fit_context(prefer_skip_nested_validation=True)
207
+ def fit(self, X, y=None):
208
+ """Fit the imputer on X.
209
+
210
+ Parameters
211
+ ----------
212
+ X : array-like of shape (n_samples, n_features)
213
+ Input data, where `n_samples` is the number of samples and
214
+ `n_features` is the number of features.
215
+
216
+ y : Ignored
217
+ Not used, present here for API consistency by convention.
218
+
219
+ Returns
220
+ -------
221
+ self : object
222
+ The fitted `KNNImputer` class instance.
223
+ """
224
+ # Check data integrity and calling arguments
225
+ if not is_scalar_nan(self.missing_values):
226
+ force_all_finite = True
227
+ else:
228
+ force_all_finite = "allow-nan"
229
+
230
+ X = self._validate_data(
231
+ X,
232
+ accept_sparse=False,
233
+ dtype=FLOAT_DTYPES,
234
+ force_all_finite=force_all_finite,
235
+ copy=self.copy,
236
+ )
237
+
238
+ self._fit_X = X
239
+ self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
240
+ self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
241
+
242
+ super()._fit_indicator(self._mask_fit_X)
243
+
244
+ return self
245
+
246
+ def transform(self, X):
247
+ """Impute all missing values in X.
248
+
249
+ Parameters
250
+ ----------
251
+ X : array-like of shape (n_samples, n_features)
252
+ The input data to complete.
253
+
254
+ Returns
255
+ -------
256
+ X : array-like of shape (n_samples, n_output_features)
257
+ The imputed dataset. `n_output_features` is the number of features
258
+ that are not always missing during `fit`.
259
+ """
260
+
261
+ check_is_fitted(self)
262
+ if not is_scalar_nan(self.missing_values):
263
+ force_all_finite = True
264
+ else:
265
+ force_all_finite = "allow-nan"
266
+ X = self._validate_data(
267
+ X,
268
+ accept_sparse=False,
269
+ dtype=FLOAT_DTYPES,
270
+ force_all_finite=force_all_finite,
271
+ copy=self.copy,
272
+ reset=False,
273
+ )
274
+
275
+ mask = _get_mask(X, self.missing_values)
276
+ mask_fit_X = self._mask_fit_X
277
+ valid_mask = self._valid_mask
278
+
279
+ X_indicator = super()._transform_indicator(mask)
280
+
281
+ # Removes columns where the training data is all nan
282
+ if not np.any(mask):
283
+ # No missing values in X
284
+ if self.keep_empty_features:
285
+ Xc = X
286
+ Xc[:, ~valid_mask] = 0
287
+ else:
288
+ Xc = X[:, valid_mask]
289
+
290
+ # Even if there are no missing values in X, we still concatenate Xc
291
+ # with the missing value indicator matrix, X_indicator.
292
+ # This is to ensure that the output maintains consistency in terms
293
+ # of columns, regardless of whether missing values exist in X or not.
294
+ return super()._concatenate_indicator(Xc, X_indicator)
295
+
296
+ row_missing_idx = np.flatnonzero(mask.any(axis=1))
297
+
298
+ non_missing_fix_X = np.logical_not(mask_fit_X)
299
+
300
+ # Maps from indices from X to indices in dist matrix
301
+ dist_idx_map = np.zeros(X.shape[0], dtype=int)
302
+ dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
303
+
304
+ def process_chunk(dist_chunk, start):
305
+ row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
306
+
307
+ # Find and impute missing by column
308
+ for col in range(X.shape[1]):
309
+ if not valid_mask[col]:
310
+ # column was all missing during training
311
+ continue
312
+
313
+ col_mask = mask[row_missing_chunk, col]
314
+ if not np.any(col_mask):
315
+ # column has no missing values
316
+ continue
317
+
318
+ (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
319
+
320
+ # receivers_idx are indices in X
321
+ receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
322
+
323
+ # distances for samples that needed imputation for column
324
+ dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
325
+ :, potential_donors_idx
326
+ ]
327
+
328
+ # receivers with all nan distances impute with mean
329
+ all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
330
+ all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
331
+
332
+ if all_nan_receivers_idx.size:
333
+ col_mean = np.ma.array(
334
+ self._fit_X[:, col], mask=mask_fit_X[:, col]
335
+ ).mean()
336
+ X[all_nan_receivers_idx, col] = col_mean
337
+
338
+ if len(all_nan_receivers_idx) == len(receivers_idx):
339
+ # all receivers imputed with mean
340
+ continue
341
+
342
+ # receivers with at least one defined distance
343
+ receivers_idx = receivers_idx[~all_nan_dist_mask]
344
+ dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
345
+ :, potential_donors_idx
346
+ ]
347
+
348
+ n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
349
+ value = self._calc_impute(
350
+ dist_subset,
351
+ n_neighbors,
352
+ self._fit_X[potential_donors_idx, col],
353
+ mask_fit_X[potential_donors_idx, col],
354
+ )
355
+ X[receivers_idx, col] = value
356
+
357
+ # process in fixed-memory chunks
358
+ gen = pairwise_distances_chunked(
359
+ X[row_missing_idx, :],
360
+ self._fit_X,
361
+ metric=self.metric,
362
+ missing_values=self.missing_values,
363
+ force_all_finite=force_all_finite,
364
+ reduce_func=process_chunk,
365
+ )
366
+ for chunk in gen:
367
+ # process_chunk modifies X in place. No return value.
368
+ pass
369
+
370
+ if self.keep_empty_features:
371
+ Xc = X
372
+ Xc[:, ~valid_mask] = 0
373
+ else:
374
+ Xc = X[:, valid_mask]
375
+
376
+ return super()._concatenate_indicator(Xc, X_indicator)
377
+
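+ # Editor's note: a short usage sketch (illustrative only). With
+ # `weights="distance"` closer donors contribute more to the imputed mean
+ # than distant ones; compare with the uniform default shown in the class
+ # docstring above.
+ #
+ # >>> import numpy as np
+ # >>> from sklearn.impute import KNNImputer
+ # >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
+ # >>> KNNImputer(n_neighbors=2, weights="distance").fit_transform(X)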
378
+ def get_feature_names_out(self, input_features=None):
379
+ """Get output feature names for transformation.
380
+
381
+ Parameters
382
+ ----------
383
+ input_features : array-like of str or None, default=None
384
+ Input features.
385
+
386
+ - If `input_features` is `None`, then `feature_names_in_` is
387
+ used as feature names in. If `feature_names_in_` is not defined,
388
+ then the following input feature names are generated:
389
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
390
+ - If `input_features` is an array-like, then `input_features` must
391
+ match `feature_names_in_` if `feature_names_in_` is defined.
392
+
393
+ Returns
394
+ -------
395
+ feature_names_out : ndarray of str objects
396
+ Transformed feature names.
397
+ """
398
+ check_is_fitted(self, "n_features_in_")
399
+ input_features = _check_feature_names_in(self, input_features)
400
+ names = input_features[self._valid_mask]
401
+ return self._concatenate_indicator_feature_names_out(names, input_features)