applied-ai-018 commited on
Commit
23c294a
·
verified ·
1 Parent(s): 7481afc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. venv/lib/python3.10/site-packages/sklearn/decomposition/__init__.py +52 -0
  4. venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/sklearn/decomposition/_base.py +193 -0
  9. venv/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  10. venv/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py +2301 -0
  11. venv/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py +458 -0
  12. venv/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py +795 -0
  13. venv/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py +409 -0
  14. venv/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py +572 -0
  15. venv/lib/python3.10/site-packages/sklearn/decomposition/_lda.py +929 -0
  16. venv/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py +2443 -0
  17. venv/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  18. venv/lib/python3.10/site-packages/sklearn/decomposition/_pca.py +747 -0
  19. venv/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py +551 -0
  20. venv/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py +319 -0
  21. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py +116 -0
  34. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py +451 -0
  35. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py +452 -0
  36. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py +566 -0
  37. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py +1062 -0
  38. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py +477 -0
  39. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py +987 -0
  40. venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py +212 -0
  41. venv/lib/python3.10/site-packages/sklearn/ensemble/__init__.py +44 -0
  42. venv/lib/python3.10/site-packages/sklearn/ensemble/_bagging.py +1242 -0
  43. venv/lib/python3.10/site-packages/sklearn/ensemble/_base.py +301 -0
  44. venv/lib/python3.10/site-packages/sklearn/ensemble/_forest.py +0 -0
  45. venv/lib/python3.10/site-packages/sklearn/ensemble/_gb.py +2168 -0
  46. venv/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so +0 -0
  47. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py +5 -0
  48. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so +0 -0
  49. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so +0 -0
  50. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so +0 -0
ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa0433a7a7c158702a1b2b176fec02c78490e6c9b78b7d112ad9b6f86cdf7a36
3
+ size 33555612
ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c02430b06b5f09af241a4e5cee182e9e1d5715c08acdd027be18ffa44f13abc
3
+ size 33555627
venv/lib/python3.10/site-packages/sklearn/decomposition/__init__.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The :mod:`sklearn.decomposition` module includes matrix decomposition
3
+ algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
4
+ this module can be regarded as dimensionality reduction techniques.
5
+ """
6
+
7
+
8
+ from ..utils.extmath import randomized_svd
9
+ from ._dict_learning import (
10
+ DictionaryLearning,
11
+ MiniBatchDictionaryLearning,
12
+ SparseCoder,
13
+ dict_learning,
14
+ dict_learning_online,
15
+ sparse_encode,
16
+ )
17
+ from ._factor_analysis import FactorAnalysis
18
+ from ._fastica import FastICA, fastica
19
+ from ._incremental_pca import IncrementalPCA
20
+ from ._kernel_pca import KernelPCA
21
+ from ._lda import LatentDirichletAllocation
22
+ from ._nmf import (
23
+ NMF,
24
+ MiniBatchNMF,
25
+ non_negative_factorization,
26
+ )
27
+ from ._pca import PCA
28
+ from ._sparse_pca import MiniBatchSparsePCA, SparsePCA
29
+ from ._truncated_svd import TruncatedSVD
30
+
31
+ __all__ = [
32
+ "DictionaryLearning",
33
+ "FastICA",
34
+ "IncrementalPCA",
35
+ "KernelPCA",
36
+ "MiniBatchDictionaryLearning",
37
+ "MiniBatchNMF",
38
+ "MiniBatchSparsePCA",
39
+ "NMF",
40
+ "PCA",
41
+ "SparseCoder",
42
+ "SparsePCA",
43
+ "dict_learning",
44
+ "dict_learning_online",
45
+ "fastica",
46
+ "non_negative_factorization",
47
+ "randomized_svd",
48
+ "sparse_encode",
49
+ "FactorAnalysis",
50
+ "TruncatedSVD",
51
+ "LatentDirichletAllocation",
52
+ ]
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc ADDED
Binary file (5.75 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc ADDED
Binary file (23.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc ADDED
Binary file (64.5 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/_base.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Principal Component Analysis Base Classes"""
2
+
3
+ # Author: Alexandre Gramfort <[email protected]>
4
+ # Olivier Grisel <[email protected]>
5
+ # Mathieu Blondel <[email protected]>
6
+ # Denis A. Engemann <[email protected]>
7
+ # Kyle Kastner <[email protected]>
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ from abc import ABCMeta, abstractmethod
12
+
13
+ import numpy as np
14
+ from scipy import linalg
15
+ from scipy.sparse import issparse
16
+
17
+ from ..base import BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin
18
+ from ..utils._array_api import _add_to_diagonal, device, get_namespace
19
+ from ..utils.sparsefuncs import _implicit_column_offset
20
+ from ..utils.validation import check_is_fitted
21
+
22
+
23
+ class _BasePCA(
24
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
25
+ ):
26
+ """Base class for PCA methods.
27
+
28
+ Warning: This class should not be used directly.
29
+ Use derived classes instead.
30
+ """
31
+
32
+ def get_covariance(self):
33
+ """Compute data covariance with the generative model.
34
+
35
+ ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
36
+ where S**2 contains the explained variances, and sigma2 contains the
37
+ noise variances.
38
+
39
+ Returns
40
+ -------
41
+ cov : array of shape=(n_features, n_features)
42
+ Estimated covariance of data.
43
+ """
44
+ xp, _ = get_namespace(self.components_)
45
+
46
+ components_ = self.components_
47
+ exp_var = self.explained_variance_
48
+ if self.whiten:
49
+ components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
50
+ exp_var_diff = exp_var - self.noise_variance_
51
+ exp_var_diff = xp.where(
52
+ exp_var > self.noise_variance_,
53
+ exp_var_diff,
54
+ xp.asarray(0.0, device=device(exp_var)),
55
+ )
56
+ cov = (components_.T * exp_var_diff) @ components_
57
+ _add_to_diagonal(cov, self.noise_variance_, xp)
58
+ return cov
59
+
60
+ def get_precision(self):
61
+ """Compute data precision matrix with the generative model.
62
+
63
+ Equals the inverse of the covariance but computed with
64
+ the matrix inversion lemma for efficiency.
65
+
66
+ Returns
67
+ -------
68
+ precision : array, shape=(n_features, n_features)
69
+ Estimated precision of data.
70
+ """
71
+ xp, is_array_api_compliant = get_namespace(self.components_)
72
+
73
+ n_features = self.components_.shape[1]
74
+
75
+ # handle corner cases first
76
+ if self.n_components_ == 0:
77
+ return xp.eye(n_features) / self.noise_variance_
78
+
79
+ if is_array_api_compliant:
80
+ linalg_inv = xp.linalg.inv
81
+ else:
82
+ linalg_inv = linalg.inv
83
+
84
+ if self.noise_variance_ == 0.0:
85
+ return linalg_inv(self.get_covariance())
86
+
87
+ # Get precision using matrix inversion lemma
88
+ components_ = self.components_
89
+ exp_var = self.explained_variance_
90
+ if self.whiten:
91
+ components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
92
+ exp_var_diff = exp_var - self.noise_variance_
93
+ exp_var_diff = xp.where(
94
+ exp_var > self.noise_variance_,
95
+ exp_var_diff,
96
+ xp.asarray(0.0, device=device(exp_var)),
97
+ )
98
+ precision = components_ @ components_.T / self.noise_variance_
99
+ _add_to_diagonal(precision, 1.0 / exp_var_diff, xp)
100
+ precision = components_.T @ linalg_inv(precision) @ components_
101
+ precision /= -(self.noise_variance_**2)
102
+ _add_to_diagonal(precision, 1.0 / self.noise_variance_, xp)
103
+ return precision
104
+
105
+ @abstractmethod
106
+ def fit(self, X, y=None):
107
+ """Placeholder for fit. Subclasses should implement this method!
108
+
109
+ Fit the model with X.
110
+
111
+ Parameters
112
+ ----------
113
+ X : array-like of shape (n_samples, n_features)
114
+ Training data, where `n_samples` is the number of samples and
115
+ `n_features` is the number of features.
116
+
117
+ Returns
118
+ -------
119
+ self : object
120
+ Returns the instance itself.
121
+ """
122
+
123
+ def transform(self, X):
124
+ """Apply dimensionality reduction to X.
125
+
126
+ X is projected on the first principal components previously extracted
127
+ from a training set.
128
+
129
+ Parameters
130
+ ----------
131
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
132
+ New data, where `n_samples` is the number of samples
133
+ and `n_features` is the number of features.
134
+
135
+ Returns
136
+ -------
137
+ X_new : array-like of shape (n_samples, n_components)
138
+ Projection of X in the first principal components, where `n_samples`
139
+ is the number of samples and `n_components` is the number of the components.
140
+ """
141
+ xp, _ = get_namespace(X)
142
+
143
+ check_is_fitted(self)
144
+
145
+ X = self._validate_data(
146
+ X, accept_sparse=("csr", "csc"), dtype=[xp.float64, xp.float32], reset=False
147
+ )
148
+ if self.mean_ is not None:
149
+ if issparse(X):
150
+ X = _implicit_column_offset(X, self.mean_)
151
+ else:
152
+ X = X - self.mean_
153
+ X_transformed = X @ self.components_.T
154
+ if self.whiten:
155
+ X_transformed /= xp.sqrt(self.explained_variance_)
156
+ return X_transformed
157
+
158
+ def inverse_transform(self, X):
159
+ """Transform data back to its original space.
160
+
161
+ In other words, return an input `X_original` whose transform would be X.
162
+
163
+ Parameters
164
+ ----------
165
+ X : array-like of shape (n_samples, n_components)
166
+ New data, where `n_samples` is the number of samples
167
+ and `n_components` is the number of components.
168
+
169
+ Returns
170
+ -------
171
+ X_original array-like of shape (n_samples, n_features)
172
+ Original data, where `n_samples` is the number of samples
173
+ and `n_features` is the number of features.
174
+
175
+ Notes
176
+ -----
177
+ If whitening is enabled, inverse_transform will compute the
178
+ exact inverse operation, which includes reversing whitening.
179
+ """
180
+ xp, _ = get_namespace(X)
181
+
182
+ if self.whiten:
183
+ scaled_components = (
184
+ xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_
185
+ )
186
+ return X @ scaled_components + self.mean_
187
+ else:
188
+ return X @ self.components_ + self.mean_
189
+
190
+ @property
191
+ def _n_features_out(self):
192
+ """Number of transformed output features."""
193
+ return self.components_.shape[0]
venv/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (246 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py ADDED
@@ -0,0 +1,2301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Dictionary learning.
2
+ """
3
+ # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
4
+ # License: BSD 3 clause
5
+
6
+ import itertools
7
+ import sys
8
+ import time
9
+ from numbers import Integral, Real
10
+ from warnings import warn
11
+
12
+ import numpy as np
13
+ from joblib import effective_n_jobs
14
+ from scipy import linalg
15
+
16
+ from ..base import (
17
+ BaseEstimator,
18
+ ClassNamePrefixFeaturesOutMixin,
19
+ TransformerMixin,
20
+ _fit_context,
21
+ )
22
+ from ..linear_model import Lars, Lasso, LassoLars, orthogonal_mp_gram
23
+ from ..utils import check_array, check_random_state, gen_batches, gen_even_slices
24
+ from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
25
+ from ..utils.extmath import randomized_svd, row_norms, svd_flip
26
+ from ..utils.parallel import Parallel, delayed
27
+ from ..utils.validation import check_is_fitted
28
+
29
+
30
+ def _check_positive_coding(method, positive):
31
+ if positive and method in ["omp", "lars"]:
32
+ raise ValueError(
33
+ "Positive constraint not supported for '{}' coding method.".format(method)
34
+ )
35
+
36
+
37
+ def _sparse_encode_precomputed(
38
+ X,
39
+ dictionary,
40
+ *,
41
+ gram=None,
42
+ cov=None,
43
+ algorithm="lasso_lars",
44
+ regularization=None,
45
+ copy_cov=True,
46
+ init=None,
47
+ max_iter=1000,
48
+ verbose=0,
49
+ positive=False,
50
+ ):
51
+ """Generic sparse coding with precomputed Gram and/or covariance matrices.
52
+
53
+ Each row of the result is the solution to a Lasso problem.
54
+
55
+ Parameters
56
+ ----------
57
+ X : ndarray of shape (n_samples, n_features)
58
+ Data matrix.
59
+
60
+ dictionary : ndarray of shape (n_components, n_features)
61
+ The dictionary matrix against which to solve the sparse coding of
62
+ the data. Some of the algorithms assume normalized rows.
63
+
64
+ gram : ndarray of shape (n_components, n_components), default=None
65
+ Precomputed Gram matrix, `dictionary * dictionary'`
66
+ gram can be `None` if method is 'threshold'.
67
+
68
+ cov : ndarray of shape (n_components, n_samples), default=None
69
+ Precomputed covariance, `dictionary * X'`.
70
+
71
+ algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
72
+ default='lasso_lars'
73
+ The algorithm used:
74
+
75
+ * `'lars'`: uses the least angle regression method
76
+ (`linear_model.lars_path`);
77
+ * `'lasso_lars'`: uses Lars to compute the Lasso solution;
78
+ * `'lasso_cd'`: uses the coordinate descent method to compute the
79
+ Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
80
+ the estimated components are sparse;
81
+ * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
82
+ solution;
83
+ * `'threshold'`: squashes to zero all coefficients less than
84
+ regularization from the projection `dictionary * data'`.
85
+
86
+ regularization : int or float, default=None
87
+ The regularization parameter. It corresponds to alpha when
88
+ algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
89
+ Otherwise it corresponds to `n_nonzero_coefs`.
90
+
91
+ init : ndarray of shape (n_samples, n_components), default=None
92
+ Initialization value of the sparse code. Only used if
93
+ `algorithm='lasso_cd'`.
94
+
95
+ max_iter : int, default=1000
96
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
97
+ `'lasso_lars'`.
98
+
99
+ copy_cov : bool, default=True
100
+ Whether to copy the precomputed covariance matrix; if `False`, it may
101
+ be overwritten.
102
+
103
+ verbose : int, default=0
104
+ Controls the verbosity; the higher, the more messages.
105
+
106
+ positive: bool, default=False
107
+ Whether to enforce a positivity constraint on the sparse code.
108
+
109
+ .. versionadded:: 0.20
110
+
111
+ Returns
112
+ -------
113
+ code : ndarray of shape (n_components, n_features)
114
+ The sparse codes.
115
+ """
116
+ n_samples, n_features = X.shape
117
+ n_components = dictionary.shape[0]
118
+
119
+ if algorithm == "lasso_lars":
120
+ alpha = float(regularization) / n_features # account for scaling
121
+ try:
122
+ err_mgt = np.seterr(all="ignore")
123
+
124
+ # Not passing in verbose=max(0, verbose-1) because Lars.fit already
125
+ # corrects the verbosity level.
126
+ lasso_lars = LassoLars(
127
+ alpha=alpha,
128
+ fit_intercept=False,
129
+ verbose=verbose,
130
+ precompute=gram,
131
+ fit_path=False,
132
+ positive=positive,
133
+ max_iter=max_iter,
134
+ )
135
+ lasso_lars.fit(dictionary.T, X.T, Xy=cov)
136
+ new_code = lasso_lars.coef_
137
+ finally:
138
+ np.seterr(**err_mgt)
139
+
140
+ elif algorithm == "lasso_cd":
141
+ alpha = float(regularization) / n_features # account for scaling
142
+
143
+ # TODO: Make verbosity argument for Lasso?
144
+ # sklearn.linear_model.coordinate_descent.enet_path has a verbosity
145
+ # argument that we could pass in from Lasso.
146
+ clf = Lasso(
147
+ alpha=alpha,
148
+ fit_intercept=False,
149
+ precompute=gram,
150
+ max_iter=max_iter,
151
+ warm_start=True,
152
+ positive=positive,
153
+ )
154
+
155
+ if init is not None:
156
+ # In some workflows using coordinate descent algorithms:
157
+ # - users might provide NumPy arrays with read-only buffers
158
+ # - `joblib` might memmap arrays making their buffer read-only
159
+ # TODO: move this handling (which is currently too broad)
160
+ # closer to the actual private function which need buffers to be writable.
161
+ if not init.flags["WRITEABLE"]:
162
+ init = np.array(init)
163
+ clf.coef_ = init
164
+
165
+ clf.fit(dictionary.T, X.T, check_input=False)
166
+ new_code = clf.coef_
167
+
168
+ elif algorithm == "lars":
169
+ try:
170
+ err_mgt = np.seterr(all="ignore")
171
+
172
+ # Not passing in verbose=max(0, verbose-1) because Lars.fit already
173
+ # corrects the verbosity level.
174
+ lars = Lars(
175
+ fit_intercept=False,
176
+ verbose=verbose,
177
+ precompute=gram,
178
+ n_nonzero_coefs=int(regularization),
179
+ fit_path=False,
180
+ )
181
+ lars.fit(dictionary.T, X.T, Xy=cov)
182
+ new_code = lars.coef_
183
+ finally:
184
+ np.seterr(**err_mgt)
185
+
186
+ elif algorithm == "threshold":
187
+ new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T
188
+ if positive:
189
+ np.clip(new_code, 0, None, out=new_code)
190
+
191
+ elif algorithm == "omp":
192
+ new_code = orthogonal_mp_gram(
193
+ Gram=gram,
194
+ Xy=cov,
195
+ n_nonzero_coefs=int(regularization),
196
+ tol=None,
197
+ norms_squared=row_norms(X, squared=True),
198
+ copy_Xy=copy_cov,
199
+ ).T
200
+
201
+ return new_code.reshape(n_samples, n_components)
202
+
203
+
204
+ @validate_params(
205
+ {
206
+ "X": ["array-like"],
207
+ "dictionary": ["array-like"],
208
+ "gram": ["array-like", None],
209
+ "cov": ["array-like", None],
210
+ "algorithm": [
211
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
212
+ ],
213
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
214
+ "alpha": [Interval(Real, 0, None, closed="left"), None],
215
+ "copy_cov": ["boolean"],
216
+ "init": ["array-like", None],
217
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
218
+ "n_jobs": [Integral, None],
219
+ "check_input": ["boolean"],
220
+ "verbose": ["verbose"],
221
+ "positive": ["boolean"],
222
+ },
223
+ prefer_skip_nested_validation=True,
224
+ )
225
+ # XXX : could be moved to the linear_model module
226
+ def sparse_encode(
227
+ X,
228
+ dictionary,
229
+ *,
230
+ gram=None,
231
+ cov=None,
232
+ algorithm="lasso_lars",
233
+ n_nonzero_coefs=None,
234
+ alpha=None,
235
+ copy_cov=True,
236
+ init=None,
237
+ max_iter=1000,
238
+ n_jobs=None,
239
+ check_input=True,
240
+ verbose=0,
241
+ positive=False,
242
+ ):
243
+ """Sparse coding.
244
+
245
+ Each row of the result is the solution to a sparse coding problem.
246
+ The goal is to find a sparse array `code` such that::
247
+
248
+ X ~= code * dictionary
249
+
250
+ Read more in the :ref:`User Guide <SparseCoder>`.
251
+
252
+ Parameters
253
+ ----------
254
+ X : array-like of shape (n_samples, n_features)
255
+ Data matrix.
256
+
257
+ dictionary : array-like of shape (n_components, n_features)
258
+ The dictionary matrix against which to solve the sparse coding of
259
+ the data. Some of the algorithms assume normalized rows for meaningful
260
+ output.
261
+
262
+ gram : array-like of shape (n_components, n_components), default=None
263
+ Precomputed Gram matrix, `dictionary * dictionary'`.
264
+
265
+ cov : array-like of shape (n_components, n_samples), default=None
266
+ Precomputed covariance, `dictionary' * X`.
267
+
268
+ algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
269
+ default='lasso_lars'
270
+ The algorithm used:
271
+
272
+ * `'lars'`: uses the least angle regression method
273
+ (`linear_model.lars_path`);
274
+ * `'lasso_lars'`: uses Lars to compute the Lasso solution;
275
+ * `'lasso_cd'`: uses the coordinate descent method to compute the
276
+ Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
277
+ the estimated components are sparse;
278
+ * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
279
+ solution;
280
+ * `'threshold'`: squashes to zero all coefficients less than
281
+ regularization from the projection `dictionary * data'`.
282
+
283
+ n_nonzero_coefs : int, default=None
284
+ Number of nonzero coefficients to target in each column of the
285
+ solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
286
+ and is overridden by `alpha` in the `omp` case. If `None`, then
287
+ `n_nonzero_coefs=int(n_features / 10)`.
288
+
289
+ alpha : float, default=None
290
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
291
+ penalty applied to the L1 norm.
292
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
293
+ threshold below which coefficients will be squashed to zero.
294
+ If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
295
+ the reconstruction error targeted. In this case, it overrides
296
+ `n_nonzero_coefs`.
297
+ If `None`, default to 1.
298
+
299
+ copy_cov : bool, default=True
300
+ Whether to copy the precomputed covariance matrix; if `False`, it may
301
+ be overwritten.
302
+
303
+ init : ndarray of shape (n_samples, n_components), default=None
304
+ Initialization value of the sparse codes. Only used if
305
+ `algorithm='lasso_cd'`.
306
+
307
+ max_iter : int, default=1000
308
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
309
+ `'lasso_lars'`.
310
+
311
+ n_jobs : int, default=None
312
+ Number of parallel jobs to run.
313
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
314
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
315
+ for more details.
316
+
317
+ check_input : bool, default=True
318
+ If `False`, the input arrays X and dictionary will not be checked.
319
+
320
+ verbose : int, default=0
321
+ Controls the verbosity; the higher, the more messages.
322
+
323
+ positive : bool, default=False
324
+ Whether to enforce positivity when finding the encoding.
325
+
326
+ .. versionadded:: 0.20
327
+
328
+ Returns
329
+ -------
330
+ code : ndarray of shape (n_samples, n_components)
331
+ The sparse codes.
332
+
333
+ See Also
334
+ --------
335
+ sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso
336
+ path using LARS algorithm.
337
+ sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems.
338
+ sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
339
+ SparseCoder : Find a sparse representation of data from a fixed precomputed
340
+ dictionary.
341
+
342
+ Examples
343
+ --------
344
+ >>> import numpy as np
345
+ >>> from sklearn.decomposition import sparse_encode
346
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
347
+ >>> dictionary = np.array(
348
+ ... [[0, 1, 0],
349
+ ... [-1, -1, 2],
350
+ ... [1, 1, 1],
351
+ ... [0, 1, 1],
352
+ ... [0, 2, 1]],
353
+ ... dtype=np.float64
354
+ ... )
355
+ >>> sparse_encode(X, dictionary, alpha=1e-10)
356
+ array([[ 0., 0., -1., 0., 0.],
357
+ [ 0., 1., 1., 0., 0.]])
358
+ """
359
+ if check_input:
360
+ if algorithm == "lasso_cd":
361
+ dictionary = check_array(
362
+ dictionary, order="C", dtype=[np.float64, np.float32]
363
+ )
364
+ X = check_array(X, order="C", dtype=[np.float64, np.float32])
365
+ else:
366
+ dictionary = check_array(dictionary)
367
+ X = check_array(X)
368
+
369
+ if dictionary.shape[1] != X.shape[1]:
370
+ raise ValueError(
371
+ "Dictionary and X have different numbers of features:"
372
+ "dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape)
373
+ )
374
+
375
+ _check_positive_coding(algorithm, positive)
376
+
377
+ return _sparse_encode(
378
+ X,
379
+ dictionary,
380
+ gram=gram,
381
+ cov=cov,
382
+ algorithm=algorithm,
383
+ n_nonzero_coefs=n_nonzero_coefs,
384
+ alpha=alpha,
385
+ copy_cov=copy_cov,
386
+ init=init,
387
+ max_iter=max_iter,
388
+ n_jobs=n_jobs,
389
+ verbose=verbose,
390
+ positive=positive,
391
+ )
392
+
393
+
394
+ def _sparse_encode(
395
+ X,
396
+ dictionary,
397
+ *,
398
+ gram=None,
399
+ cov=None,
400
+ algorithm="lasso_lars",
401
+ n_nonzero_coefs=None,
402
+ alpha=None,
403
+ copy_cov=True,
404
+ init=None,
405
+ max_iter=1000,
406
+ n_jobs=None,
407
+ verbose=0,
408
+ positive=False,
409
+ ):
410
+ """Sparse coding without input/parameter validation."""
411
+
412
+ n_samples, n_features = X.shape
413
+ n_components = dictionary.shape[0]
414
+
415
+ if algorithm in ("lars", "omp"):
416
+ regularization = n_nonzero_coefs
417
+ if regularization is None:
418
+ regularization = min(max(n_features / 10, 1), n_components)
419
+ else:
420
+ regularization = alpha
421
+ if regularization is None:
422
+ regularization = 1.0
423
+
424
+ if gram is None and algorithm != "threshold":
425
+ gram = np.dot(dictionary, dictionary.T)
426
+
427
+ if cov is None and algorithm != "lasso_cd":
428
+ copy_cov = False
429
+ cov = np.dot(dictionary, X.T)
430
+
431
+ if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold":
432
+ code = _sparse_encode_precomputed(
433
+ X,
434
+ dictionary,
435
+ gram=gram,
436
+ cov=cov,
437
+ algorithm=algorithm,
438
+ regularization=regularization,
439
+ copy_cov=copy_cov,
440
+ init=init,
441
+ max_iter=max_iter,
442
+ verbose=verbose,
443
+ positive=positive,
444
+ )
445
+ return code
446
+
447
+ # Enter parallel code block
448
+ n_samples = X.shape[0]
449
+ n_components = dictionary.shape[0]
450
+ code = np.empty((n_samples, n_components))
451
+ slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
452
+
453
+ code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
454
+ delayed(_sparse_encode_precomputed)(
455
+ X[this_slice],
456
+ dictionary,
457
+ gram=gram,
458
+ cov=cov[:, this_slice] if cov is not None else None,
459
+ algorithm=algorithm,
460
+ regularization=regularization,
461
+ copy_cov=copy_cov,
462
+ init=init[this_slice] if init is not None else None,
463
+ max_iter=max_iter,
464
+ verbose=verbose,
465
+ positive=positive,
466
+ )
467
+ for this_slice in slices
468
+ )
469
+ for this_slice, this_view in zip(slices, code_views):
470
+ code[this_slice] = this_view
471
+ return code
472
+
473
+
474
+ def _update_dict(
475
+ dictionary,
476
+ Y,
477
+ code,
478
+ A=None,
479
+ B=None,
480
+ verbose=False,
481
+ random_state=None,
482
+ positive=False,
483
+ ):
484
+ """Update the dense dictionary factor in place.
485
+
486
+ Parameters
487
+ ----------
488
+ dictionary : ndarray of shape (n_components, n_features)
489
+ Value of the dictionary at the previous iteration.
490
+
491
+ Y : ndarray of shape (n_samples, n_features)
492
+ Data matrix.
493
+
494
+ code : ndarray of shape (n_samples, n_components)
495
+ Sparse coding of the data against which to optimize the dictionary.
496
+
497
+ A : ndarray of shape (n_components, n_components), default=None
498
+ Together with `B`, sufficient stats of the online model to update the
499
+ dictionary.
500
+
501
+ B : ndarray of shape (n_features, n_components), default=None
502
+ Together with `A`, sufficient stats of the online model to update the
503
+ dictionary.
504
+
505
+ verbose: bool, default=False
506
+ Degree of output the procedure will print.
507
+
508
+ random_state : int, RandomState instance or None, default=None
509
+ Used for randomly initializing the dictionary. Pass an int for
510
+ reproducible results across multiple function calls.
511
+ See :term:`Glossary <random_state>`.
512
+
513
+ positive : bool, default=False
514
+ Whether to enforce positivity when finding the dictionary.
515
+
516
+ .. versionadded:: 0.20
517
+ """
518
+ n_samples, n_components = code.shape
519
+ random_state = check_random_state(random_state)
520
+
521
+ if A is None:
522
+ A = code.T @ code
523
+ if B is None:
524
+ B = Y.T @ code
525
+
526
+ n_unused = 0
527
+
528
+ for k in range(n_components):
529
+ if A[k, k] > 1e-6:
530
+ # 1e-6 is arbitrary but consistent with the spams implementation
531
+ dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
532
+ else:
533
+ # kth atom is almost never used -> sample a new one from the data
534
+ newd = Y[random_state.choice(n_samples)]
535
+
536
+ # add small noise to avoid making the sparse coding ill conditioned
537
+ noise_level = 0.01 * (newd.std() or 1) # avoid 0 std
538
+ noise = random_state.normal(0, noise_level, size=len(newd))
539
+
540
+ dictionary[k] = newd + noise
541
+ code[:, k] = 0
542
+ n_unused += 1
543
+
544
+ if positive:
545
+ np.clip(dictionary[k], 0, None, out=dictionary[k])
546
+
547
+ # Projection on the constraint set ||V_k|| <= 1
548
+ dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
549
+
550
+ if verbose and n_unused > 0:
551
+ print(f"{n_unused} unused atoms resampled.")
552
+
553
+
554
+ def _dict_learning(
555
+ X,
556
+ n_components,
557
+ *,
558
+ alpha,
559
+ max_iter,
560
+ tol,
561
+ method,
562
+ n_jobs,
563
+ dict_init,
564
+ code_init,
565
+ callback,
566
+ verbose,
567
+ random_state,
568
+ return_n_iter,
569
+ positive_dict,
570
+ positive_code,
571
+ method_max_iter,
572
+ ):
573
+ """Main dictionary learning algorithm"""
574
+ t0 = time.time()
575
+ # Init the code and the dictionary with SVD of Y
576
+ if code_init is not None and dict_init is not None:
577
+ code = np.array(code_init, order="F")
578
+ # Don't copy V, it will happen below
579
+ dictionary = dict_init
580
+ else:
581
+ code, S, dictionary = linalg.svd(X, full_matrices=False)
582
+ # flip the initial code's sign to enforce deterministic output
583
+ code, dictionary = svd_flip(code, dictionary)
584
+ dictionary = S[:, np.newaxis] * dictionary
585
+ r = len(dictionary)
586
+ if n_components <= r: # True even if n_components=None
587
+ code = code[:, :n_components]
588
+ dictionary = dictionary[:n_components, :]
589
+ else:
590
+ code = np.c_[code, np.zeros((len(code), n_components - r))]
591
+ dictionary = np.r_[
592
+ dictionary, np.zeros((n_components - r, dictionary.shape[1]))
593
+ ]
594
+
595
+ # Fortran-order dict better suited for the sparse coding which is the
596
+ # bottleneck of this algorithm.
597
+ dictionary = np.asfortranarray(dictionary)
598
+
599
+ errors = []
600
+ current_cost = np.nan
601
+
602
+ if verbose == 1:
603
+ print("[dict_learning]", end=" ")
604
+
605
+ # If max_iter is 0, number of iterations returned should be zero
606
+ ii = -1
607
+
608
+ for ii in range(max_iter):
609
+ dt = time.time() - t0
610
+ if verbose == 1:
611
+ sys.stdout.write(".")
612
+ sys.stdout.flush()
613
+ elif verbose:
614
+ print(
615
+ "Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
616
+ % (ii, dt, dt / 60, current_cost)
617
+ )
618
+
619
+ # Update code
620
+ code = sparse_encode(
621
+ X,
622
+ dictionary,
623
+ algorithm=method,
624
+ alpha=alpha,
625
+ init=code,
626
+ n_jobs=n_jobs,
627
+ positive=positive_code,
628
+ max_iter=method_max_iter,
629
+ verbose=verbose,
630
+ )
631
+
632
+ # Update dictionary in place
633
+ _update_dict(
634
+ dictionary,
635
+ X,
636
+ code,
637
+ verbose=verbose,
638
+ random_state=random_state,
639
+ positive=positive_dict,
640
+ )
641
+
642
+ # Cost function
643
+ current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum(
644
+ np.abs(code)
645
+ )
646
+ errors.append(current_cost)
647
+
648
+ if ii > 0:
649
+ dE = errors[-2] - errors[-1]
650
+ # assert(dE >= -tol * errors[-1])
651
+ if dE < tol * errors[-1]:
652
+ if verbose == 1:
653
+ # A line return
654
+ print("")
655
+ elif verbose:
656
+ print("--- Convergence reached after %d iterations" % ii)
657
+ break
658
+ if ii % 5 == 0 and callback is not None:
659
+ callback(locals())
660
+
661
+ if return_n_iter:
662
+ return code, dictionary, errors, ii + 1
663
+ else:
664
+ return code, dictionary, errors
665
+
666
+
667
+ def dict_learning_online(
668
+ X,
669
+ n_components=2,
670
+ *,
671
+ alpha=1,
672
+ max_iter=100,
673
+ return_code=True,
674
+ dict_init=None,
675
+ callback=None,
676
+ batch_size=256,
677
+ verbose=False,
678
+ shuffle=True,
679
+ n_jobs=None,
680
+ method="lars",
681
+ random_state=None,
682
+ positive_dict=False,
683
+ positive_code=False,
684
+ method_max_iter=1000,
685
+ tol=1e-3,
686
+ max_no_improvement=10,
687
+ ):
688
+ """Solve a dictionary learning matrix factorization problem online.
689
+
690
+ Finds the best dictionary and the corresponding sparse code for
691
+ approximating the data matrix X by solving::
692
+
693
+ (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
694
+ (U,V)
695
+ with || V_k ||_2 = 1 for all 0 <= k < n_components
696
+
697
+ where V is the dictionary and U is the sparse code. ||.||_Fro stands for
698
+ the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
699
+ which is the sum of the absolute values of all the entries in the matrix.
700
+ This is accomplished by repeatedly iterating over mini-batches by slicing
701
+ the input data.
702
+
703
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
704
+
705
+ Parameters
706
+ ----------
707
+ X : ndarray of shape (n_samples, n_features)
708
+ Data matrix.
709
+
710
+ n_components : int or None, default=2
711
+ Number of dictionary atoms to extract. If None, then ``n_components``
712
+ is set to ``n_features``.
713
+
714
+ alpha : float, default=1
715
+ Sparsity controlling parameter.
716
+
717
+ max_iter : int, default=100
718
+ Maximum number of iterations over the complete dataset before
719
+ stopping independently of any early stopping criterion heuristics.
720
+
721
+ .. versionadded:: 1.1
722
+
723
+ .. deprecated:: 1.4
724
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
725
+ Use the default value (i.e. `100`) instead.
726
+
727
+ return_code : bool, default=True
728
+ Whether to also return the code U or just the dictionary `V`.
729
+
730
+ dict_init : ndarray of shape (n_components, n_features), default=None
731
+ Initial values for the dictionary for warm restart scenarios.
732
+ If `None`, the initial values for the dictionary are created
733
+ with an SVD decomposition of the data via
734
+ :func:`~sklearn.utils.extmath.randomized_svd`.
735
+
736
+ callback : callable, default=None
737
+ A callable that gets invoked at the end of each iteration.
738
+
739
+ batch_size : int, default=256
740
+ The number of samples to take in each batch.
741
+
742
+ .. versionchanged:: 1.3
743
+ The default value of `batch_size` changed from 3 to 256 in version 1.3.
744
+
745
+ verbose : bool, default=False
746
+ To control the verbosity of the procedure.
747
+
748
+ shuffle : bool, default=True
749
+ Whether to shuffle the data before splitting it in batches.
750
+
751
+ n_jobs : int, default=None
752
+ Number of parallel jobs to run.
753
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
754
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
755
+ for more details.
756
+
757
+ method : {'lars', 'cd'}, default='lars'
758
+ * `'lars'`: uses the least angle regression method to solve the lasso
759
+ problem (`linear_model.lars_path`);
760
+ * `'cd'`: uses the coordinate descent method to compute the
761
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
762
+ the estimated components are sparse.
763
+
764
+ random_state : int, RandomState instance or None, default=None
765
+ Used for initializing the dictionary when ``dict_init`` is not
766
+ specified, randomly shuffling the data when ``shuffle`` is set to
767
+ ``True``, and updating the dictionary. Pass an int for reproducible
768
+ results across multiple function calls.
769
+ See :term:`Glossary <random_state>`.
770
+
771
+ positive_dict : bool, default=False
772
+ Whether to enforce positivity when finding the dictionary.
773
+
774
+ .. versionadded:: 0.20
775
+
776
+ positive_code : bool, default=False
777
+ Whether to enforce positivity when finding the code.
778
+
779
+ .. versionadded:: 0.20
780
+
781
+ method_max_iter : int, default=1000
782
+ Maximum number of iterations to perform when solving the lasso problem.
783
+
784
+ .. versionadded:: 0.22
785
+
786
+ tol : float, default=1e-3
787
+ Control early stopping based on the norm of the differences in the
788
+ dictionary between 2 steps.
789
+
790
+ To disable early stopping based on changes in the dictionary, set
791
+ `tol` to 0.0.
792
+
793
+ .. versionadded:: 1.1
794
+
795
+ max_no_improvement : int, default=10
796
+ Control early stopping based on the consecutive number of mini batches
797
+ that does not yield an improvement on the smoothed cost function.
798
+
799
+ To disable convergence detection based on cost function, set
800
+ `max_no_improvement` to None.
801
+
802
+ .. versionadded:: 1.1
803
+
804
+ Returns
805
+ -------
806
+ code : ndarray of shape (n_samples, n_components),
807
+ The sparse code (only returned if `return_code=True`).
808
+
809
+ dictionary : ndarray of shape (n_components, n_features),
810
+ The solutions to the dictionary learning problem.
811
+
812
+ n_iter : int
813
+ Number of iterations run. Returned only if `return_n_iter` is
814
+ set to `True`.
815
+
816
+ See Also
817
+ --------
818
+ dict_learning : Solve a dictionary learning matrix factorization problem.
819
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
820
+ MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary
821
+ learning algorithm.
822
+ SparsePCA : Sparse Principal Components Analysis.
823
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
824
+
825
+ Examples
826
+ --------
827
+ >>> import numpy as np
828
+ >>> from sklearn.datasets import make_sparse_coded_signal
829
+ >>> from sklearn.decomposition import dict_learning_online
830
+ >>> X, _, _ = make_sparse_coded_signal(
831
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
832
+ ... random_state=42,
833
+ ... )
834
+ >>> U, V = dict_learning_online(
835
+ ... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
836
+ ... )
837
+
838
+ We can check the level of sparsity of `U`:
839
+
840
+ >>> np.mean(U == 0)
841
+ 0.53...
842
+
843
+ We can compare the average squared euclidean norm of the reconstruction
844
+ error of the sparse coded signal relative to the squared euclidean norm of
845
+ the original signal:
846
+
847
+ >>> X_hat = U @ V
848
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
849
+ 0.05...
850
+ """
851
+ # TODO(1.6): remove in 1.6
852
+ if max_iter is None:
853
+ warn(
854
+ (
855
+ "`max_iter=None` is deprecated in version 1.4 and will be removed in "
856
+ "version 1.6. Use the default value (i.e. `100`) instead."
857
+ ),
858
+ FutureWarning,
859
+ )
860
+ max_iter = 100
861
+
862
+ transform_algorithm = "lasso_" + method
863
+
864
+ est = MiniBatchDictionaryLearning(
865
+ n_components=n_components,
866
+ alpha=alpha,
867
+ max_iter=max_iter,
868
+ n_jobs=n_jobs,
869
+ fit_algorithm=method,
870
+ batch_size=batch_size,
871
+ shuffle=shuffle,
872
+ dict_init=dict_init,
873
+ random_state=random_state,
874
+ transform_algorithm=transform_algorithm,
875
+ transform_alpha=alpha,
876
+ positive_code=positive_code,
877
+ positive_dict=positive_dict,
878
+ transform_max_iter=method_max_iter,
879
+ verbose=verbose,
880
+ callback=callback,
881
+ tol=tol,
882
+ max_no_improvement=max_no_improvement,
883
+ ).fit(X)
884
+
885
+ if not return_code:
886
+ return est.components_
887
+ else:
888
+ code = est.transform(X)
889
+ return code, est.components_
890
+
891
+
892
+ @validate_params(
893
+ {
894
+ "X": ["array-like"],
895
+ "method": [StrOptions({"lars", "cd"})],
896
+ "return_n_iter": ["boolean"],
897
+ "method_max_iter": [Interval(Integral, 0, None, closed="left")],
898
+ },
899
+ prefer_skip_nested_validation=False,
900
+ )
901
+ def dict_learning(
902
+ X,
903
+ n_components,
904
+ *,
905
+ alpha,
906
+ max_iter=100,
907
+ tol=1e-8,
908
+ method="lars",
909
+ n_jobs=None,
910
+ dict_init=None,
911
+ code_init=None,
912
+ callback=None,
913
+ verbose=False,
914
+ random_state=None,
915
+ return_n_iter=False,
916
+ positive_dict=False,
917
+ positive_code=False,
918
+ method_max_iter=1000,
919
+ ):
920
+ """Solve a dictionary learning matrix factorization problem.
921
+
922
+ Finds the best dictionary and the corresponding sparse code for
923
+ approximating the data matrix X by solving::
924
+
925
+ (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
926
+ (U,V)
927
+ with || V_k ||_2 = 1 for all 0 <= k < n_components
928
+
929
+ where V is the dictionary and U is the sparse code. ||.||_Fro stands for
930
+ the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
931
+ which is the sum of the absolute values of all the entries in the matrix.
932
+
933
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
934
+
935
+ Parameters
936
+ ----------
937
+ X : array-like of shape (n_samples, n_features)
938
+ Data matrix.
939
+
940
+ n_components : int
941
+ Number of dictionary atoms to extract.
942
+
943
+ alpha : int or float
944
+ Sparsity controlling parameter.
945
+
946
+ max_iter : int, default=100
947
+ Maximum number of iterations to perform.
948
+
949
+ tol : float, default=1e-8
950
+ Tolerance for the stopping condition.
951
+
952
+ method : {'lars', 'cd'}, default='lars'
953
+ The method used:
954
+
955
+ * `'lars'`: uses the least angle regression method to solve the lasso
956
+ problem (`linear_model.lars_path`);
957
+ * `'cd'`: uses the coordinate descent method to compute the
958
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
959
+ the estimated components are sparse.
960
+
961
+ n_jobs : int, default=None
962
+ Number of parallel jobs to run.
963
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
964
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
965
+ for more details.
966
+
967
+ dict_init : ndarray of shape (n_components, n_features), default=None
968
+ Initial value for the dictionary for warm restart scenarios. Only used
969
+ if `code_init` and `dict_init` are not None.
970
+
971
+ code_init : ndarray of shape (n_samples, n_components), default=None
972
+ Initial value for the sparse code for warm restart scenarios. Only used
973
+ if `code_init` and `dict_init` are not None.
974
+
975
+ callback : callable, default=None
976
+ Callable that gets invoked every five iterations.
977
+
978
+ verbose : bool, default=False
979
+ To control the verbosity of the procedure.
980
+
981
+ random_state : int, RandomState instance or None, default=None
982
+ Used for randomly initializing the dictionary. Pass an int for
983
+ reproducible results across multiple function calls.
984
+ See :term:`Glossary <random_state>`.
985
+
986
+ return_n_iter : bool, default=False
987
+ Whether or not to return the number of iterations.
988
+
989
+ positive_dict : bool, default=False
990
+ Whether to enforce positivity when finding the dictionary.
991
+
992
+ .. versionadded:: 0.20
993
+
994
+ positive_code : bool, default=False
995
+ Whether to enforce positivity when finding the code.
996
+
997
+ .. versionadded:: 0.20
998
+
999
+ method_max_iter : int, default=1000
1000
+ Maximum number of iterations to perform.
1001
+
1002
+ .. versionadded:: 0.22
1003
+
1004
+ Returns
1005
+ -------
1006
+ code : ndarray of shape (n_samples, n_components)
1007
+ The sparse code factor in the matrix factorization.
1008
+
1009
+ dictionary : ndarray of shape (n_components, n_features),
1010
+ The dictionary factor in the matrix factorization.
1011
+
1012
+ errors : array
1013
+ Vector of errors at each iteration.
1014
+
1015
+ n_iter : int
1016
+ Number of iterations run. Returned only if `return_n_iter` is
1017
+ set to True.
1018
+
1019
+ See Also
1020
+ --------
1021
+ dict_learning_online : Solve a dictionary learning matrix factorization
1022
+ problem online.
1023
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1024
+ MiniBatchDictionaryLearning : A faster, less accurate version
1025
+ of the dictionary learning algorithm.
1026
+ SparsePCA : Sparse Principal Components Analysis.
1027
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1028
+
1029
+ Examples
1030
+ --------
1031
+ >>> import numpy as np
1032
+ >>> from sklearn.datasets import make_sparse_coded_signal
1033
+ >>> from sklearn.decomposition import dict_learning
1034
+ >>> X, _, _ = make_sparse_coded_signal(
1035
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1036
+ ... random_state=42,
1037
+ ... )
1038
+ >>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)
1039
+
1040
+ We can check the level of sparsity of `U`:
1041
+
1042
+ >>> np.mean(U == 0)
1043
+ 0.6...
1044
+
1045
+ We can compare the average squared euclidean norm of the reconstruction
1046
+ error of the sparse coded signal relative to the squared euclidean norm of
1047
+ the original signal:
1048
+
1049
+ >>> X_hat = U @ V
1050
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1051
+ 0.01...
1052
+ """
1053
+ estimator = DictionaryLearning(
1054
+ n_components=n_components,
1055
+ alpha=alpha,
1056
+ max_iter=max_iter,
1057
+ tol=tol,
1058
+ fit_algorithm=method,
1059
+ n_jobs=n_jobs,
1060
+ dict_init=dict_init,
1061
+ callback=callback,
1062
+ code_init=code_init,
1063
+ verbose=verbose,
1064
+ random_state=random_state,
1065
+ positive_code=positive_code,
1066
+ positive_dict=positive_dict,
1067
+ transform_max_iter=method_max_iter,
1068
+ ).set_output(transform="default")
1069
+ code = estimator.fit_transform(X)
1070
+ if return_n_iter:
1071
+ return (
1072
+ code,
1073
+ estimator.components_,
1074
+ estimator.error_,
1075
+ estimator.n_iter_,
1076
+ )
1077
+ return code, estimator.components_, estimator.error_
1078
+
1079
+
1080
+ class _BaseSparseCoding(ClassNamePrefixFeaturesOutMixin, TransformerMixin):
1081
+ """Base class from SparseCoder and DictionaryLearning algorithms."""
1082
+
1083
+ def __init__(
1084
+ self,
1085
+ transform_algorithm,
1086
+ transform_n_nonzero_coefs,
1087
+ transform_alpha,
1088
+ split_sign,
1089
+ n_jobs,
1090
+ positive_code,
1091
+ transform_max_iter,
1092
+ ):
1093
+ self.transform_algorithm = transform_algorithm
1094
+ self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
1095
+ self.transform_alpha = transform_alpha
1096
+ self.transform_max_iter = transform_max_iter
1097
+ self.split_sign = split_sign
1098
+ self.n_jobs = n_jobs
1099
+ self.positive_code = positive_code
1100
+
1101
+ def _transform(self, X, dictionary):
1102
+ """Private method allowing to accommodate both DictionaryLearning and
1103
+ SparseCoder."""
1104
+ X = self._validate_data(X, reset=False)
1105
+
1106
+ if hasattr(self, "alpha") and self.transform_alpha is None:
1107
+ transform_alpha = self.alpha
1108
+ else:
1109
+ transform_alpha = self.transform_alpha
1110
+
1111
+ code = sparse_encode(
1112
+ X,
1113
+ dictionary,
1114
+ algorithm=self.transform_algorithm,
1115
+ n_nonzero_coefs=self.transform_n_nonzero_coefs,
1116
+ alpha=transform_alpha,
1117
+ max_iter=self.transform_max_iter,
1118
+ n_jobs=self.n_jobs,
1119
+ positive=self.positive_code,
1120
+ )
1121
+
1122
+ if self.split_sign:
1123
+ # feature vector is split into a positive and negative side
1124
+ n_samples, n_features = code.shape
1125
+ split_code = np.empty((n_samples, 2 * n_features))
1126
+ split_code[:, :n_features] = np.maximum(code, 0)
1127
+ split_code[:, n_features:] = -np.minimum(code, 0)
1128
+ code = split_code
1129
+
1130
+ return code
1131
+
1132
+ def transform(self, X):
1133
+ """Encode the data as a sparse combination of the dictionary atoms.
1134
+
1135
+ Coding method is determined by the object parameter
1136
+ `transform_algorithm`.
1137
+
1138
+ Parameters
1139
+ ----------
1140
+ X : ndarray of shape (n_samples, n_features)
1141
+ Test data to be transformed, must have the same number of
1142
+ features as the data used to train the model.
1143
+
1144
+ Returns
1145
+ -------
1146
+ X_new : ndarray of shape (n_samples, n_components)
1147
+ Transformed data.
1148
+ """
1149
+ check_is_fitted(self)
1150
+ return self._transform(X, self.components_)
1151
+
1152
+
1153
+ class SparseCoder(_BaseSparseCoding, BaseEstimator):
1154
+ """Sparse coding.
1155
+
1156
+ Finds a sparse representation of data against a fixed, precomputed
1157
+ dictionary.
1158
+
1159
+ Each row of the result is the solution to a sparse coding problem.
1160
+ The goal is to find a sparse array `code` such that::
1161
+
1162
+ X ~= code * dictionary
1163
+
1164
+ Read more in the :ref:`User Guide <SparseCoder>`.
1165
+
1166
+ Parameters
1167
+ ----------
1168
+ dictionary : ndarray of shape (n_components, n_features)
1169
+ The dictionary atoms used for sparse coding. Rows are assumed to be
1170
+ normalized to unit norm.
1171
+
1172
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1173
+ 'threshold'}, default='omp'
1174
+ Algorithm used to transform the data:
1175
+
1176
+ - `'lars'`: uses the least angle regression method
1177
+ (`linear_model.lars_path`);
1178
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution;
1179
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1180
+ Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if
1181
+ the estimated components are sparse;
1182
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1183
+ solution;
1184
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1185
+ the projection ``dictionary * X'``.
1186
+
1187
+ transform_n_nonzero_coefs : int, default=None
1188
+ Number of nonzero coefficients to target in each column of the
1189
+ solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
1190
+ and is overridden by `alpha` in the `omp` case. If `None`, then
1191
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1192
+
1193
+ transform_alpha : float, default=None
1194
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1195
+ penalty applied to the L1 norm.
1196
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1197
+ threshold below which coefficients will be squashed to zero.
1198
+ If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
1199
+ the reconstruction error targeted. In this case, it overrides
1200
+ `n_nonzero_coefs`.
1201
+ If `None`, defaults to 1.
1202
+
1203
+ split_sign : bool, default=False
1204
+ Whether to split the sparse feature vector into the concatenation of
1205
+ its negative part and its positive part. This can improve the
1206
+ performance of downstream classifiers.
1207
+
1208
+ n_jobs : int, default=None
1209
+ Number of parallel jobs to run.
1210
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1211
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1212
+ for more details.
1213
+
1214
+ positive_code : bool, default=False
1215
+ Whether to enforce positivity when finding the code.
1216
+
1217
+ .. versionadded:: 0.20
1218
+
1219
+ transform_max_iter : int, default=1000
1220
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1221
+ `'lasso_lars'`.
1222
+
1223
+ .. versionadded:: 0.22
1224
+
1225
+ Attributes
1226
+ ----------
1227
+ n_components_ : int
1228
+ Number of atoms.
1229
+
1230
+ n_features_in_ : int
1231
+ Number of features seen during :term:`fit`.
1232
+
1233
+ .. versionadded:: 0.24
1234
+
1235
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1236
+ Names of features seen during :term:`fit`. Defined only when `X`
1237
+ has feature names that are all strings.
1238
+
1239
+ .. versionadded:: 1.0
1240
+
1241
+ See Also
1242
+ --------
1243
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1244
+ MiniBatchDictionaryLearning : A faster, less accurate, version of the
1245
+ dictionary learning algorithm.
1246
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1247
+ SparsePCA : Sparse Principal Components Analysis.
1248
+ sparse_encode : Sparse coding where each row of the result is the solution
1249
+ to a sparse coding problem.
1250
+
1251
+ Examples
1252
+ --------
1253
+ >>> import numpy as np
1254
+ >>> from sklearn.decomposition import SparseCoder
1255
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
1256
+ >>> dictionary = np.array(
1257
+ ... [[0, 1, 0],
1258
+ ... [-1, -1, 2],
1259
+ ... [1, 1, 1],
1260
+ ... [0, 1, 1],
1261
+ ... [0, 2, 1]],
1262
+ ... dtype=np.float64
1263
+ ... )
1264
+ >>> coder = SparseCoder(
1265
+ ... dictionary=dictionary, transform_algorithm='lasso_lars',
1266
+ ... transform_alpha=1e-10,
1267
+ ... )
1268
+ >>> coder.transform(X)
1269
+ array([[ 0., 0., -1., 0., 0.],
1270
+ [ 0., 1., 1., 0., 0.]])
1271
+ """
1272
+
1273
+ _required_parameters = ["dictionary"]
1274
+
1275
+ def __init__(
1276
+ self,
1277
+ dictionary,
1278
+ *,
1279
+ transform_algorithm="omp",
1280
+ transform_n_nonzero_coefs=None,
1281
+ transform_alpha=None,
1282
+ split_sign=False,
1283
+ n_jobs=None,
1284
+ positive_code=False,
1285
+ transform_max_iter=1000,
1286
+ ):
1287
+ super().__init__(
1288
+ transform_algorithm,
1289
+ transform_n_nonzero_coefs,
1290
+ transform_alpha,
1291
+ split_sign,
1292
+ n_jobs,
1293
+ positive_code,
1294
+ transform_max_iter,
1295
+ )
1296
+ self.dictionary = dictionary
1297
+
1298
+ def fit(self, X, y=None):
1299
+ """Do nothing and return the estimator unchanged.
1300
+
1301
+ This method is just there to implement the usual API and hence
1302
+ work in pipelines.
1303
+
1304
+ Parameters
1305
+ ----------
1306
+ X : Ignored
1307
+ Not used, present for API consistency by convention.
1308
+
1309
+ y : Ignored
1310
+ Not used, present for API consistency by convention.
1311
+
1312
+ Returns
1313
+ -------
1314
+ self : object
1315
+ Returns the instance itself.
1316
+ """
1317
+ return self
1318
+
1319
+ def transform(self, X, y=None):
1320
+ """Encode the data as a sparse combination of the dictionary atoms.
1321
+
1322
+ Coding method is determined by the object parameter
1323
+ `transform_algorithm`.
1324
+
1325
+ Parameters
1326
+ ----------
1327
+ X : ndarray of shape (n_samples, n_features)
1328
+ Training vector, where `n_samples` is the number of samples
1329
+ and `n_features` is the number of features.
1330
+
1331
+ y : Ignored
1332
+ Not used, present for API consistency by convention.
1333
+
1334
+ Returns
1335
+ -------
1336
+ X_new : ndarray of shape (n_samples, n_components)
1337
+ Transformed data.
1338
+ """
1339
+ return super()._transform(X, self.dictionary)
1340
+
1341
+ def _more_tags(self):
1342
+ return {
1343
+ "requires_fit": False,
1344
+ "preserves_dtype": [np.float64, np.float32],
1345
+ }
1346
+
1347
+ @property
1348
+ def n_components_(self):
1349
+ """Number of atoms."""
1350
+ return self.dictionary.shape[0]
1351
+
1352
+ @property
1353
+ def n_features_in_(self):
1354
+ """Number of features seen during `fit`."""
1355
+ return self.dictionary.shape[1]
1356
+
1357
+ @property
1358
+ def _n_features_out(self):
1359
+ """Number of transformed output features."""
1360
+ return self.n_components_
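Since `fit` is a no-op, `SparseCoder` can be dropped directly into a `Pipeline` in front of a supervised estimator. A minimal sketch with a synthetic dictionary (rows normalized to unit norm, as the docstring assumes) and synthetic data:

import numpy as np
from sklearn.decomposition import SparseCoder
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline

rng = np.random.RandomState(0)
D = rng.randn(8, 5)
D /= np.linalg.norm(D, axis=1, keepdims=True)  # unit-norm atoms

X = rng.randn(20, 5)
y = rng.randn(20)

model = make_pipeline(
    SparseCoder(dictionary=D, transform_algorithm="lasso_lars", transform_alpha=0.1),
    Ridge(),
)
model.fit(X, y)              # the coder's fit does nothing; Ridge fits on the codes
print(model.predict(X[:3]))  # three predictions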
1361
+
1362
+
1363
+ class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
1364
+ """Dictionary learning.
1365
+
1366
+ Finds a dictionary (a set of atoms) that performs well at sparsely
1367
+ encoding the fitted data.
1368
+
1369
+ Solves the optimization problem::
1370
+
1371
+ (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
1372
+ (U,V)
1373
+ with || V_k ||_2 <= 1 for all 0 <= k < n_components
1374
+
1375
+ ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
1376
+ the entry-wise matrix norm which is the sum of the absolute values
1377
+ of all the entries in the matrix.
1378
+
1379
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
1380
+
1381
+ Parameters
1382
+ ----------
1383
+ n_components : int, default=None
1384
+ Number of dictionary elements to extract. If None, then ``n_components``
1385
+ is set to ``n_features``.
1386
+
1387
+ alpha : float, default=1.0
1388
+ Sparsity controlling parameter.
1389
+
1390
+ max_iter : int, default=1000
1391
+ Maximum number of iterations to perform.
1392
+
1393
+ tol : float, default=1e-8
1394
+ Tolerance for numerical error.
1395
+
1396
+ fit_algorithm : {'lars', 'cd'}, default='lars'
1397
+ * `'lars'`: uses the least angle regression method to solve the lasso
1398
+ problem (:func:`~sklearn.linear_model.lars_path`);
1399
+ * `'cd'`: uses the coordinate descent method to compute the
1400
+ Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be
1401
+ faster if the estimated components are sparse.
1402
+
1403
+ .. versionadded:: 0.17
1404
+ *cd* coordinate descent method to improve speed.
1405
+
1406
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1407
+ 'threshold'}, default='omp'
1408
+ Algorithm used to transform the data:
1409
+
1410
+ - `'lars'`: uses the least angle regression method
1411
+ (:func:`~sklearn.linear_model.lars_path`);
1412
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution.
1413
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1414
+ Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'`
1415
+ will be faster if the estimated components are sparse.
1416
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1417
+ solution.
1418
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1419
+ the projection ``dictionary * X'``.
1420
+
1421
+ .. versionadded:: 0.17
1422
+ *lasso_cd* coordinate descent method to improve speed.
1423
+
1424
+ transform_n_nonzero_coefs : int, default=None
1425
+ Number of nonzero coefficients to target in each column of the
1426
+ solution. This is only used by `algorithm='lars'` and
1427
+ `algorithm='omp'`. If `None`, then
1428
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1429
+
1430
+ transform_alpha : float, default=None
1431
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1432
+ penalty applied to the L1 norm.
1433
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1434
+ threshold below which coefficients will be squashed to zero.
1435
+ If `None`, defaults to `alpha`.
1436
+
1437
+ .. versionchanged:: 1.2
1438
+ When None, default value changed from 1.0 to `alpha`.
1439
+
1440
+ n_jobs : int or None, default=None
1441
+ Number of parallel jobs to run.
1442
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1443
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1444
+ for more details.
1445
+
1446
+ code_init : ndarray of shape (n_samples, n_components), default=None
1447
+ Initial value for the code, for warm restart. Only used if `code_init`
1448
+ and `dict_init` are not None.
1449
+
1450
+ dict_init : ndarray of shape (n_components, n_features), default=None
1451
+ Initial values for the dictionary, for warm restart. Only used if
1452
+ `code_init` and `dict_init` are not None.
1453
+
1454
+ callback : callable, default=None
1455
+ Callable that gets invoked every five iterations.
1456
+
1457
+ .. versionadded:: 1.3
1458
+
1459
+ verbose : bool, default=False
1460
+ To control the verbosity of the procedure.
1461
+
1462
+ split_sign : bool, default=False
1463
+ Whether to split the sparse feature vector into the concatenation of
1464
+ its negative part and its positive part. This can improve the
1465
+ performance of downstream classifiers.
1466
+
1467
+ random_state : int, RandomState instance or None, default=None
1468
+ Used for initializing the dictionary when ``dict_init`` is not
1469
+ specified, randomly shuffling the data when ``shuffle`` is set to
1470
+ ``True``, and updating the dictionary. Pass an int for reproducible
1471
+ results across multiple function calls.
1472
+ See :term:`Glossary <random_state>`.
1473
+
1474
+ positive_code : bool, default=False
1475
+ Whether to enforce positivity when finding the code.
1476
+
1477
+ .. versionadded:: 0.20
1478
+
1479
+ positive_dict : bool, default=False
1480
+ Whether to enforce positivity when finding the dictionary.
1481
+
1482
+ .. versionadded:: 0.20
1483
+
1484
+ transform_max_iter : int, default=1000
1485
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1486
+ `'lasso_lars'`.
1487
+
1488
+ .. versionadded:: 0.22
1489
+
1490
+ Attributes
1491
+ ----------
1492
+ components_ : ndarray of shape (n_components, n_features)
1493
+ Dictionary atoms extracted from the data.
1494
+
1495
+ error_ : array
1496
+ Vector of errors at each iteration.
1497
+
1498
+ n_features_in_ : int
1499
+ Number of features seen during :term:`fit`.
1500
+
1501
+ .. versionadded:: 0.24
1502
+
1503
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1504
+ Names of features seen during :term:`fit`. Defined only when `X`
1505
+ has feature names that are all strings.
1506
+
1507
+ .. versionadded:: 1.0
1508
+
1509
+ n_iter_ : int
1510
+ Number of iterations run.
1511
+
1512
+ See Also
1513
+ --------
1514
+ MiniBatchDictionaryLearning: A faster, less accurate, version of the
1515
+ dictionary learning algorithm.
1516
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1517
+ SparseCoder : Find a sparse representation of data from a fixed,
1518
+ precomputed dictionary.
1519
+ SparsePCA : Sparse Principal Components Analysis.
1520
+
1521
+ References
1522
+ ----------
1523
+
1524
+ J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
1525
+ for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
1526
+
1527
+ Examples
1528
+ --------
1529
+ >>> import numpy as np
1530
+ >>> from sklearn.datasets import make_sparse_coded_signal
1531
+ >>> from sklearn.decomposition import DictionaryLearning
1532
+ >>> X, dictionary, code = make_sparse_coded_signal(
1533
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1534
+ ... random_state=42,
1535
+ ... )
1536
+ >>> dict_learner = DictionaryLearning(
1537
+ ... n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.1,
1538
+ ... random_state=42,
1539
+ ... )
1540
+ >>> X_transformed = dict_learner.fit(X).transform(X)
1541
+
1542
+ We can check the level of sparsity of `X_transformed`:
1543
+
1544
+ >>> np.mean(X_transformed == 0)
1545
+ 0.52...
1546
+
1547
+ We can compare the average squared euclidean norm of the reconstruction
1548
+ error of the sparse coded signal relative to the squared euclidean norm of
1549
+ the original signal:
1550
+
1551
+ >>> X_hat = X_transformed @ dict_learner.components_
1552
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1553
+ 0.05...
1554
+ """
1555
+
1556
+ _parameter_constraints: dict = {
1557
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
1558
+ "alpha": [Interval(Real, 0, None, closed="left")],
1559
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
1560
+ "tol": [Interval(Real, 0, None, closed="left")],
1561
+ "fit_algorithm": [StrOptions({"lars", "cd"})],
1562
+ "transform_algorithm": [
1563
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
1564
+ ],
1565
+ "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
1566
+ "transform_alpha": [Interval(Real, 0, None, closed="left"), None],
1567
+ "n_jobs": [Integral, None],
1568
+ "code_init": [np.ndarray, None],
1569
+ "dict_init": [np.ndarray, None],
1570
+ "callback": [callable, None],
1571
+ "verbose": ["verbose"],
1572
+ "split_sign": ["boolean"],
1573
+ "random_state": ["random_state"],
1574
+ "positive_code": ["boolean"],
1575
+ "positive_dict": ["boolean"],
1576
+ "transform_max_iter": [Interval(Integral, 0, None, closed="left")],
1577
+ }
1578
+
1579
+ def __init__(
1580
+ self,
1581
+ n_components=None,
1582
+ *,
1583
+ alpha=1,
1584
+ max_iter=1000,
1585
+ tol=1e-8,
1586
+ fit_algorithm="lars",
1587
+ transform_algorithm="omp",
1588
+ transform_n_nonzero_coefs=None,
1589
+ transform_alpha=None,
1590
+ n_jobs=None,
1591
+ code_init=None,
1592
+ dict_init=None,
1593
+ callback=None,
1594
+ verbose=False,
1595
+ split_sign=False,
1596
+ random_state=None,
1597
+ positive_code=False,
1598
+ positive_dict=False,
1599
+ transform_max_iter=1000,
1600
+ ):
1601
+ super().__init__(
1602
+ transform_algorithm,
1603
+ transform_n_nonzero_coefs,
1604
+ transform_alpha,
1605
+ split_sign,
1606
+ n_jobs,
1607
+ positive_code,
1608
+ transform_max_iter,
1609
+ )
1610
+ self.n_components = n_components
1611
+ self.alpha = alpha
1612
+ self.max_iter = max_iter
1613
+ self.tol = tol
1614
+ self.fit_algorithm = fit_algorithm
1615
+ self.code_init = code_init
1616
+ self.dict_init = dict_init
1617
+ self.callback = callback
1618
+ self.verbose = verbose
1619
+ self.random_state = random_state
1620
+ self.positive_dict = positive_dict
1621
+
1622
+ def fit(self, X, y=None):
1623
+ """Fit the model from data in X.
1624
+
1625
+ Parameters
1626
+ ----------
1627
+ X : array-like of shape (n_samples, n_features)
1628
+ Training vector, where `n_samples` is the number of samples
1629
+ and `n_features` is the number of features.
1630
+
1631
+ y : Ignored
1632
+ Not used, present for API consistency by convention.
1633
+
1634
+ Returns
1635
+ -------
1636
+ self : object
1637
+ Returns the instance itself.
1638
+ """
1639
+ self.fit_transform(X)
1640
+ return self
1641
+
1642
+ @_fit_context(prefer_skip_nested_validation=True)
1643
+ def fit_transform(self, X, y=None):
1644
+ """Fit the model from data in X and return the transformed data.
1645
+
1646
+ Parameters
1647
+ ----------
1648
+ X : array-like of shape (n_samples, n_features)
1649
+ Training vector, where `n_samples` is the number of samples
1650
+ and `n_features` is the number of features.
1651
+
1652
+ y : Ignored
1653
+ Not used, present for API consistency by convention.
1654
+
1655
+ Returns
1656
+ -------
1657
+ V : ndarray of shape (n_samples, n_components)
1658
+ Transformed data.
1659
+ """
1660
+ _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code)
1661
+
1662
+ method = "lasso_" + self.fit_algorithm
1663
+
1664
+ random_state = check_random_state(self.random_state)
1665
+ X = self._validate_data(X)
1666
+
1667
+ if self.n_components is None:
1668
+ n_components = X.shape[1]
1669
+ else:
1670
+ n_components = self.n_components
1671
+
1672
+ V, U, E, self.n_iter_ = _dict_learning(
1673
+ X,
1674
+ n_components,
1675
+ alpha=self.alpha,
1676
+ tol=self.tol,
1677
+ max_iter=self.max_iter,
1678
+ method=method,
1679
+ method_max_iter=self.transform_max_iter,
1680
+ n_jobs=self.n_jobs,
1681
+ code_init=self.code_init,
1682
+ dict_init=self.dict_init,
1683
+ callback=self.callback,
1684
+ verbose=self.verbose,
1685
+ random_state=random_state,
1686
+ return_n_iter=True,
1687
+ positive_dict=self.positive_dict,
1688
+ positive_code=self.positive_code,
1689
+ )
1690
+ self.components_ = U
1691
+ self.error_ = E
1692
+
1693
+ return V
1694
+
1695
+ @property
1696
+ def _n_features_out(self):
1697
+ """Number of transformed output features."""
1698
+ return self.components_.shape[0]
1699
+
1700
+ def _more_tags(self):
1701
+ return {
1702
+ "preserves_dtype": [np.float64, np.float32],
1703
+ }
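One detail worth spelling out about the class above: `fit_transform` returns the code produced by the fitting problem (`'lasso_lars'` or `'lasso_cd'` with `alpha`), while `transform` re-encodes with `transform_algorithm` (default `'omp'`) and `transform_alpha`, so the two codes generally differ. A small sketch on synthetic data:

import numpy as np
from sklearn.decomposition import DictionaryLearning

rng = np.random.RandomState(0)
X = rng.randn(30, 20)

dl = DictionaryLearning(n_components=10, alpha=1.0, max_iter=20, random_state=0)

code_fit = dl.fit_transform(X)  # code from the lasso fitting problem
code_omp = dl.transform(X)      # re-encoded with the default 'omp' algorithm

print(code_fit.shape, code_omp.shape)   # (30, 10) (30, 10)
print(np.allclose(code_fit, code_omp))  # usually False: different coding problems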
1704
+
1705
+
1706
+ class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
1707
+ """Mini-batch dictionary learning.
1708
+
1709
+ Finds a dictionary (a set of atoms) that performs well at sparsely
1710
+ encoding the fitted data.
1711
+
1712
+ Solves the optimization problem::
1713
+
1714
+ (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
1715
+ (U,V)
1716
+ with || V_k ||_2 <= 1 for all 0 <= k < n_components
1717
+
1718
+ ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
1719
+ the entry-wise matrix norm which is the sum of the absolute values
1720
+ of all the entries in the matrix.
1721
+
1722
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
1723
+
1724
+ Parameters
1725
+ ----------
1726
+ n_components : int, default=None
1727
+ Number of dictionary elements to extract.
1728
+
1729
+ alpha : float, default=1
1730
+ Sparsity controlling parameter.
1731
+
1732
+ max_iter : int, default=1_000
1733
+ Maximum number of iterations over the complete dataset before
1734
+ stopping independently of any early stopping criterion heuristics.
1735
+
1736
+ .. versionadded:: 1.1
1737
+
1738
+ .. deprecated:: 1.4
1739
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
1740
+ Use the default value (i.e. `1_000`) instead.
1741
+
1742
+ fit_algorithm : {'lars', 'cd'}, default='lars'
1743
+ The algorithm used:
1744
+
1745
+ - `'lars'`: uses the least angle regression method to solve the lasso
1746
+ problem (`linear_model.lars_path`)
1747
+ - `'cd'`: uses the coordinate descent method to compute the
1748
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
1749
+ the estimated components are sparse.
1750
+
1751
+ n_jobs : int, default=None
1752
+ Number of parallel jobs to run.
1753
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1754
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1755
+ for more details.
1756
+
1757
+ batch_size : int, default=256
1758
+ Number of samples in each mini-batch.
1759
+
1760
+ .. versionchanged:: 1.3
1761
+ The default value of `batch_size` changed from 3 to 256 in version 1.3.
1762
+
1763
+ shuffle : bool, default=True
1764
+ Whether to shuffle the samples before forming batches.
1765
+
1766
+ dict_init : ndarray of shape (n_components, n_features), default=None
1767
+ Initial value of the dictionary for warm restart scenarios.
1768
+
1769
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1770
+ 'threshold'}, default='omp'
1771
+ Algorithm used to transform the data:
1772
+
1773
+ - `'lars'`: uses the least angle regression method
1774
+ (`linear_model.lars_path`);
1775
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution.
1776
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1777
+ Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
1778
+ if the estimated components are sparse.
1779
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1780
+ solution.
1781
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1782
+ the projection ``dictionary * X'``.
1783
+
1784
+ transform_n_nonzero_coefs : int, default=None
1785
+ Number of nonzero coefficients to target in each column of the
1786
+ solution. This is only used by `algorithm='lars'` and
1787
+ `algorithm='omp'`. If `None`, then
1788
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1789
+
1790
+ transform_alpha : float, default=None
1791
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1792
+ penalty applied to the L1 norm.
1793
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1794
+ threshold below which coefficients will be squashed to zero.
1795
+ If `None`, defaults to `alpha`.
1796
+
1797
+ .. versionchanged:: 1.2
1798
+ When None, default value changed from 1.0 to `alpha`.
1799
+
1800
+ verbose : bool or int, default=False
1801
+ To control the verbosity of the procedure.
1802
+
1803
+ split_sign : bool, default=False
1804
+ Whether to split the sparse feature vector into the concatenation of
1805
+ its negative part and its positive part. This can improve the
1806
+ performance of downstream classifiers.
1807
+
1808
+ random_state : int, RandomState instance or None, default=None
1809
+ Used for initializing the dictionary when ``dict_init`` is not
1810
+ specified, randomly shuffling the data when ``shuffle`` is set to
1811
+ ``True``, and updating the dictionary. Pass an int for reproducible
1812
+ results across multiple function calls.
1813
+ See :term:`Glossary <random_state>`.
1814
+
1815
+ positive_code : bool, default=False
1816
+ Whether to enforce positivity when finding the code.
1817
+
1818
+ .. versionadded:: 0.20
1819
+
1820
+ positive_dict : bool, default=False
1821
+ Whether to enforce positivity when finding the dictionary.
1822
+
1823
+ .. versionadded:: 0.20
1824
+
1825
+ transform_max_iter : int, default=1000
1826
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1827
+ `'lasso_lars'`.
1828
+
1829
+ .. versionadded:: 0.22
1830
+
1831
+ callback : callable, default=None
1832
+ A callable that gets invoked at the end of each iteration.
1833
+
1834
+ .. versionadded:: 1.1
1835
+
1836
+ tol : float, default=1e-3
1837
+ Control early stopping based on the norm of the differences in the
1838
+ dictionary between 2 steps.
1839
+
1840
+ To disable early stopping based on changes in the dictionary, set
1841
+ `tol` to 0.0.
1842
+
1843
+ .. versionadded:: 1.1
1844
+
1845
+ max_no_improvement : int, default=10
1846
+ Control early stopping based on the number of consecutive mini-batches
1847
+ that do not yield an improvement on the smoothed cost function.
1848
+
1849
+ To disable convergence detection based on cost function, set
1850
+ `max_no_improvement` to None.
1851
+
1852
+ .. versionadded:: 1.1
1853
+
1854
+ Attributes
1855
+ ----------
1856
+ components_ : ndarray of shape (n_components, n_features)
1857
+ Components extracted from the data.
1858
+
1859
+ n_features_in_ : int
1860
+ Number of features seen during :term:`fit`.
1861
+
1862
+ .. versionadded:: 0.24
1863
+
1864
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1865
+ Names of features seen during :term:`fit`. Defined only when `X`
1866
+ has feature names that are all strings.
1867
+
1868
+ .. versionadded:: 1.0
1869
+
1870
+ n_iter_ : int
1871
+ Number of iterations over the full dataset.
1872
+
1873
+ n_steps_ : int
1874
+ Number of mini-batches processed.
1875
+
1876
+ .. versionadded:: 1.1
1877
+
1878
+ See Also
1879
+ --------
1880
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1881
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1882
+ SparseCoder : Find a sparse representation of data from a fixed,
1883
+ precomputed dictionary.
1884
+ SparsePCA : Sparse Principal Components Analysis.
1885
+
1886
+ References
1887
+ ----------
1888
+
1889
+ J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
1890
+ for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
1891
+
1892
+ Examples
1893
+ --------
1894
+ >>> import numpy as np
1895
+ >>> from sklearn.datasets import make_sparse_coded_signal
1896
+ >>> from sklearn.decomposition import MiniBatchDictionaryLearning
1897
+ >>> X, dictionary, code = make_sparse_coded_signal(
1898
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1899
+ ... random_state=42)
1900
+ >>> dict_learner = MiniBatchDictionaryLearning(
1901
+ ... n_components=15, batch_size=3, transform_algorithm='lasso_lars',
1902
+ ... transform_alpha=0.1, max_iter=20, random_state=42)
1903
+ >>> X_transformed = dict_learner.fit_transform(X)
1904
+
1905
+ We can check the level of sparsity of `X_transformed`:
1906
+
1907
+ >>> np.mean(X_transformed == 0) > 0.5
1908
+ True
1909
+
1910
+ We can compare the average squared euclidean norm of the reconstruction
1911
+ error of the sparse coded signal relative to the squared euclidean norm of
1912
+ the original signal:
1913
+
1914
+ >>> X_hat = X_transformed @ dict_learner.components_
1915
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1916
+ 0.052...
1917
+ """
1918
+
1919
+ _parameter_constraints: dict = {
1920
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
1921
+ "alpha": [Interval(Real, 0, None, closed="left")],
1922
+ "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)],
1923
+ "fit_algorithm": [StrOptions({"cd", "lars"})],
1924
+ "n_jobs": [None, Integral],
1925
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
1926
+ "shuffle": ["boolean"],
1927
+ "dict_init": [None, np.ndarray],
1928
+ "transform_algorithm": [
1929
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
1930
+ ],
1931
+ "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
1932
+ "transform_alpha": [Interval(Real, 0, None, closed="left"), None],
1933
+ "verbose": ["verbose"],
1934
+ "split_sign": ["boolean"],
1935
+ "random_state": ["random_state"],
1936
+ "positive_code": ["boolean"],
1937
+ "positive_dict": ["boolean"],
1938
+ "transform_max_iter": [Interval(Integral, 0, None, closed="left")],
1939
+ "callback": [None, callable],
1940
+ "tol": [Interval(Real, 0, None, closed="left")],
1941
+ "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
1942
+ }
1943
+
1944
+ def __init__(
1945
+ self,
1946
+ n_components=None,
1947
+ *,
1948
+ alpha=1,
1949
+ max_iter=1_000,
1950
+ fit_algorithm="lars",
1951
+ n_jobs=None,
1952
+ batch_size=256,
1953
+ shuffle=True,
1954
+ dict_init=None,
1955
+ transform_algorithm="omp",
1956
+ transform_n_nonzero_coefs=None,
1957
+ transform_alpha=None,
1958
+ verbose=False,
1959
+ split_sign=False,
1960
+ random_state=None,
1961
+ positive_code=False,
1962
+ positive_dict=False,
1963
+ transform_max_iter=1000,
1964
+ callback=None,
1965
+ tol=1e-3,
1966
+ max_no_improvement=10,
1967
+ ):
1968
+ super().__init__(
1969
+ transform_algorithm,
1970
+ transform_n_nonzero_coefs,
1971
+ transform_alpha,
1972
+ split_sign,
1973
+ n_jobs,
1974
+ positive_code,
1975
+ transform_max_iter,
1976
+ )
1977
+ self.n_components = n_components
1978
+ self.alpha = alpha
1979
+ self.max_iter = max_iter
1980
+ self.fit_algorithm = fit_algorithm
1981
+ self.dict_init = dict_init
1982
+ self.verbose = verbose
1983
+ self.shuffle = shuffle
1984
+ self.batch_size = batch_size
1985
+ self.split_sign = split_sign
1986
+ self.random_state = random_state
1987
+ self.positive_dict = positive_dict
1988
+ self.callback = callback
1989
+ self.max_no_improvement = max_no_improvement
1990
+ self.tol = tol
1991
+
1992
+ def _check_params(self, X):
1993
+ # n_components
1994
+ self._n_components = self.n_components
1995
+ if self._n_components is None:
1996
+ self._n_components = X.shape[1]
1997
+
1998
+ # fit_algorithm
1999
+ _check_positive_coding(self.fit_algorithm, self.positive_code)
2000
+ self._fit_algorithm = "lasso_" + self.fit_algorithm
2001
+
2002
+ # batch_size
2003
+ self._batch_size = min(self.batch_size, X.shape[0])
2004
+
2005
+ def _initialize_dict(self, X, random_state):
2006
+ """Initialization of the dictionary."""
2007
+ if self.dict_init is not None:
2008
+ dictionary = self.dict_init
2009
+ else:
2010
+ # Init V with SVD of X
2011
+ _, S, dictionary = randomized_svd(
2012
+ X, self._n_components, random_state=random_state
2013
+ )
2014
+ dictionary = S[:, np.newaxis] * dictionary
2015
+
2016
+ if self._n_components <= len(dictionary):
2017
+ dictionary = dictionary[: self._n_components, :]
2018
+ else:
2019
+ dictionary = np.concatenate(
2020
+ (
2021
+ dictionary,
2022
+ np.zeros(
2023
+ (self._n_components - len(dictionary), dictionary.shape[1]),
2024
+ dtype=dictionary.dtype,
2025
+ ),
2026
+ )
2027
+ )
2028
+
2029
+ dictionary = check_array(dictionary, order="F", dtype=X.dtype, copy=False)
2030
+ dictionary = np.require(dictionary, requirements="W")
2031
+
2032
+ return dictionary
2033
+
2034
+ def _update_inner_stats(self, X, code, batch_size, step):
2035
+ """Update the inner stats inplace."""
2036
+ if step < batch_size - 1:
2037
+ theta = (step + 1) * batch_size
2038
+ else:
2039
+ theta = batch_size**2 + step + 1 - batch_size
2040
+ beta = (theta + 1 - batch_size) / (theta + 1)
2041
+
2042
+ self._A *= beta
2043
+ self._A += code.T @ code / batch_size
2044
+ self._B *= beta
2045
+ self._B += X.T @ code / batch_size
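`_A` and `_B` above are the running sufficient statistics of the online algorithm (roughly the discounted averages of `code.T @ code` and `X.T @ code`); the dictionary update only needs these two matrices. A hedged sketch of the block-coordinate atom update from the Mairal et al. (2009) paper referenced in the class docstring, written standalone rather than as the exact `_update_dict` code path:

import numpy as np

def dict_update_sketch(D, A, B):
    # D: (n_components, n_features), A: (n_components, n_components),
    # B: (n_features, n_components); one pass of block-coordinate descent.
    for j in range(D.shape[0]):
        if A[j, j] < 1e-10:
            continue  # atom unused in this sketch
        # Gradient step on atom j with the other atoms fixed.
        u = D[j] + (B[:, j] - D.T @ A[:, j]) / A[j, j]
        # Project back onto the unit ball.
        D[j] = u / max(np.linalg.norm(u), 1.0)
    return D

# Tiny smoke test with made-up statistics.
rng = np.random.RandomState(0)
D = rng.randn(3, 5)
C = rng.randn(10, 3)              # pretend codes for 10 samples
X = C @ D + 0.01 * rng.randn(10, 5)
A = C.T @ C / 10
B = X.T @ C / 10
D = dict_update_sketch(D, A, B)
print(np.linalg.norm(D, axis=1))  # all <= 1 after the projection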
2046
+
2047
+ def _minibatch_step(self, X, dictionary, random_state, step):
2048
+ """Perform the update on the dictionary for one minibatch."""
2049
+ batch_size = X.shape[0]
2050
+
2051
+ # Compute code for this batch
2052
+ code = _sparse_encode(
2053
+ X,
2054
+ dictionary,
2055
+ algorithm=self._fit_algorithm,
2056
+ alpha=self.alpha,
2057
+ n_jobs=self.n_jobs,
2058
+ positive=self.positive_code,
2059
+ max_iter=self.transform_max_iter,
2060
+ verbose=self.verbose,
2061
+ )
2062
+
2063
+ batch_cost = (
2064
+ 0.5 * ((X - code @ dictionary) ** 2).sum()
2065
+ + self.alpha * np.sum(np.abs(code))
2066
+ ) / batch_size
2067
+
2068
+ # Update inner stats
2069
+ self._update_inner_stats(X, code, batch_size, step)
2070
+
2071
+ # Update dictionary
2072
+ _update_dict(
2073
+ dictionary,
2074
+ X,
2075
+ code,
2076
+ self._A,
2077
+ self._B,
2078
+ verbose=self.verbose,
2079
+ random_state=random_state,
2080
+ positive=self.positive_dict,
2081
+ )
2082
+
2083
+ return batch_cost
2084
+
2085
+ def _check_convergence(
2086
+ self, X, batch_cost, new_dict, old_dict, n_samples, step, n_steps
2087
+ ):
2088
+ """Helper function to encapsulate the early stopping logic.
2089
+
2090
+ Early stopping is based on two factors:
2091
+ - A small change of the dictionary between two minibatch updates. This is
2092
+ controlled by the tol parameter.
2093
+ - No more improvement on a smoothed estimate of the objective function for a
2094
+ certain number of consecutive minibatch updates. This is controlled by
2095
+ the max_no_improvement parameter.
2096
+ """
2097
+ batch_size = X.shape[0]
2098
+
2099
+ # counts steps starting from 1 for user friendly verbose mode.
2100
+ step = step + 1
2101
+
2102
+ # Ignore the first 100 steps or 1 epoch to avoid initializing the ewa_cost with a
2103
+ # poor value.
2104
+ if step <= min(100, n_samples / batch_size):
2105
+ if self.verbose:
2106
+ print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
2107
+ return False
2108
+
2109
+ # Compute an Exponentially Weighted Average of the cost function to
2110
+ # monitor the convergence while discarding minibatch-local stochastic
2111
+ # variability: https://en.wikipedia.org/wiki/Moving_average
2112
+ if self._ewa_cost is None:
2113
+ self._ewa_cost = batch_cost
2114
+ else:
2115
+ alpha = batch_size / (n_samples + 1)
2116
+ alpha = min(alpha, 1)
2117
+ self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
2118
+
2119
+ if self.verbose:
2120
+ print(
2121
+ f"Minibatch step {step}/{n_steps}: mean batch cost: "
2122
+ f"{batch_cost}, ewa cost: {self._ewa_cost}"
2123
+ )
2124
+
2125
+ # Early stopping based on change of dictionary
2126
+ dict_diff = linalg.norm(new_dict - old_dict) / self._n_components
2127
+ if self.tol > 0 and dict_diff <= self.tol:
2128
+ if self.verbose:
2129
+ print(f"Converged (small dictionary change) at step {step}/{n_steps}")
2130
+ return True
2131
+
2132
+ # Early stopping heuristic due to lack of improvement on smoothed
2133
+ # cost function
2134
+ if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
2135
+ self._no_improvement = 0
2136
+ self._ewa_cost_min = self._ewa_cost
2137
+ else:
2138
+ self._no_improvement += 1
2139
+
2140
+ if (
2141
+ self.max_no_improvement is not None
2142
+ and self._no_improvement >= self.max_no_improvement
2143
+ ):
2144
+ if self.verbose:
2145
+ print(
2146
+ "Converged (lack of improvement in objective function) "
2147
+ f"at step {step}/{n_steps}"
2148
+ )
2149
+ return True
2150
+
2151
+ return False
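The smoothed cost used above is a plain exponentially weighted average. A toy sketch of how such an average damps minibatch noise (here a fixed `alpha` stands in for `batch_size / (n_samples + 1)`):

import numpy as np

rng = np.random.RandomState(0)
costs = 1.0 / np.arange(1, 201) + 0.05 * rng.randn(200)  # decreasing trend + noise

alpha = 0.1
ewa = None
ewa_trace = []
for c in costs:
    ewa = c if ewa is None else (1 - alpha) * ewa + alpha * c
    ewa_trace.append(ewa)

# The smoothed trace fluctuates much less than the raw batch costs.
print(np.std(np.diff(costs)), np.std(np.diff(ewa_trace)))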
2152
+
2153
+ @_fit_context(prefer_skip_nested_validation=True)
2154
+ def fit(self, X, y=None):
2155
+ """Fit the model from data in X.
2156
+
2157
+ Parameters
2158
+ ----------
2159
+ X : array-like of shape (n_samples, n_features)
2160
+ Training vector, where `n_samples` is the number of samples
2161
+ and `n_features` is the number of features.
2162
+
2163
+ y : Ignored
2164
+ Not used, present for API consistency by convention.
2165
+
2166
+ Returns
2167
+ -------
2168
+ self : object
2169
+ Returns the instance itself.
2170
+ """
2171
+ X = self._validate_data(
2172
+ X, dtype=[np.float64, np.float32], order="C", copy=False
2173
+ )
2174
+
2175
+ self._check_params(X)
2176
+ self._random_state = check_random_state(self.random_state)
2177
+
2178
+ dictionary = self._initialize_dict(X, self._random_state)
2179
+ old_dict = dictionary.copy()
2180
+
2181
+ if self.shuffle:
2182
+ X_train = X.copy()
2183
+ self._random_state.shuffle(X_train)
2184
+ else:
2185
+ X_train = X
2186
+
2187
+ n_samples, n_features = X_train.shape
2188
+
2189
+ if self.verbose:
2190
+ print("[dict_learning]")
2191
+
2192
+ # Inner stats
2193
+ self._A = np.zeros(
2194
+ (self._n_components, self._n_components), dtype=X_train.dtype
2195
+ )
2196
+ self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype)
2197
+
2198
+ # TODO(1.6): remove in 1.6
2199
+ if self.max_iter is None:
2200
+ warn(
2201
+ (
2202
+ "`max_iter=None` is deprecated in version 1.4 and will be removed"
2203
+ " in version 1.6. Use the default value (i.e. `1_000`) instead."
2204
+ ),
2205
+ FutureWarning,
2206
+ )
2207
+ max_iter = 1_000
2208
+ else:
2209
+ max_iter = self.max_iter
2210
+
2211
+ # Attributes to monitor the convergence
2212
+ self._ewa_cost = None
2213
+ self._ewa_cost_min = None
2214
+ self._no_improvement = 0
2215
+
2216
+ batches = gen_batches(n_samples, self._batch_size)
2217
+ batches = itertools.cycle(batches)
2218
+ n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
2219
+ n_steps = max_iter * n_steps_per_iter
2220
+
2221
+ i = -1 # to allow max_iter = 0
2222
+
2223
+ for i, batch in zip(range(n_steps), batches):
2224
+ X_batch = X_train[batch]
2225
+
2226
+ batch_cost = self._minibatch_step(
2227
+ X_batch, dictionary, self._random_state, i
2228
+ )
2229
+
2230
+ if self._check_convergence(
2231
+ X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps
2232
+ ):
2233
+ break
2234
+
2235
+ # XXX callback param added for backward compat in #18975 but a common
2236
+ # unified callback API should be preferred
2237
+ if self.callback is not None:
2238
+ self.callback(locals())
2239
+
2240
+ old_dict[:] = dictionary
2241
+
2242
+ self.n_steps_ = i + 1
2243
+ self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter)
2244
+ self.components_ = dictionary
2245
+
2246
+ return self
2247
+
2248
+ @_fit_context(prefer_skip_nested_validation=True)
2249
+ def partial_fit(self, X, y=None):
2250
+ """Update the model using the data in X as a mini-batch.
2251
+
2252
+ Parameters
2253
+ ----------
2254
+ X : array-like of shape (n_samples, n_features)
2255
+ Training vector, where `n_samples` is the number of samples
2256
+ and `n_features` is the number of features.
2257
+
2258
+ y : Ignored
2259
+ Not used, present for API consistency by convention.
2260
+
2261
+ Returns
2262
+ -------
2263
+ self : object
2264
+ Return the instance itself.
2265
+ """
2266
+ has_components = hasattr(self, "components_")
2267
+
2268
+ X = self._validate_data(
2269
+ X, dtype=[np.float64, np.float32], order="C", reset=not has_components
2270
+ )
2271
+
2272
+ if not has_components:
2273
+ # This instance has not been fitted yet (fit or partial_fit)
2274
+ self._check_params(X)
2275
+ self._random_state = check_random_state(self.random_state)
2276
+
2277
+ dictionary = self._initialize_dict(X, self._random_state)
2278
+
2279
+ self.n_steps_ = 0
2280
+
2281
+ self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)
2282
+ self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)
2283
+ else:
2284
+ dictionary = self.components_
2285
+
2286
+ self._minibatch_step(X, dictionary, self._random_state, self.n_steps_)
2287
+
2288
+ self.components_ = dictionary
2289
+ self.n_steps_ += 1
2290
+
2291
+ return self
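A minimal streaming sketch of `partial_fit` with synthetic mini-batches; each call runs exactly one `_minibatch_step` and increments `n_steps_`:

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
mbdl = MiniBatchDictionaryLearning(n_components=10, batch_size=32, random_state=0)

for _ in range(5):
    X_batch = rng.randn(32, 20)  # pretend this arrives from a stream
    mbdl.partial_fit(X_batch)

print(mbdl.components_.shape)  # (10, 20)
print(mbdl.n_steps_)           # 5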
2292
+
2293
+ @property
2294
+ def _n_features_out(self):
2295
+ """Number of transformed output features."""
2296
+ return self.components_.shape[0]
2297
+
2298
+ def _more_tags(self):
2299
+ return {
2300
+ "preserves_dtype": [np.float64, np.float32],
2301
+ }
venv/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py ADDED
@@ -0,0 +1,458 @@
1
+ """Factor Analysis.
2
+
3
+ A latent linear variable model.
4
+
5
+ FactorAnalysis is similar to probabilistic PCA implemented by PCA.score.
6
+ While PCA assumes Gaussian noise with the same variance for each
7
+ feature, the FactorAnalysis model assumes different variances for
8
+ each of them.
9
+
10
+ This implementation is based on David Barber's Book,
11
+ Bayesian Reasoning and Machine Learning,
12
+ http://www.cs.ucl.ac.uk/staff/d.barber/brml,
13
+ Algorithm 21.1
14
+ """
15
+
16
+ # Author: Christian Osendorfer <[email protected]>
17
+ # Alexandre Gramfort <[email protected]>
18
+ # Denis A. Engemann <[email protected]>
19
+
20
+ # License: BSD3
21
+
22
+ import warnings
23
+ from math import log, sqrt
24
+ from numbers import Integral, Real
25
+
26
+ import numpy as np
27
+ from scipy import linalg
28
+
29
+ from ..base import (
30
+ BaseEstimator,
31
+ ClassNamePrefixFeaturesOutMixin,
32
+ TransformerMixin,
33
+ _fit_context,
34
+ )
35
+ from ..exceptions import ConvergenceWarning
36
+ from ..utils import check_random_state
37
+ from ..utils._param_validation import Interval, StrOptions
38
+ from ..utils.extmath import fast_logdet, randomized_svd, squared_norm
39
+ from ..utils.validation import check_is_fitted
40
+
41
+
42
+ class FactorAnalysis(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
43
+ """Factor Analysis (FA).
44
+
45
+ A simple linear generative model with Gaussian latent variables.
46
+
47
+ The observations are assumed to be caused by a linear transformation of
48
+ lower dimensional latent factors and added Gaussian noise.
49
+ Without loss of generality the factors are distributed according to a
50
+ Gaussian with zero mean and unit covariance. The noise is also zero mean
51
+ and has an arbitrary diagonal covariance matrix.
52
+
53
+ If we restrict the model further by assuming that the Gaussian
54
+ noise is isotropic (all diagonal entries are the same), we would obtain
55
+ :class:`PCA`.
56
+
57
+ FactorAnalysis performs a maximum likelihood estimate of the so-called
58
+ `loading` matrix, the transformation of the latent variables to the
59
+ observed ones, using an SVD-based approach.
60
+
61
+ Read more in the :ref:`User Guide <FA>`.
62
+
63
+ .. versionadded:: 0.13
64
+
65
+ Parameters
66
+ ----------
67
+ n_components : int, default=None
68
+ Dimensionality of latent space, the number of components
69
+ of ``X`` that are obtained after ``transform``.
70
+ If None, n_components is set to the number of features.
71
+
72
+ tol : float, default=1e-2
73
+ Stopping tolerance for log-likelihood increase.
74
+
75
+ copy : bool, default=True
76
+ Whether to make a copy of X. If ``False``, the input X gets overwritten
77
+ during fitting.
78
+
79
+ max_iter : int, default=1000
80
+ Maximum number of iterations.
81
+
82
+ noise_variance_init : array-like of shape (n_features,), default=None
83
+ The initial guess of the noise variance for each feature.
84
+ If None, it defaults to np.ones(n_features).
85
+
86
+ svd_method : {'lapack', 'randomized'}, default='randomized'
87
+ Which SVD method to use. If 'lapack' use standard SVD from
88
+ scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
89
+ Defaults to 'randomized'. For most applications 'randomized' will
90
+ be sufficiently precise while providing significant speed gains.
91
+ Accuracy can also be improved by setting higher values for
92
+ `iterated_power`. If this is not sufficient, for maximum precision
93
+ you should choose 'lapack'.
94
+
95
+ iterated_power : int, default=3
96
+ Number of iterations for the power method. 3 by default. Only used
97
+ if ``svd_method`` equals 'randomized'.
98
+
99
+ rotation : {'varimax', 'quartimax'}, default=None
100
+ If not None, apply the indicated rotation. Currently, varimax and
101
+ quartimax are implemented. See
102
+ `"The varimax criterion for analytic rotation in factor analysis"
103
+ <https://link.springer.com/article/10.1007%2FBF02289233>`_
104
+ H. F. Kaiser, 1958.
105
+
106
+ .. versionadded:: 0.24
107
+
108
+ random_state : int or RandomState instance, default=0
109
+ Only used when ``svd_method`` equals 'randomized'. Pass an int for
110
+ reproducible results across multiple function calls.
111
+ See :term:`Glossary <random_state>`.
112
+
113
+ Attributes
114
+ ----------
115
+ components_ : ndarray of shape (n_components, n_features)
116
+ Components with maximum variance.
117
+
118
+ loglike_ : list of shape (n_iterations,)
119
+ The log likelihood at each iteration.
120
+
121
+ noise_variance_ : ndarray of shape (n_features,)
122
+ The estimated noise variance for each feature.
123
+
124
+ n_iter_ : int
125
+ Number of iterations run.
126
+
127
+ mean_ : ndarray of shape (n_features,)
128
+ Per-feature empirical mean, estimated from the training set.
129
+
130
+ n_features_in_ : int
131
+ Number of features seen during :term:`fit`.
132
+
133
+ .. versionadded:: 0.24
134
+
135
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
136
+ Names of features seen during :term:`fit`. Defined only when `X`
137
+ has feature names that are all strings.
138
+
139
+ .. versionadded:: 1.0
140
+
141
+ See Also
142
+ --------
143
+ PCA: Principal component analysis is also a latent linear variable model
144
+ which however assumes equal noise variance for each feature.
145
+ This extra assumption makes probabilistic PCA faster as it can be
146
+ computed in closed form.
147
+ FastICA: Independent component analysis, a latent variable model with
148
+ non-Gaussian latent variables.
149
+
150
+ References
151
+ ----------
152
+ - David Barber, Bayesian Reasoning and Machine Learning,
153
+ Algorithm 21.1.
154
+
155
+ - Christopher M. Bishop: Pattern Recognition and Machine Learning,
156
+ Chapter 12.2.4.
157
+
158
+ Examples
159
+ --------
160
+ >>> from sklearn.datasets import load_digits
161
+ >>> from sklearn.decomposition import FactorAnalysis
162
+ >>> X, _ = load_digits(return_X_y=True)
163
+ >>> transformer = FactorAnalysis(n_components=7, random_state=0)
164
+ >>> X_transformed = transformer.fit_transform(X)
165
+ >>> X_transformed.shape
166
+ (1797, 7)
167
+ """
168
+
169
+ _parameter_constraints: dict = {
170
+ "n_components": [Interval(Integral, 0, None, closed="left"), None],
171
+ "tol": [Interval(Real, 0.0, None, closed="left")],
172
+ "copy": ["boolean"],
173
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
174
+ "noise_variance_init": ["array-like", None],
175
+ "svd_method": [StrOptions({"randomized", "lapack"})],
176
+ "iterated_power": [Interval(Integral, 0, None, closed="left")],
177
+ "rotation": [StrOptions({"varimax", "quartimax"}), None],
178
+ "random_state": ["random_state"],
179
+ }
180
+
181
+ def __init__(
182
+ self,
183
+ n_components=None,
184
+ *,
185
+ tol=1e-2,
186
+ copy=True,
187
+ max_iter=1000,
188
+ noise_variance_init=None,
189
+ svd_method="randomized",
190
+ iterated_power=3,
191
+ rotation=None,
192
+ random_state=0,
193
+ ):
194
+ self.n_components = n_components
195
+ self.copy = copy
196
+ self.tol = tol
197
+ self.max_iter = max_iter
198
+ self.svd_method = svd_method
199
+
200
+ self.noise_variance_init = noise_variance_init
201
+ self.iterated_power = iterated_power
202
+ self.random_state = random_state
203
+ self.rotation = rotation
204
+
205
+ @_fit_context(prefer_skip_nested_validation=True)
206
+ def fit(self, X, y=None):
207
+ """Fit the FactorAnalysis model to X using SVD based approach.
208
+
209
+ Parameters
210
+ ----------
211
+ X : array-like of shape (n_samples, n_features)
212
+ Training data.
213
+
214
+ y : Ignored
215
+ Ignored parameter.
216
+
217
+ Returns
218
+ -------
219
+ self : object
220
+ FactorAnalysis class instance.
221
+ """
222
+ X = self._validate_data(X, copy=self.copy, dtype=np.float64)
223
+
224
+ n_samples, n_features = X.shape
225
+ n_components = self.n_components
226
+ if n_components is None:
227
+ n_components = n_features
228
+
229
+ self.mean_ = np.mean(X, axis=0)
230
+ X -= self.mean_
231
+
232
+ # some constant terms
233
+ nsqrt = sqrt(n_samples)
234
+ llconst = n_features * log(2.0 * np.pi) + n_components
235
+ var = np.var(X, axis=0)
236
+
237
+ if self.noise_variance_init is None:
238
+ psi = np.ones(n_features, dtype=X.dtype)
239
+ else:
240
+ if len(self.noise_variance_init) != n_features:
241
+ raise ValueError(
242
+ "noise_variance_init dimension does not "
243
+ "with number of features : %d != %d"
244
+ % (len(self.noise_variance_init), n_features)
245
+ )
246
+ psi = np.array(self.noise_variance_init)
247
+
248
+ loglike = []
249
+ old_ll = -np.inf
250
+ SMALL = 1e-12
251
+
252
+ # we'll modify svd outputs to return unexplained variance
253
+ # to allow for unified computation of loglikelihood
254
+ if self.svd_method == "lapack":
255
+
256
+ def my_svd(X):
257
+ _, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
258
+ return (
259
+ s[:n_components],
260
+ Vt[:n_components],
261
+ squared_norm(s[n_components:]),
262
+ )
263
+
264
+ else: # svd_method == "randomized"
265
+ random_state = check_random_state(self.random_state)
266
+
267
+ def my_svd(X):
268
+ _, s, Vt = randomized_svd(
269
+ X,
270
+ n_components,
271
+ random_state=random_state,
272
+ n_iter=self.iterated_power,
273
+ )
274
+ return s, Vt, squared_norm(X) - squared_norm(s)
275
+
276
+ for i in range(self.max_iter):
277
+ # SMALL helps numerics
278
+ sqrt_psi = np.sqrt(psi) + SMALL
279
+ s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
280
+ s **= 2
281
+ # Use 'maximum' here to avoid sqrt problems.
282
+ W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt
283
+ del Vt
284
+ W *= sqrt_psi
285
+
286
+ # loglikelihood
287
+ ll = llconst + np.sum(np.log(s))
288
+ ll += unexp_var + np.sum(np.log(psi))
289
+ ll *= -n_samples / 2.0
290
+ loglike.append(ll)
291
+ if (ll - old_ll) < self.tol:
292
+ break
293
+ old_ll = ll
294
+
295
+ psi = np.maximum(var - np.sum(W**2, axis=0), SMALL)
296
+ else:
297
+ warnings.warn(
298
+ "FactorAnalysis did not converge."
299
+ + " You might want"
300
+ + " to increase the number of iterations.",
301
+ ConvergenceWarning,
302
+ )
303
+
304
+ self.components_ = W
305
+ if self.rotation is not None:
306
+ self.components_ = self._rotate(W)
307
+ self.noise_variance_ = psi
308
+ self.loglike_ = loglike
309
+ self.n_iter_ = i + 1
310
+ return self
311
+
312
+ def transform(self, X):
313
+ """Apply dimensionality reduction to X using the model.
314
+
315
+ Compute the expected mean of the latent variables.
316
+ See Barber, 21.2.33 (or Bishop, 12.66).
317
+
318
+ Parameters
319
+ ----------
320
+ X : array-like of shape (n_samples, n_features)
321
+ Training data.
322
+
323
+ Returns
324
+ -------
325
+ X_new : ndarray of shape (n_samples, n_components)
326
+ The latent variables of X.
327
+ """
328
+ check_is_fitted(self)
329
+
330
+ X = self._validate_data(X, reset=False)
331
+ Ih = np.eye(len(self.components_))
332
+
333
+ X_transformed = X - self.mean_
334
+
335
+ Wpsi = self.components_ / self.noise_variance_
336
+ cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
337
+ tmp = np.dot(X_transformed, Wpsi.T)
338
+ X_transformed = np.dot(tmp, cov_z)
339
+
340
+ return X_transformed
341
+
342
+ def get_covariance(self):
343
+ """Compute data covariance with the FactorAnalysis model.
344
+
345
+ ``cov = components_.T * components_ + diag(noise_variance)``
346
+
347
+ Returns
348
+ -------
349
+ cov : ndarray of shape (n_features, n_features)
350
+ Estimated covariance of data.
351
+ """
352
+ check_is_fitted(self)
353
+
354
+ cov = np.dot(self.components_.T, self.components_)
355
+ cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
356
+ return cov
357
+
358
+ def get_precision(self):
359
+ """Compute data precision matrix with the FactorAnalysis model.
360
+
361
+ Returns
362
+ -------
363
+ precision : ndarray of shape (n_features, n_features)
364
+ Estimated precision of data.
365
+ """
366
+ check_is_fitted(self)
367
+
368
+ n_features = self.components_.shape[1]
369
+
370
+ # handle corner cases first
371
+ if self.n_components == 0:
372
+ return np.diag(1.0 / self.noise_variance_)
373
+ if self.n_components == n_features:
374
+ return linalg.inv(self.get_covariance())
375
+
376
+ # Get precision using matrix inversion lemma
377
+ components_ = self.components_
378
+ precision = np.dot(components_ / self.noise_variance_, components_.T)
379
+ precision.flat[:: len(precision) + 1] += 1.0
380
+ precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
381
+ precision /= self.noise_variance_[:, np.newaxis]
382
+ precision /= -self.noise_variance_[np.newaxis, :]
383
+ precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
384
+ return precision
385
+
386
+ def score_samples(self, X):
387
+ """Compute the log-likelihood of each sample.
388
+
389
+ Parameters
390
+ ----------
391
+ X : ndarray of shape (n_samples, n_features)
392
+ The data.
393
+
394
+ Returns
395
+ -------
396
+ ll : ndarray of shape (n_samples,)
397
+ Log-likelihood of each sample under the current model.
398
+ """
399
+ check_is_fitted(self)
400
+ X = self._validate_data(X, reset=False)
401
+ Xr = X - self.mean_
402
+ precision = self.get_precision()
403
+ n_features = X.shape[1]
404
+ log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
405
+ log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
406
+ return log_like
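A hedged sanity check of `score_samples`: it should match the log-density of a Gaussian whose covariance is `get_covariance()` (up to numerical round-off):

import numpy as np
from scipy import stats
from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X, _ = load_iris(return_X_y=True)
fa = FactorAnalysis(n_components=2, random_state=0).fit(X)

ll_model = fa.score_samples(X)
ll_ref = stats.multivariate_normal(mean=fa.mean_, cov=fa.get_covariance()).logpdf(X)

print(np.allclose(ll_model, ll_ref))  # expected True, up to round-off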
407
+
408
+ def score(self, X, y=None):
409
+ """Compute the average log-likelihood of the samples.
410
+
411
+ Parameters
412
+ ----------
413
+ X : ndarray of shape (n_samples, n_features)
414
+ The data.
415
+
416
+ y : Ignored
417
+ Ignored parameter.
418
+
419
+ Returns
420
+ -------
421
+ ll : float
422
+ Average log-likelihood of the samples under the current model.
423
+ """
424
+ return np.mean(self.score_samples(X))
425
+
426
+ def _rotate(self, components, n_components=None, tol=1e-6):
427
+ "Rotate the factor analysis solution."
428
+ # note that tol is not exposed
429
+ return _ortho_rotation(components.T, method=self.rotation, tol=tol)[
430
+ : self.n_components
431
+ ]
432
+
433
+ @property
434
+ def _n_features_out(self):
435
+ """Number of transformed output features."""
436
+ return self.components_.shape[0]
437
+
438
+
439
+ def _ortho_rotation(components, method="varimax", tol=1e-6, max_iter=100):
440
+ """Return rotated components."""
441
+ nrow, ncol = components.shape
442
+ rotation_matrix = np.eye(ncol)
443
+ var = 0
444
+
445
+ for _ in range(max_iter):
446
+ comp_rot = np.dot(components, rotation_matrix)
447
+ if method == "varimax":
448
+ tmp = comp_rot * np.transpose((comp_rot**2).sum(axis=0) / nrow)
449
+ elif method == "quartimax":
450
+ tmp = 0
451
+ u, s, v = np.linalg.svd(np.dot(components.T, comp_rot**3 - tmp))
452
+ rotation_matrix = np.dot(u, v)
453
+ var_new = np.sum(s)
454
+ if var != 0 and var_new < var * (1 + tol):
455
+ break
456
+ var = var_new
457
+
458
+ return np.dot(components, rotation_matrix).T
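A short sketch of requesting the rotation above through the public estimator; with `rotation='varimax'` each component tends to load strongly on fewer original features, which is the usual reason to rotate:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X, _ = load_iris(return_X_y=True)

fa_plain = FactorAnalysis(n_components=2, random_state=0).fit(X)
fa_rot = FactorAnalysis(n_components=2, rotation="varimax", random_state=0).fit(X)

print(np.round(fa_plain.components_, 2))
print(np.round(fa_rot.components_, 2))  # same row space, sparser-looking loadings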
venv/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py ADDED
@@ -0,0 +1,795 @@
1
+ """
2
+ Python implementation of the fast ICA algorithms.
3
+
4
+ Reference: Tables 8.3 and 8.4 page 196 in the book:
5
+ Independent Component Analysis, by Hyvarinen et al.
6
+ """
7
+
8
+ # Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
9
+ # Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
10
+ # License: BSD 3 clause
11
+
12
+ import warnings
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ from scipy import linalg
17
+
18
+ from ..base import (
19
+ BaseEstimator,
20
+ ClassNamePrefixFeaturesOutMixin,
21
+ TransformerMixin,
22
+ _fit_context,
23
+ )
24
+ from ..exceptions import ConvergenceWarning
25
+ from ..utils import as_float_array, check_array, check_random_state
26
+ from ..utils._param_validation import Interval, Options, StrOptions, validate_params
27
+ from ..utils.validation import check_is_fitted
28
+
29
+ __all__ = ["fastica", "FastICA"]
30
+
31
+
32
+ def _gs_decorrelation(w, W, j):
33
+ """
34
+ Orthonormalize w with respect to the first j rows of W.
35
+
36
+ Parameters
37
+ ----------
38
+ w : ndarray of shape (n,)
39
+ Array to be orthogonalized
40
+
41
+ W : ndarray of shape (p, n)
42
+ Null space definition
43
+
44
+ j : int < p
45
+ The number of (first) rows of the null space W with respect to which w is
46
+ orthogonalized.
47
+
48
+ Notes
49
+ -----
50
+ Assumes that W is orthogonal
51
+ w changed in place
52
+ """
53
+ w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
54
+ return w
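Aside (not part of the upstream diff): a quick numerical check of the Gram-Schmidt step above. For an orthonormal `W`, the returned vector should have essentially zero projection onto the first `j` rows; all names below are illustrative only.

import numpy as np

rng = np.random.default_rng(0)
W = np.linalg.qr(rng.standard_normal((4, 4)))[0]  # square orthogonal matrix
w = rng.standard_normal(4)
j = 2
w_orth = w - np.linalg.multi_dot([w, W[:j].T, W[:j]])  # same update as _gs_decorrelation
print(np.abs(W[:j] @ w_orth).max())  # expected to be around machine precision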
55
+
56
+
57
+ def _sym_decorrelation(W):
58
+ """Symmetric decorrelation
59
+ i.e. W <- (W * W.T) ^{-1/2} * W
60
+ """
61
+ s, u = linalg.eigh(np.dot(W, W.T))
62
+ # Avoid sqrt of negative values because of rounding errors. Note that
63
+ # np.sqrt(tiny) is larger than tiny and therefore this clipping also
64
+ # prevents division by zero in the next step.
65
+ s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None)
66
+
67
+ # u (resp. s) contains the eigenvectors (resp. square roots of
68
+ # the eigenvalues) of W * W.T
69
+ return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])
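Aside (not part of the upstream diff): a small check that the symmetric decorrelation W <- (W W.T)^(-1/2) W indeed returns a matrix with orthonormal rows.

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
W = rng.standard_normal((3, 3))
s, u = linalg.eigh(W @ W.T)
W_dec = np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])  # same formula as above
print(np.allclose(W_dec @ W_dec.T, np.eye(3)))  # expected: True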
70
+
71
+
72
+ def _ica_def(X, tol, g, fun_args, max_iter, w_init):
73
+ """Deflationary FastICA using fun approx to neg-entropy function
74
+
75
+ Used internally by FastICA.
76
+ """
77
+
78
+ n_components = w_init.shape[0]
79
+ W = np.zeros((n_components, n_components), dtype=X.dtype)
80
+ n_iter = []
81
+
82
+ # j is the index of the extracted component
83
+ for j in range(n_components):
84
+ w = w_init[j, :].copy()
85
+ w /= np.sqrt((w**2).sum())
86
+
87
+ for i in range(max_iter):
88
+ gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
89
+
90
+ w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
91
+
92
+ _gs_decorrelation(w1, W, j)
93
+
94
+ w1 /= np.sqrt((w1**2).sum())
95
+
96
+ lim = np.abs(np.abs((w1 * w).sum()) - 1)
97
+ w = w1
98
+ if lim < tol:
99
+ break
100
+
101
+ n_iter.append(i + 1)
102
+ W[j, :] = w
103
+
104
+ return W, max(n_iter)
105
+
106
+
107
+ def _ica_par(X, tol, g, fun_args, max_iter, w_init):
108
+ """Parallel FastICA.
109
+
110
+ Used internally by FastICA --main loop
111
+
112
+ """
113
+ W = _sym_decorrelation(w_init)
114
+ del w_init
115
+ p_ = float(X.shape[1])
116
+ for ii in range(max_iter):
117
+ gwtx, g_wtx = g(np.dot(W, X), fun_args)
118
+ W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)
119
+ del gwtx, g_wtx
120
+ # builtin max, abs are faster than their numpy counterparts.
121
+ # np.einsum allows having the lowest memory footprint.
122
+ # It is faster than np.diag(np.dot(W1, W.T)).
123
+ lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1))
124
+ W = W1
125
+ if lim < tol:
126
+ break
127
+ else:
128
+ warnings.warn(
129
+ (
130
+ "FastICA did not converge. Consider increasing "
131
+ "tolerance or the maximum number of iterations."
132
+ ),
133
+ ConvergenceWarning,
134
+ )
135
+
136
+ return W, ii + 1
137
+
138
+
139
+ # Some standard non-linear functions.
140
+ # XXX: these should be optimized, as they can be a bottleneck.
141
+ def _logcosh(x, fun_args=None):
142
+ alpha = fun_args.get("alpha", 1.0)  # default alpha for the logcosh contrast
143
+
144
+ x *= alpha
145
+ gx = np.tanh(x, x) # apply the tanh inplace
146
+ g_x = np.empty(x.shape[0], dtype=x.dtype)
147
+ # XXX compute in chunks to avoid extra allocation
148
+ for i, gx_i in enumerate(gx): # please don't vectorize.
149
+ g_x[i] = (alpha * (1 - gx_i**2)).mean()
150
+ return gx, g_x
151
+
152
+
153
+ def _exp(x, fun_args):
154
+ exp = np.exp(-(x**2) / 2)
155
+ gx = x * exp
156
+ g_x = (1 - x**2) * exp
157
+ return gx, g_x.mean(axis=-1)
158
+
159
+
160
+ def _cube(x, fun_args):
161
+ return x**3, (3 * x**2).mean(axis=-1)
162
+
163
+
164
+ @validate_params(
165
+ {
166
+ "X": ["array-like"],
167
+ "return_X_mean": ["boolean"],
168
+ "compute_sources": ["boolean"],
169
+ "return_n_iter": ["boolean"],
170
+ },
171
+ prefer_skip_nested_validation=False,
172
+ )
173
+ def fastica(
174
+ X,
175
+ n_components=None,
176
+ *,
177
+ algorithm="parallel",
178
+ whiten="unit-variance",
179
+ fun="logcosh",
180
+ fun_args=None,
181
+ max_iter=200,
182
+ tol=1e-04,
183
+ w_init=None,
184
+ whiten_solver="svd",
185
+ random_state=None,
186
+ return_X_mean=False,
187
+ compute_sources=True,
188
+ return_n_iter=False,
189
+ ):
190
+ """Perform Fast Independent Component Analysis.
191
+
192
+ The implementation is based on [1]_.
193
+
194
+ Read more in the :ref:`User Guide <ICA>`.
195
+
196
+ Parameters
197
+ ----------
198
+ X : array-like of shape (n_samples, n_features)
199
+ Training vector, where `n_samples` is the number of samples and
200
+ `n_features` is the number of features.
201
+
202
+ n_components : int, default=None
203
+ Number of components to use. If None is passed, all are used.
204
+
205
+ algorithm : {'parallel', 'deflation'}, default='parallel'
206
+ Specify which algorithm to use for FastICA.
207
+
208
+ whiten : str or bool, default='unit-variance'
209
+ Specify the whitening strategy to use.
210
+
211
+ - If 'arbitrary-variance', a whitening with variance
212
+ arbitrary is used.
213
+ - If 'unit-variance', the whitening matrix is rescaled to ensure that
214
+ each recovered source has unit variance.
215
+ - If False, the data is already considered to be whitened, and no
216
+ whitening is performed.
217
+
218
+ .. versionchanged:: 1.3
219
+ The default value of `whiten` changed to 'unit-variance' in 1.3.
220
+
221
+ fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
222
+ The functional form of the G function used in the
223
+ approximation to neg-entropy. Could be either 'logcosh', 'exp',
224
+ or 'cube'.
225
+ You can also provide your own function. It should return a tuple
226
+ containing the value of the function, and of its derivative, in the
227
+ point. The derivative should be averaged along its last dimension.
228
+ Example::
229
+
230
+ def my_g(x):
231
+ return x ** 3, (3 * x ** 2).mean(axis=-1)
232
+
233
+ fun_args : dict, default=None
234
+ Arguments to send to the functional form.
235
+ If empty or None and if fun='logcosh', fun_args will take value
236
+ {'alpha' : 1.0}.
237
+
238
+ max_iter : int, default=200
239
+ Maximum number of iterations to perform.
240
+
241
+ tol : float, default=1e-4
242
+ A positive scalar giving the tolerance at which the
243
+ un-mixing matrix is considered to have converged.
244
+
245
+ w_init : ndarray of shape (n_components, n_components), default=None
246
+ Initial un-mixing array. If `w_init=None`, then an array of values
247
+ drawn from a normal distribution is used.
248
+
249
+ whiten_solver : {"eigh", "svd"}, default="svd"
250
+ The solver to use for whitening.
251
+
252
+ - "svd" is more stable numerically if the problem is degenerate, and
253
+ often faster when `n_samples <= n_features`.
254
+
255
+ - "eigh" is generally more memory efficient when
256
+ `n_samples >= n_features`, and can be faster when
257
+ `n_samples >= 50 * n_features`.
258
+
259
+ .. versionadded:: 1.2
260
+
261
+ random_state : int, RandomState instance or None, default=None
262
+ Used to initialize ``w_init`` when not specified, with a
263
+ normal distribution. Pass an int, for reproducible results
264
+ across multiple function calls.
265
+ See :term:`Glossary <random_state>`.
266
+
267
+ return_X_mean : bool, default=False
268
+ If True, X_mean is returned too.
269
+
270
+ compute_sources : bool, default=True
271
+ If False, sources are not computed, but only the rotation matrix.
272
+ This can save memory when working with big data. Defaults to True.
273
+
274
+ return_n_iter : bool, default=False
275
+ Whether or not to return the number of iterations.
276
+
277
+ Returns
278
+ -------
279
+ K : ndarray of shape (n_components, n_features) or None
280
+ If whiten is 'True', K is the pre-whitening matrix that projects data
281
+ onto the first n_components principal components. If whiten is 'False',
282
+ K is 'None'.
283
+
284
+ W : ndarray of shape (n_components, n_components)
285
+ The square matrix that unmixes the data after whitening.
286
+ The mixing matrix is the pseudo-inverse of matrix ``W K``
287
+ if K is not None, else it is the inverse of W.
288
+
289
+ S : ndarray of shape (n_samples, n_components) or None
290
+ Estimated source matrix.
291
+
292
+ X_mean : ndarray of shape (n_features,)
293
+ The mean over features. Returned only if return_X_mean is True.
294
+
295
+ n_iter : int
296
+ If the algorithm is "deflation", n_iter is the
297
+ maximum number of iterations run across all components. Else
298
+ they are just the number of iterations taken to converge. This is
299
+ returned only when return_n_iter is set to `True`.
300
+
301
+ Notes
302
+ -----
303
+ The data matrix X is considered to be a linear combination of
304
+ non-Gaussian (independent) components i.e. X = AS where columns of S
305
+ contain the independent components and A is a linear mixing
306
+ matrix. In short, ICA attempts to 'un-mix' the data by estimating an
307
+ un-mixing matrix W where ``S = W K X.``
308
+ While FastICA was proposed to estimate as many sources
309
+ as features, it is possible to estimate fewer by setting
310
+ n_components < n_features. In this case K is not a square matrix
311
+ and the estimated A is the pseudo-inverse of ``W K``.
312
+
313
+ This implementation was originally made for data of shape
314
+ [n_features, n_samples]. Now the input is transposed
315
+ before the algorithm is applied. This makes it slightly
316
+ faster for Fortran-ordered input.
317
+
318
+ References
319
+ ----------
320
+ .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
321
+ Algorithms and Applications, Neural Networks, 13(4-5), 2000,
322
+ pp. 411-430.
323
+
324
+ Examples
325
+ --------
326
+ >>> from sklearn.datasets import load_digits
327
+ >>> from sklearn.decomposition import fastica
328
+ >>> X, _ = load_digits(return_X_y=True)
329
+ >>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance')
330
+ >>> K.shape
331
+ (7, 64)
332
+ >>> W.shape
333
+ (7, 7)
334
+ >>> S.shape
335
+ (1797, 7)
336
+ """
337
+ est = FastICA(
338
+ n_components=n_components,
339
+ algorithm=algorithm,
340
+ whiten=whiten,
341
+ fun=fun,
342
+ fun_args=fun_args,
343
+ max_iter=max_iter,
344
+ tol=tol,
345
+ w_init=w_init,
346
+ whiten_solver=whiten_solver,
347
+ random_state=random_state,
348
+ )
349
+ est._validate_params()
350
+ S = est._fit_transform(X, compute_sources=compute_sources)
351
+
352
+ if est.whiten in ["unit-variance", "arbitrary-variance"]:
353
+ K = est.whitening_
354
+ X_mean = est.mean_
355
+ else:
356
+ K = None
357
+ X_mean = None
358
+
359
+ returned_values = [K, est._unmixing, S]
360
+ if return_X_mean:
361
+ returned_values.append(X_mean)
362
+ if return_n_iter:
363
+ returned_values.append(est.n_iter_)
364
+
365
+ return returned_values
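Aside (not part of the upstream diff): a hedged end-to-end sketch of the `fastica` function defined above on two synthetic sources. The shapes follow the Returns section, and `pinv(W @ K)` recovers the mixing matrix only up to permutation, sign and scale.

import numpy as np
from sklearn.decomposition import fastica

t = np.linspace(0, 8, 2000)
S_true = np.c_[np.sin(2 * t), np.sign(np.cos(3 * t))]   # two independent sources
A_true = np.array([[1.0, 0.5], [0.5, 2.0]])             # mixing matrix
X = S_true @ A_true.T                                    # observed mixtures

K, W, S = fastica(X, n_components=2, whiten="unit-variance", random_state=0)
print(K.shape, W.shape, S.shape)    # (2, 2) (2, 2) (2000, 2)
A_est = np.linalg.pinv(W @ K)       # estimated mixing, up to permutation/sign/scale
print(A_est.round(2))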
366
+
367
+
368
+ class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
369
+ """FastICA: a fast algorithm for Independent Component Analysis.
370
+
371
+ The implementation is based on [1]_.
372
+
373
+ Read more in the :ref:`User Guide <ICA>`.
374
+
375
+ Parameters
376
+ ----------
377
+ n_components : int, default=None
378
+ Number of components to use. If None is passed, all are used.
379
+
380
+ algorithm : {'parallel', 'deflation'}, default='parallel'
381
+ Specify which algorithm to use for FastICA.
382
+
383
+ whiten : str or bool, default='unit-variance'
384
+ Specify the whitening strategy to use.
385
+
386
+ - If 'arbitrary-variance', a whitening with variance
387
+ arbitrary is used.
388
+ - If 'unit-variance', the whitening matrix is rescaled to ensure that
389
+ each recovered source has unit variance.
390
+ - If False, the data is already considered to be whitened, and no
391
+ whitening is performed.
392
+
393
+ .. versionchanged:: 1.3
394
+ The default value of `whiten` changed to 'unit-variance' in 1.3.
395
+
396
+ fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
397
+ The functional form of the G function used in the
398
+ approximation to neg-entropy. Could be either 'logcosh', 'exp',
399
+ or 'cube'.
400
+ You can also provide your own function. It should return a tuple
401
+ containing the value of the function, and of its derivative, in the
402
+ point. The derivative should be averaged along its last dimension.
403
+ Example::
404
+
405
+ def my_g(x):
406
+ return x ** 3, (3 * x ** 2).mean(axis=-1)
407
+
408
+ fun_args : dict, default=None
409
+ Arguments to send to the functional form.
410
+ If empty or None and if fun='logcosh', fun_args will take value
411
+ {'alpha' : 1.0}.
412
+
413
+ max_iter : int, default=200
414
+ Maximum number of iterations during fit.
415
+
416
+ tol : float, default=1e-4
417
+ A positive scalar giving the tolerance at which the
418
+ un-mixing matrix is considered to have converged.
419
+
420
+ w_init : array-like of shape (n_components, n_components), default=None
421
+ Initial un-mixing array. If `w_init=None`, then an array of values
422
+ drawn from a normal distribution is used.
423
+
424
+ whiten_solver : {"eigh", "svd"}, default="svd"
425
+ The solver to use for whitening.
426
+
427
+ - "svd" is more stable numerically if the problem is degenerate, and
428
+ often faster when `n_samples <= n_features`.
429
+
430
+ - "eigh" is generally more memory efficient when
431
+ `n_samples >= n_features`, and can be faster when
432
+ `n_samples >= 50 * n_features`.
433
+
434
+ .. versionadded:: 1.2
435
+
436
+ random_state : int, RandomState instance or None, default=None
437
+ Used to initialize ``w_init`` when not specified, with a
438
+ normal distribution. Pass an int, for reproducible results
439
+ across multiple function calls.
440
+ See :term:`Glossary <random_state>`.
441
+
442
+ Attributes
443
+ ----------
444
+ components_ : ndarray of shape (n_components, n_features)
445
+ The linear operator to apply to the data to get the independent
446
+ sources. This is equal to the unmixing matrix when ``whiten`` is
447
+ False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
448
+ ``whiten`` is True.
449
+
450
+ mixing_ : ndarray of shape (n_features, n_components)
451
+ The pseudo-inverse of ``components_``. It is the linear operator
452
+ that maps independent sources to the data.
453
+
454
+ mean_ : ndarray of shape(n_features,)
455
+ The mean over features. Only set if `self.whiten` is True.
456
+
457
+ n_features_in_ : int
458
+ Number of features seen during :term:`fit`.
459
+
460
+ .. versionadded:: 0.24
461
+
462
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
463
+ Names of features seen during :term:`fit`. Defined only when `X`
464
+ has feature names that are all strings.
465
+
466
+ .. versionadded:: 1.0
467
+
468
+ n_iter_ : int
469
+ If the algorithm is "deflation", n_iter is the
470
+ maximum number of iterations run across all components. Else
471
+ they are just the number of iterations taken to converge.
472
+
473
+ whitening_ : ndarray of shape (n_components, n_features)
474
+ Only set if whiten is 'True'. This is the pre-whitening matrix
475
+ that projects data onto the first `n_components` principal components.
476
+
477
+ See Also
478
+ --------
479
+ PCA : Principal component analysis (PCA).
480
+ IncrementalPCA : Incremental principal components analysis (IPCA).
481
+ KernelPCA : Kernel Principal component analysis (KPCA).
482
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
483
+ SparsePCA : Sparse Principal Components Analysis (SparsePCA).
484
+
485
+ References
486
+ ----------
487
+ .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:
488
+ Algorithms and Applications, Neural Networks, 13(4-5), 2000,
489
+ pp. 411-430.
490
+
491
+ Examples
492
+ --------
493
+ >>> from sklearn.datasets import load_digits
494
+ >>> from sklearn.decomposition import FastICA
495
+ >>> X, _ = load_digits(return_X_y=True)
496
+ >>> transformer = FastICA(n_components=7,
497
+ ... random_state=0,
498
+ ... whiten='unit-variance')
499
+ >>> X_transformed = transformer.fit_transform(X)
500
+ >>> X_transformed.shape
501
+ (1797, 7)
502
+ """
503
+
504
+ _parameter_constraints: dict = {
505
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
506
+ "algorithm": [StrOptions({"parallel", "deflation"})],
507
+ "whiten": [
508
+ StrOptions({"arbitrary-variance", "unit-variance"}),
509
+ Options(bool, {False}),
510
+ ],
511
+ "fun": [StrOptions({"logcosh", "exp", "cube"}), callable],
512
+ "fun_args": [dict, None],
513
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
514
+ "tol": [Interval(Real, 0.0, None, closed="left")],
515
+ "w_init": ["array-like", None],
516
+ "whiten_solver": [StrOptions({"eigh", "svd"})],
517
+ "random_state": ["random_state"],
518
+ }
519
+
520
+ def __init__(
521
+ self,
522
+ n_components=None,
523
+ *,
524
+ algorithm="parallel",
525
+ whiten="unit-variance",
526
+ fun="logcosh",
527
+ fun_args=None,
528
+ max_iter=200,
529
+ tol=1e-4,
530
+ w_init=None,
531
+ whiten_solver="svd",
532
+ random_state=None,
533
+ ):
534
+ super().__init__()
535
+ self.n_components = n_components
536
+ self.algorithm = algorithm
537
+ self.whiten = whiten
538
+ self.fun = fun
539
+ self.fun_args = fun_args
540
+ self.max_iter = max_iter
541
+ self.tol = tol
542
+ self.w_init = w_init
543
+ self.whiten_solver = whiten_solver
544
+ self.random_state = random_state
545
+
546
+ def _fit_transform(self, X, compute_sources=False):
547
+ """Fit the model.
548
+
549
+ Parameters
550
+ ----------
551
+ X : array-like of shape (n_samples, n_features)
552
+ Training data, where `n_samples` is the number of samples
553
+ and `n_features` is the number of features.
554
+
555
+ compute_sources : bool, default=False
556
+ If False, sources are not computed, but only the rotation matrix.
557
+ This can save memory when working with big data. Defaults to False.
558
+
559
+ Returns
560
+ -------
561
+ S : ndarray of shape (n_samples, n_components) or None
562
+ Sources matrix. `None` if `compute_sources` is `False`.
563
+ """
564
+ XT = self._validate_data(
565
+ X, copy=self.whiten, dtype=[np.float64, np.float32], ensure_min_samples=2
566
+ ).T
567
+ fun_args = {} if self.fun_args is None else self.fun_args
568
+ random_state = check_random_state(self.random_state)
569
+
570
+ alpha = fun_args.get("alpha", 1.0)
571
+ if not 1 <= alpha <= 2:
572
+ raise ValueError("alpha must be in [1,2]")
573
+
574
+ if self.fun == "logcosh":
575
+ g = _logcosh
576
+ elif self.fun == "exp":
577
+ g = _exp
578
+ elif self.fun == "cube":
579
+ g = _cube
580
+ elif callable(self.fun):
581
+
582
+ def g(x, fun_args):
583
+ return self.fun(x, **fun_args)
584
+
585
+ n_features, n_samples = XT.shape
586
+ n_components = self.n_components
587
+ if not self.whiten and n_components is not None:
588
+ n_components = None
589
+ warnings.warn("Ignoring n_components with whiten=False.")
590
+
591
+ if n_components is None:
592
+ n_components = min(n_samples, n_features)
593
+ if n_components > min(n_samples, n_features):
594
+ n_components = min(n_samples, n_features)
595
+ warnings.warn(
596
+ "n_components is too large: it will be set to %s" % n_components
597
+ )
598
+
599
+ if self.whiten:
600
+ # Centering the features of X
601
+ X_mean = XT.mean(axis=-1)
602
+ XT -= X_mean[:, np.newaxis]
603
+
604
+ # Whitening and preprocessing by PCA
605
+ if self.whiten_solver == "eigh":
606
+ # Faster when num_samples >> n_features
607
+ d, u = linalg.eigh(XT.dot(X))
608
+ sort_indices = np.argsort(d)[::-1]
609
+ eps = np.finfo(d.dtype).eps
610
+ degenerate_idx = d < eps
611
+ if np.any(degenerate_idx):
612
+ warnings.warn(
613
+ "There are some small singular values, using "
614
+ "whiten_solver = 'svd' might lead to more "
615
+ "accurate results."
616
+ )
617
+ d[degenerate_idx] = eps # For numerical issues
618
+ np.sqrt(d, out=d)
619
+ d, u = d[sort_indices], u[:, sort_indices]
620
+ elif self.whiten_solver == "svd":
621
+ u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
622
+
623
+ # Give consistent eigenvectors for both svd solvers
624
+ u *= np.sign(u[0])
625
+
626
+ K = (u / d).T[:n_components] # see (6.33) p.140
627
+ del u, d
628
+ X1 = np.dot(K, XT)
629
+ # see (13.6) p.267. Here X1 is white and the data
630
+ # in X has been projected onto a subspace by PCA
631
+ X1 *= np.sqrt(n_samples)
632
+ else:
633
+ # X must be cast to floats to avoid typing issues with numpy
634
+ # 2.0 and the line below
635
+ X1 = as_float_array(XT, copy=False) # copy has been taken care of
636
+
637
+ w_init = self.w_init
638
+ if w_init is None:
639
+ w_init = np.asarray(
640
+ random_state.normal(size=(n_components, n_components)), dtype=X1.dtype
641
+ )
642
+
643
+ else:
644
+ w_init = np.asarray(w_init)
645
+ if w_init.shape != (n_components, n_components):
646
+ raise ValueError(
647
+ "w_init has invalid shape -- should be %(shape)s"
648
+ % {"shape": (n_components, n_components)}
649
+ )
650
+
651
+ kwargs = {
652
+ "tol": self.tol,
653
+ "g": g,
654
+ "fun_args": fun_args,
655
+ "max_iter": self.max_iter,
656
+ "w_init": w_init,
657
+ }
658
+
659
+ if self.algorithm == "parallel":
660
+ W, n_iter = _ica_par(X1, **kwargs)
661
+ elif self.algorithm == "deflation":
662
+ W, n_iter = _ica_def(X1, **kwargs)
663
+ del X1
664
+
665
+ self.n_iter_ = n_iter
666
+
667
+ if compute_sources:
668
+ if self.whiten:
669
+ S = np.linalg.multi_dot([W, K, XT]).T
670
+ else:
671
+ S = np.dot(W, XT).T
672
+ else:
673
+ S = None
674
+
675
+ if self.whiten:
676
+ if self.whiten == "unit-variance":
677
+ if not compute_sources:
678
+ S = np.linalg.multi_dot([W, K, XT]).T
679
+ S_std = np.std(S, axis=0, keepdims=True)
680
+ S /= S_std
681
+ W /= S_std.T
682
+
683
+ self.components_ = np.dot(W, K)
684
+ self.mean_ = X_mean
685
+ self.whitening_ = K
686
+ else:
687
+ self.components_ = W
688
+
689
+ self.mixing_ = linalg.pinv(self.components_, check_finite=False)
690
+ self._unmixing = W
691
+
692
+ return S
693
+
694
+ @_fit_context(prefer_skip_nested_validation=True)
695
+ def fit_transform(self, X, y=None):
696
+ """Fit the model and recover the sources from X.
697
+
698
+ Parameters
699
+ ----------
700
+ X : array-like of shape (n_samples, n_features)
701
+ Training data, where `n_samples` is the number of samples
702
+ and `n_features` is the number of features.
703
+
704
+ y : Ignored
705
+ Not used, present for API consistency by convention.
706
+
707
+ Returns
708
+ -------
709
+ X_new : ndarray of shape (n_samples, n_components)
710
+ Estimated sources obtained by transforming the data with the
711
+ estimated unmixing matrix.
712
+ """
713
+ return self._fit_transform(X, compute_sources=True)
714
+
715
+ @_fit_context(prefer_skip_nested_validation=True)
716
+ def fit(self, X, y=None):
717
+ """Fit the model to X.
718
+
719
+ Parameters
720
+ ----------
721
+ X : array-like of shape (n_samples, n_features)
722
+ Training data, where `n_samples` is the number of samples
723
+ and `n_features` is the number of features.
724
+
725
+ y : Ignored
726
+ Not used, present for API consistency by convention.
727
+
728
+ Returns
729
+ -------
730
+ self : object
731
+ Returns the instance itself.
732
+ """
733
+ self._fit_transform(X, compute_sources=False)
734
+ return self
735
+
736
+ def transform(self, X, copy=True):
737
+ """Recover the sources from X (apply the unmixing matrix).
738
+
739
+ Parameters
740
+ ----------
741
+ X : array-like of shape (n_samples, n_features)
742
+ Data to transform, where `n_samples` is the number of samples
743
+ and `n_features` is the number of features.
744
+
745
+ copy : bool, default=True
746
+ If False, data passed to fit can be overwritten. Defaults to True.
747
+
748
+ Returns
749
+ -------
750
+ X_new : ndarray of shape (n_samples, n_components)
751
+ Estimated sources obtained by transforming the data with the
752
+ estimated unmixing matrix.
753
+ """
754
+ check_is_fitted(self)
755
+
756
+ X = self._validate_data(
757
+ X, copy=(copy and self.whiten), dtype=[np.float64, np.float32], reset=False
758
+ )
759
+ if self.whiten:
760
+ X -= self.mean_
761
+
762
+ return np.dot(X, self.components_.T)
763
+
764
+ def inverse_transform(self, X, copy=True):
765
+ """Transform the sources back to the mixed data (apply mixing matrix).
766
+
767
+ Parameters
768
+ ----------
769
+ X : array-like of shape (n_samples, n_components)
770
+ Sources, where `n_samples` is the number of samples
771
+ and `n_components` is the number of components.
772
+ copy : bool, default=True
773
+ If False, data passed to fit are overwritten. Defaults to True.
774
+
775
+ Returns
776
+ -------
777
+ X_new : ndarray of shape (n_samples, n_features)
778
+ Reconstructed data obtained with the mixing matrix.
779
+ """
780
+ check_is_fitted(self)
781
+
782
+ X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32])
783
+ X = np.dot(X, self.mixing_.T)
784
+ if self.whiten:
785
+ X += self.mean_
786
+
787
+ return X
788
+
789
+ @property
790
+ def _n_features_out(self):
791
+ """Number of transformed output features."""
792
+ return self.components_.shape[0]
793
+
794
+ def _more_tags(self):
795
+ return {"preserves_dtype": [np.float32, np.float64]}
venv/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py ADDED
@@ -0,0 +1,409 @@
1
+ """Incremental Principal Components Analysis."""
2
+
3
+ # Author: Kyle Kastner <[email protected]>
4
+ # Giorgio Patrini
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral
8
+
9
+ import numpy as np
10
+ from scipy import linalg, sparse
11
+
12
+ from ..base import _fit_context
13
+ from ..utils import gen_batches
14
+ from ..utils._param_validation import Interval
15
+ from ..utils.extmath import _incremental_mean_and_var, svd_flip
16
+ from ._base import _BasePCA
17
+
18
+
19
+ class IncrementalPCA(_BasePCA):
20
+ """Incremental principal components analysis (IPCA).
21
+
22
+ Linear dimensionality reduction using Singular Value Decomposition of
23
+ the data, keeping only the most significant singular vectors to
24
+ project the data to a lower dimensional space. The input data is centered
25
+ but not scaled for each feature before applying the SVD.
26
+
27
+ Depending on the size of the input data, this algorithm can be much more
28
+ memory efficient than a PCA, and allows sparse input.
29
+
30
+ This algorithm has constant memory complexity, on the order
31
+ of ``batch_size * n_features``, enabling use of np.memmap files without
32
+ loading the entire file into memory. For sparse matrices, the input
33
+ is converted to dense in batches (in order to be able to subtract the
34
+ mean) which avoids storing the entire dense matrix at any one time.
35
+
36
+ The computational overhead of each SVD is
37
+ ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
38
+ remain in memory at a time. There will be ``n_samples / batch_size`` SVD
39
+ computations to get the principal components, versus 1 large SVD of
40
+ complexity ``O(n_samples * n_features ** 2)`` for PCA.
41
+
42
+ For a usage example, see
43
+ :ref:`sphx_glr_auto_examples_decomposition_plot_incremental_pca.py`.
44
+
45
+ Read more in the :ref:`User Guide <IncrementalPCA>`.
46
+
47
+ .. versionadded:: 0.16
48
+
49
+ Parameters
50
+ ----------
51
+ n_components : int, default=None
52
+ Number of components to keep. If ``n_components`` is ``None``,
53
+ then ``n_components`` is set to ``min(n_samples, n_features)``.
54
+
55
+ whiten : bool, default=False
56
+ When True (False by default) the ``components_`` vectors are divided
57
+ by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
58
+ with unit component-wise variances.
59
+
60
+ Whitening will remove some information from the transformed signal
61
+ (the relative variance scales of the components) but can sometimes
62
+ improve the predictive accuracy of the downstream estimators by
63
+ making data respect some hard-wired assumptions.
64
+
65
+ copy : bool, default=True
66
+ If False, X will be overwritten. ``copy=False`` can be used to
67
+ save memory but is unsafe for general use.
68
+
69
+ batch_size : int, default=None
70
+ The number of samples to use for each batch. Only used when calling
71
+ ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
72
+ is inferred from the data and set to ``5 * n_features``, to provide a
73
+ balance between approximation accuracy and memory consumption.
74
+
75
+ Attributes
76
+ ----------
77
+ components_ : ndarray of shape (n_components, n_features)
78
+ Principal axes in feature space, representing the directions of
79
+ maximum variance in the data. Equivalently, the right singular
80
+ vectors of the centered input data, parallel to its eigenvectors.
81
+ The components are sorted by decreasing ``explained_variance_``.
82
+
83
+ explained_variance_ : ndarray of shape (n_components,)
84
+ Variance explained by each of the selected components.
85
+
86
+ explained_variance_ratio_ : ndarray of shape (n_components,)
87
+ Percentage of variance explained by each of the selected components.
88
+ If all components are stored, the sum of explained variances is equal
89
+ to 1.0.
90
+
91
+ singular_values_ : ndarray of shape (n_components,)
92
+ The singular values corresponding to each of the selected components.
93
+ The singular values are equal to the 2-norms of the ``n_components``
94
+ variables in the lower-dimensional space.
95
+
96
+ mean_ : ndarray of shape (n_features,)
97
+ Per-feature empirical mean, aggregate over calls to ``partial_fit``.
98
+
99
+ var_ : ndarray of shape (n_features,)
100
+ Per-feature empirical variance, aggregate over calls to
101
+ ``partial_fit``.
102
+
103
+ noise_variance_ : float
104
+ The estimated noise covariance following the Probabilistic PCA model
105
+ from Tipping and Bishop 1999. See "Pattern Recognition and
106
+ Machine Learning" by C. Bishop, 12.2.1 p. 574 or
107
+ http://www.miketipping.com/papers/met-mppca.pdf.
108
+
109
+ n_components_ : int
110
+ The estimated number of components. Relevant when
111
+ ``n_components=None``.
112
+
113
+ n_samples_seen_ : int
114
+ The number of samples processed by the estimator. Will be reset on
115
+ new calls to fit, but increments across ``partial_fit`` calls.
116
+
117
+ batch_size_ : int
118
+ Inferred batch size from ``batch_size``.
119
+
120
+ n_features_in_ : int
121
+ Number of features seen during :term:`fit`.
122
+
123
+ .. versionadded:: 0.24
124
+
125
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
126
+ Names of features seen during :term:`fit`. Defined only when `X`
127
+ has feature names that are all strings.
128
+
129
+ .. versionadded:: 1.0
130
+
131
+ See Also
132
+ --------
133
+ PCA : Principal component analysis (PCA).
134
+ KernelPCA : Kernel Principal component analysis (KPCA).
135
+ SparsePCA : Sparse Principal Components Analysis (SparsePCA).
136
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
137
+
138
+ Notes
139
+ -----
140
+ Implements the incremental PCA model from:
141
+ *D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
142
+ Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
143
+ pp. 125-141, May 2008.*
144
+ See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
145
+
146
+ This model is an extension of the Sequential Karhunen-Loeve Transform from:
147
+ :doi:`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
148
+ its Application to Images, IEEE Transactions on Image Processing, Volume 9,
149
+ Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>`
150
+
151
+ We have specifically abstained from an optimization used by authors of both
152
+ papers, a QR decomposition used in specific situations to reduce the
153
+ algorithmic complexity of the SVD. The source for this technique is
154
+ *Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
155
+ section 5.4.4, pp 252-253.*. This technique has been omitted because it is
156
+ advantageous only when decomposing a matrix with ``n_samples`` (rows)
157
+ >= 5/3 * ``n_features`` (columns), and hurts the readability of the
158
+ implemented algorithm. This would be a good opportunity for future
159
+ optimization, if it is deemed necessary.
160
+
161
+ References
162
+ ----------
163
+ D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
164
+ Tracking, International Journal of Computer Vision, Volume 77,
165
+ Issue 1-3, pp. 125-141, May 2008.
166
+
167
+ G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
168
+ Section 5.4.4, pp. 252-253.
169
+
170
+ Examples
171
+ --------
172
+ >>> from sklearn.datasets import load_digits
173
+ >>> from sklearn.decomposition import IncrementalPCA
174
+ >>> from scipy import sparse
175
+ >>> X, _ = load_digits(return_X_y=True)
176
+ >>> transformer = IncrementalPCA(n_components=7, batch_size=200)
177
+ >>> # either partially fit on smaller batches of data
178
+ >>> transformer.partial_fit(X[:100, :])
179
+ IncrementalPCA(batch_size=200, n_components=7)
180
+ >>> # or let the fit function itself divide the data into batches
181
+ >>> X_sparse = sparse.csr_matrix(X)
182
+ >>> X_transformed = transformer.fit_transform(X_sparse)
183
+ >>> X_transformed.shape
184
+ (1797, 7)
185
+ """
186
+
187
+ _parameter_constraints: dict = {
188
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
189
+ "whiten": ["boolean"],
190
+ "copy": ["boolean"],
191
+ "batch_size": [Interval(Integral, 1, None, closed="left"), None],
192
+ }
193
+
194
+ def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None):
195
+ self.n_components = n_components
196
+ self.whiten = whiten
197
+ self.copy = copy
198
+ self.batch_size = batch_size
199
+
200
+ @_fit_context(prefer_skip_nested_validation=True)
201
+ def fit(self, X, y=None):
202
+ """Fit the model with X, using minibatches of size batch_size.
203
+
204
+ Parameters
205
+ ----------
206
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
207
+ Training data, where `n_samples` is the number of samples and
208
+ `n_features` is the number of features.
209
+
210
+ y : Ignored
211
+ Not used, present for API consistency by convention.
212
+
213
+ Returns
214
+ -------
215
+ self : object
216
+ Returns the instance itself.
217
+ """
218
+ self.components_ = None
219
+ self.n_samples_seen_ = 0
220
+ self.mean_ = 0.0
221
+ self.var_ = 0.0
222
+ self.singular_values_ = None
223
+ self.explained_variance_ = None
224
+ self.explained_variance_ratio_ = None
225
+ self.noise_variance_ = None
226
+
227
+ X = self._validate_data(
228
+ X,
229
+ accept_sparse=["csr", "csc", "lil"],
230
+ copy=self.copy,
231
+ dtype=[np.float64, np.float32],
232
+ )
233
+ n_samples, n_features = X.shape
234
+
235
+ if self.batch_size is None:
236
+ self.batch_size_ = 5 * n_features
237
+ else:
238
+ self.batch_size_ = self.batch_size
239
+
240
+ for batch in gen_batches(
241
+ n_samples, self.batch_size_, min_batch_size=self.n_components or 0
242
+ ):
243
+ X_batch = X[batch]
244
+ if sparse.issparse(X_batch):
245
+ X_batch = X_batch.toarray()
246
+ self.partial_fit(X_batch, check_input=False)
247
+
248
+ return self
249
+
250
+ @_fit_context(prefer_skip_nested_validation=True)
251
+ def partial_fit(self, X, y=None, check_input=True):
252
+ """Incremental fit with X. All of X is processed as a single batch.
253
+
254
+ Parameters
255
+ ----------
256
+ X : array-like of shape (n_samples, n_features)
257
+ Training data, where `n_samples` is the number of samples and
258
+ `n_features` is the number of features.
259
+
260
+ y : Ignored
261
+ Not used, present for API consistency by convention.
262
+
263
+ check_input : bool, default=True
264
+ Run check_array on X.
265
+
266
+ Returns
267
+ -------
268
+ self : object
269
+ Returns the instance itself.
270
+ """
271
+ first_pass = not hasattr(self, "components_")
272
+
273
+ if check_input:
274
+ if sparse.issparse(X):
275
+ raise TypeError(
276
+ "IncrementalPCA.partial_fit does not support "
277
+ "sparse input. Either convert data to dense "
278
+ "or use IncrementalPCA.fit to do so in batches."
279
+ )
280
+ X = self._validate_data(
281
+ X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass
282
+ )
283
+ n_samples, n_features = X.shape
284
+ if first_pass:
285
+ self.components_ = None
286
+
287
+ if self.n_components is None:
288
+ if self.components_ is None:
289
+ self.n_components_ = min(n_samples, n_features)
290
+ else:
291
+ self.n_components_ = self.components_.shape[0]
292
+ elif not self.n_components <= n_features:
293
+ raise ValueError(
294
+ "n_components=%r invalid for n_features=%d, need "
295
+ "more rows than columns for IncrementalPCA "
296
+ "processing" % (self.n_components, n_features)
297
+ )
298
+ elif not self.n_components <= n_samples:
299
+ raise ValueError(
300
+ "n_components=%r must be less or equal to "
301
+ "the batch number of samples "
302
+ "%d." % (self.n_components, n_samples)
303
+ )
304
+ else:
305
+ self.n_components_ = self.n_components
306
+
307
+ if (self.components_ is not None) and (
308
+ self.components_.shape[0] != self.n_components_
309
+ ):
310
+ raise ValueError(
311
+ "Number of input features has changed from %i "
312
+ "to %i between calls to partial_fit! Try "
313
+ "setting n_components to a fixed value."
314
+ % (self.components_.shape[0], self.n_components_)
315
+ )
316
+
317
+ # This is the first partial_fit
318
+ if not hasattr(self, "n_samples_seen_"):
319
+ self.n_samples_seen_ = 0
320
+ self.mean_ = 0.0
321
+ self.var_ = 0.0
322
+
323
+ # Update stats - they are 0 if this is the first step
324
+ col_mean, col_var, n_total_samples = _incremental_mean_and_var(
325
+ X,
326
+ last_mean=self.mean_,
327
+ last_variance=self.var_,
328
+ last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
329
+ )
330
+ n_total_samples = n_total_samples[0]
331
+
332
+ # Whitening
333
+ if self.n_samples_seen_ == 0:
334
+ # If it is the first step, simply whiten X
335
+ X -= col_mean
336
+ else:
337
+ col_batch_mean = np.mean(X, axis=0)
338
+ X -= col_batch_mean
339
+ # Build matrix of combined previous basis and new data
340
+ mean_correction = np.sqrt(
341
+ (self.n_samples_seen_ / n_total_samples) * n_samples
342
+ ) * (self.mean_ - col_batch_mean)
343
+ X = np.vstack(
344
+ (
345
+ self.singular_values_.reshape((-1, 1)) * self.components_,
346
+ X,
347
+ mean_correction,
348
+ )
349
+ )
350
+
351
+ U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
352
+ U, Vt = svd_flip(U, Vt, u_based_decision=False)
353
+ explained_variance = S**2 / (n_total_samples - 1)
354
+ explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples)
355
+
356
+ self.n_samples_seen_ = n_total_samples
357
+ self.components_ = Vt[: self.n_components_]
358
+ self.singular_values_ = S[: self.n_components_]
359
+ self.mean_ = col_mean
360
+ self.var_ = col_var
361
+ self.explained_variance_ = explained_variance[: self.n_components_]
362
+ self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
363
+ # we already checked `self.n_components <= n_samples` above
364
+ if self.n_components_ not in (n_samples, n_features):
365
+ self.noise_variance_ = explained_variance[self.n_components_ :].mean()
366
+ else:
367
+ self.noise_variance_ = 0.0
368
+ return self
369
+
370
+ def transform(self, X):
371
+ """Apply dimensionality reduction to X.
372
+
373
+ X is projected on the first principal components previously extracted
374
+ from a training set, using minibatches of size batch_size if X is
375
+ sparse.
376
+
377
+ Parameters
378
+ ----------
379
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
380
+ New data, where `n_samples` is the number of samples
381
+ and `n_features` is the number of features.
382
+
383
+ Returns
384
+ -------
385
+ X_new : ndarray of shape (n_samples, n_components)
386
+ Projection of X in the first principal components.
387
+
388
+ Examples
389
+ --------
390
+
391
+ >>> import numpy as np
392
+ >>> from sklearn.decomposition import IncrementalPCA
393
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
394
+ ... [1, 1], [2, 1], [3, 2]])
395
+ >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
396
+ >>> ipca.fit(X)
397
+ IncrementalPCA(batch_size=3, n_components=2)
398
+ >>> ipca.transform(X) # doctest: +SKIP
399
+ """
400
+ if sparse.issparse(X):
401
+ n_samples = X.shape[0]
402
+ output = []
403
+ for batch in gen_batches(
404
+ n_samples, self.batch_size_, min_batch_size=self.n_components or 0
405
+ ):
406
+ output.append(super().transform(X[batch].toarray()))
407
+ return np.vstack(output)
408
+ else:
409
+ return super().transform(X)
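Aside (not part of the upstream diff): a minimal streaming sketch that feeds data to `partial_fit` chunk by chunk, which is what `fit` does internally via `gen_batches`. The chunk count is an arbitrary illustrative choice.

import numpy as np
from sklearn.datasets import load_digits
from sklearn.decomposition import IncrementalPCA

X, _ = load_digits(return_X_y=True)
ipca = IncrementalPCA(n_components=7)
for chunk in np.array_split(X, 6):   # six mini-batches of roughly 300 samples
    ipca.partial_fit(chunk)
print(ipca.n_samples_seen_, ipca.components_.shape)  # 1797 (7, 64)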
venv/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py ADDED
@@ -0,0 +1,572 @@
1
+ """Kernel Principal Components Analysis."""
2
+
3
+ # Author: Mathieu Blondel <[email protected]>
4
+ # Sylvain Marie <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral, Real
8
+
9
+ import numpy as np
10
+ from scipy import linalg
11
+ from scipy.linalg import eigh
12
+ from scipy.sparse.linalg import eigsh
13
+
14
+ from ..base import (
15
+ BaseEstimator,
16
+ ClassNamePrefixFeaturesOutMixin,
17
+ TransformerMixin,
18
+ _fit_context,
19
+ )
20
+ from ..exceptions import NotFittedError
21
+ from ..metrics.pairwise import pairwise_kernels
22
+ from ..preprocessing import KernelCenterer
23
+ from ..utils._arpack import _init_arpack_v0
24
+ from ..utils._param_validation import Interval, StrOptions
25
+ from ..utils.extmath import _randomized_eigsh, svd_flip
26
+ from ..utils.validation import (
27
+ _check_psd_eigenvalues,
28
+ check_is_fitted,
29
+ )
30
+
31
+
32
+ class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
33
+ """Kernel Principal component analysis (KPCA) [1]_.
34
+
35
+ Non-linear dimensionality reduction through the use of kernels (see
36
+ :ref:`metrics`).
37
+
38
+ It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD
39
+ or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the
40
+ truncated SVD, depending on the shape of the input data and the number of
41
+ components to extract. It can also use a randomized truncated SVD by the
42
+ method proposed in [3]_, see `eigen_solver`.
43
+
44
+ For a usage example, see
45
+ :ref:`sphx_glr_auto_examples_decomposition_plot_kernel_pca.py`.
46
+
47
+ Read more in the :ref:`User Guide <kernel_PCA>`.
48
+
49
+ Parameters
50
+ ----------
51
+ n_components : int, default=None
52
+ Number of components. If None, all non-zero components are kept.
53
+
54
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \
55
+ or callable, default='linear'
56
+ Kernel used for PCA.
57
+
58
+ gamma : float, default=None
59
+ Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
60
+ kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``.
61
+
62
+ degree : float, default=3
63
+ Degree for poly kernels. Ignored by other kernels.
64
+
65
+ coef0 : float, default=1
66
+ Independent term in poly and sigmoid kernels.
67
+ Ignored by other kernels.
68
+
69
+ kernel_params : dict, default=None
70
+ Parameters (keyword arguments) and
71
+ values for kernel passed as callable object.
72
+ Ignored by other kernels.
73
+
74
+ alpha : float, default=1.0
75
+ Hyperparameter of the ridge regression that learns the
76
+ inverse transform (when fit_inverse_transform=True).
77
+
78
+ fit_inverse_transform : bool, default=False
79
+ Learn the inverse transform for non-precomputed kernels
80
+ (i.e. learn to find the pre-image of a point). This method is based
81
+ on [2]_.
82
+
83
+ eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \
84
+ default='auto'
85
+ Select eigensolver to use. If `n_components` is much
86
+ less than the number of training samples, randomized (or arpack to a
87
+ smaller extent) may be more efficient than the dense eigensolver.
88
+ Randomized SVD is performed according to the method of Halko et al
89
+ [3]_.
90
+
91
+ auto :
92
+ the solver is selected by a default policy based on n_samples
93
+ (the number of training samples) and `n_components`:
94
+ if the number of components to extract is less than 10 (strict) and
95
+ the number of samples is more than 200 (strict), the 'arpack'
96
+ method is enabled. Otherwise the exact full eigenvalue
97
+ decomposition is computed and optionally truncated afterwards
98
+ ('dense' method).
99
+ dense :
100
+ run exact full eigenvalue decomposition calling the standard
101
+ LAPACK solver via `scipy.linalg.eigh`, and select the components
102
+ by postprocessing
103
+ arpack :
104
+ run SVD truncated to n_components calling ARPACK solver using
105
+ `scipy.sparse.linalg.eigsh`. It requires strictly
106
+ 0 < n_components < n_samples
107
+ randomized :
108
+ run randomized SVD by the method of Halko et al. [3]_. The current
109
+ implementation selects eigenvalues based on their module; therefore
110
+ using this method can lead to unexpected results if the kernel is
111
+ not positive semi-definite. See also [4]_.
112
+
113
+ .. versionchanged:: 1.0
114
+ `'randomized'` was added.
115
+
116
+ tol : float, default=0
117
+ Convergence tolerance for arpack.
118
+ If 0, optimal value will be chosen by arpack.
119
+
120
+ max_iter : int, default=None
121
+ Maximum number of iterations for arpack.
122
+ If None, optimal value will be chosen by arpack.
123
+
124
+ iterated_power : int >= 0, or 'auto', default='auto'
125
+ Number of iterations for the power method computed by
126
+ svd_solver == 'randomized'. When 'auto', it is set to 7 when
127
+ `n_components < 0.1 * min(X.shape)`, other it is set to 4.
128
+
129
+ .. versionadded:: 1.0
130
+
131
+ remove_zero_eig : bool, default=False
132
+ If True, then all components with zero eigenvalues are removed, so
133
+ that the number of components in the output may be < n_components
134
+ (and sometimes even zero due to numerical instability).
135
+ When n_components is None, this parameter is ignored and components
136
+ with zero eigenvalues are removed regardless.
137
+
138
+ random_state : int, RandomState instance or None, default=None
139
+ Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int
140
+ for reproducible results across multiple function calls.
141
+ See :term:`Glossary <random_state>`.
142
+
143
+ .. versionadded:: 0.18
144
+
145
+ copy_X : bool, default=True
146
+ If True, input X is copied and stored by the model in the `X_fit_`
147
+ attribute. If no further changes will be done to X, setting
148
+ `copy_X=False` saves memory by storing a reference.
149
+
150
+ .. versionadded:: 0.18
151
+
152
+ n_jobs : int, default=None
153
+ The number of parallel jobs to run.
154
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
155
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
156
+ for more details.
157
+
158
+ .. versionadded:: 0.18
159
+
160
+ Attributes
161
+ ----------
162
+ eigenvalues_ : ndarray of shape (n_components,)
163
+ Eigenvalues of the centered kernel matrix in decreasing order.
164
+ If `n_components` and `remove_zero_eig` are not set,
165
+ then all values are stored.
166
+
167
+ eigenvectors_ : ndarray of shape (n_samples, n_components)
168
+ Eigenvectors of the centered kernel matrix. If `n_components` and
169
+ `remove_zero_eig` are not set, then all components are stored.
170
+
171
+ dual_coef_ : ndarray of shape (n_samples, n_features)
172
+ Inverse transform matrix. Only available when
173
+ ``fit_inverse_transform`` is True.
174
+
175
+ X_transformed_fit_ : ndarray of shape (n_samples, n_components)
176
+ Projection of the fitted data on the kernel principal components.
177
+ Only available when ``fit_inverse_transform`` is True.
178
+
179
+ X_fit_ : ndarray of shape (n_samples, n_features)
180
+ The data used to fit the model. If `copy_X=False`, then `X_fit_` is
181
+ a reference. This attribute is used for the calls to transform.
182
+
183
+ n_features_in_ : int
184
+ Number of features seen during :term:`fit`.
185
+
186
+ .. versionadded:: 0.24
187
+
188
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
189
+ Names of features seen during :term:`fit`. Defined only when `X`
190
+ has feature names that are all strings.
191
+
192
+ .. versionadded:: 1.0
193
+
194
+ gamma_ : float
195
+ Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma`
196
+ is explicitly provided, this is just the same as `gamma`. When `gamma`
197
+ is `None`, this is the actual value of kernel coefficient.
198
+
199
+ .. versionadded:: 1.3
200
+
201
+ See Also
202
+ --------
203
+ FastICA : A fast algorithm for Independent Component Analysis.
204
+ IncrementalPCA : Incremental Principal Component Analysis.
205
+ NMF : Non-Negative Matrix Factorization.
206
+ PCA : Principal Component Analysis.
207
+ SparsePCA : Sparse Principal Component Analysis.
208
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
209
+
210
+ References
211
+ ----------
212
+ .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
213
+ "Kernel principal component analysis."
214
+ International conference on artificial neural networks.
215
+ Springer, Berlin, Heidelberg, 1997.
216
+ <https://people.eecs.berkeley.edu/~wainwrig/stat241b/scholkopf_kernel.pdf>`_
217
+
218
+ .. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
219
+ "Learning to find pre-images."
220
+ Advances in neural information processing systems 16 (2004): 449-456.
221
+ <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
222
+
223
+ .. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp.
224
+ "Finding structure with randomness: Probabilistic algorithms for
225
+ constructing approximate matrix decompositions."
226
+ SIAM review 53.2 (2011): 217-288. <0909.4061>`
227
+
228
+ .. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert.
229
+ "A randomized algorithm for the decomposition of matrices."
230
+ Applied and Computational Harmonic Analysis 30.1 (2011): 47-68.
231
+ <https://www.sciencedirect.com/science/article/pii/S1063520310000242>`_
232
+
233
+ Examples
234
+ --------
235
+ >>> from sklearn.datasets import load_digits
236
+ >>> from sklearn.decomposition import KernelPCA
237
+ >>> X, _ = load_digits(return_X_y=True)
238
+ >>> transformer = KernelPCA(n_components=7, kernel='linear')
239
+ >>> X_transformed = transformer.fit_transform(X)
240
+ >>> X_transformed.shape
241
+ (1797, 7)
242
+ """
243
+
244
+ _parameter_constraints: dict = {
245
+ "n_components": [
246
+ Interval(Integral, 1, None, closed="left"),
247
+ None,
248
+ ],
249
+ "kernel": [
250
+ StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}),
251
+ callable,
252
+ ],
253
+ "gamma": [
254
+ Interval(Real, 0, None, closed="left"),
255
+ None,
256
+ ],
257
+ "degree": [Interval(Real, 0, None, closed="left")],
258
+ "coef0": [Interval(Real, None, None, closed="neither")],
259
+ "kernel_params": [dict, None],
260
+ "alpha": [Interval(Real, 0, None, closed="left")],
261
+ "fit_inverse_transform": ["boolean"],
262
+ "eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})],
263
+ "tol": [Interval(Real, 0, None, closed="left")],
264
+ "max_iter": [
265
+ Interval(Integral, 1, None, closed="left"),
266
+ None,
267
+ ],
268
+ "iterated_power": [
269
+ Interval(Integral, 0, None, closed="left"),
270
+ StrOptions({"auto"}),
271
+ ],
272
+ "remove_zero_eig": ["boolean"],
273
+ "random_state": ["random_state"],
274
+ "copy_X": ["boolean"],
275
+ "n_jobs": [None, Integral],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ n_components=None,
281
+ *,
282
+ kernel="linear",
283
+ gamma=None,
284
+ degree=3,
285
+ coef0=1,
286
+ kernel_params=None,
287
+ alpha=1.0,
288
+ fit_inverse_transform=False,
289
+ eigen_solver="auto",
290
+ tol=0,
291
+ max_iter=None,
292
+ iterated_power="auto",
293
+ remove_zero_eig=False,
294
+ random_state=None,
295
+ copy_X=True,
296
+ n_jobs=None,
297
+ ):
298
+ self.n_components = n_components
299
+ self.kernel = kernel
300
+ self.kernel_params = kernel_params
301
+ self.gamma = gamma
302
+ self.degree = degree
303
+ self.coef0 = coef0
304
+ self.alpha = alpha
305
+ self.fit_inverse_transform = fit_inverse_transform
306
+ self.eigen_solver = eigen_solver
307
+ self.tol = tol
308
+ self.max_iter = max_iter
309
+ self.iterated_power = iterated_power
310
+ self.remove_zero_eig = remove_zero_eig
311
+ self.random_state = random_state
312
+ self.n_jobs = n_jobs
313
+ self.copy_X = copy_X
314
+
315
+ def _get_kernel(self, X, Y=None):
316
+ if callable(self.kernel):
317
+ params = self.kernel_params or {}
318
+ else:
319
+ params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0}
320
+ return pairwise_kernels(
321
+ X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params
322
+ )
323
+
324
+ def _fit_transform(self, K):
325
+ """Fit's using kernel K"""
326
+ # center kernel
327
+ K = self._centerer.fit_transform(K)
328
+
329
+ # adjust n_components according to user inputs
330
+ if self.n_components is None:
331
+ n_components = K.shape[0] # use all dimensions
332
+ else:
333
+ n_components = min(K.shape[0], self.n_components)
334
+
335
+ # compute eigenvectors
336
+ if self.eigen_solver == "auto":
337
+ if K.shape[0] > 200 and n_components < 10:
338
+ eigen_solver = "arpack"
339
+ else:
340
+ eigen_solver = "dense"
341
+ else:
342
+ eigen_solver = self.eigen_solver
343
+
344
+ if eigen_solver == "dense":
345
+ # Note: subset_by_index specifies the indices of smallest/largest to return
346
+ self.eigenvalues_, self.eigenvectors_ = eigh(
347
+ K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1)
348
+ )
349
+ elif eigen_solver == "arpack":
350
+ v0 = _init_arpack_v0(K.shape[0], self.random_state)
351
+ self.eigenvalues_, self.eigenvectors_ = eigsh(
352
+ K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0
353
+ )
354
+ elif eigen_solver == "randomized":
355
+ self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh(
356
+ K,
357
+ n_components=n_components,
358
+ n_iter=self.iterated_power,
359
+ random_state=self.random_state,
360
+ selection="module",
361
+ )
362
+
363
+ # make sure that the eigenvalues are ok and fix numerical issues
364
+ self.eigenvalues_ = _check_psd_eigenvalues(
365
+ self.eigenvalues_, enable_warnings=False
366
+ )
367
+
368
+ # flip eigenvectors' sign to enforce deterministic output
369
+ self.eigenvectors_, _ = svd_flip(
370
+ self.eigenvectors_, np.zeros_like(self.eigenvectors_).T
371
+ )
372
+
373
+ # sort eigenvectors in descending order
374
+ indices = self.eigenvalues_.argsort()[::-1]
375
+ self.eigenvalues_ = self.eigenvalues_[indices]
376
+ self.eigenvectors_ = self.eigenvectors_[:, indices]
377
+
378
+ # remove eigenvectors with a zero eigenvalue (null space) if required
379
+ if self.remove_zero_eig or self.n_components is None:
380
+ self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0]
381
+ self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0]
382
+
383
+ # Maintenance note on Eigenvectors normalization
384
+ # ----------------------------------------------
385
+ # there is a link between
386
+ # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)'
387
+ # if v is an eigenvector of K
388
+ # then Phi(X)v is an eigenvector of Phi(X)Phi(X)'
389
+ # if u is an eigenvector of Phi(X)Phi(X)'
390
+ # then Phi(X)'u is an eigenvector of Phi(X)'Phi(X)
391
+ #
392
+ # At this stage our self.eigenvectors_ (the v) have norm 1, we need to scale
393
+ # them so that eigenvectors in kernel feature space (the u) have norm=1
394
+ # instead
395
+ #
396
+ # We COULD scale them here:
397
+ # self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_)
398
+ #
399
+ # But choose to perform that LATER when needed, in `fit()` and in
400
+ # `transform()`.
401
+
402
+ return K
403
+
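The normalization relation described in the maintenance note above can be checked numerically. A minimal NumPy sketch (illustrative only, not part of the added file):

    import numpy as np

    rng = np.random.default_rng(0)
    A = rng.standard_normal((20, 5))
    K = A @ A.T                                 # a small PSD "kernel" matrix
    lam, V = np.linalg.eigh(K)                  # ascending eigenvalues
    lam, v = lam[-1], V[:, -1]                  # leading eigenpair, ||v|| == 1
    # Scaling v by 1 / sqrt(lam) makes the projection K @ v_scaled equal to
    # sqrt(lam) * v, which is the shortcut used later in fit() and transform().
    np.testing.assert_allclose(K @ (v / np.sqrt(lam)), np.sqrt(lam) * v, atol=1e-10)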
404
+ def _fit_inverse_transform(self, X_transformed, X):
405
+ if hasattr(X, "tocsr"):
406
+ raise NotImplementedError(
407
+ "Inverse transform not implemented for sparse matrices!"
408
+ )
409
+
410
+ n_samples = X_transformed.shape[0]
411
+ K = self._get_kernel(X_transformed)
412
+ K.flat[:: n_samples + 1] += self.alpha
413
+ self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True)
414
+ self.X_transformed_fit_ = X_transformed
415
+
416
+ @_fit_context(prefer_skip_nested_validation=True)
417
+ def fit(self, X, y=None):
418
+ """Fit the model from data in X.
419
+
420
+ Parameters
421
+ ----------
422
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
423
+ Training vector, where `n_samples` is the number of samples
424
+ and `n_features` is the number of features.
425
+
426
+ y : Ignored
427
+ Not used, present for API consistency by convention.
428
+
429
+ Returns
430
+ -------
431
+ self : object
432
+ Returns the instance itself.
433
+ """
434
+ if self.fit_inverse_transform and self.kernel == "precomputed":
435
+ raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
436
+ X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X)
437
+ self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma
438
+ self._centerer = KernelCenterer().set_output(transform="default")
439
+ K = self._get_kernel(X)
440
+ self._fit_transform(K)
441
+
442
+ if self.fit_inverse_transform:
443
+ # no need to use the kernel to transform X, use shortcut expression
444
+ X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
445
+
446
+ self._fit_inverse_transform(X_transformed, X)
447
+
448
+ self.X_fit_ = X
449
+ return self
450
+
451
+ def fit_transform(self, X, y=None, **params):
452
+ """Fit the model from data in X and transform X.
453
+
454
+ Parameters
455
+ ----------
456
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
457
+ Training vector, where `n_samples` is the number of samples
458
+ and `n_features` is the number of features.
459
+
460
+ y : Ignored
461
+ Not used, present for API consistency by convention.
462
+
463
+ **params : kwargs
464
+ Parameters (keyword arguments) and values passed to
465
+ the fit_transform instance.
466
+
467
+ Returns
468
+ -------
469
+ X_new : ndarray of shape (n_samples, n_components)
470
+ Transformed array.
471
+ """
472
+ self.fit(X, **params)
473
+
474
+ # no need to use the kernel to transform X, use shortcut expression
475
+ X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
476
+
477
+ if self.fit_inverse_transform:
478
+ self._fit_inverse_transform(X_transformed, X)
479
+
480
+ return X_transformed
481
+
482
+ def transform(self, X):
483
+ """Transform X.
484
+
485
+ Parameters
486
+ ----------
487
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
488
+ Training vector, where `n_samples` is the number of samples
489
+ and `n_features` is the number of features.
490
+
491
+ Returns
492
+ -------
493
+ X_new : ndarray of shape (n_samples, n_components)
494
+ Transformed array.
495
+ """
496
+ check_is_fitted(self)
497
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
498
+
499
+ # Compute centered gram matrix between X and training data X_fit_
500
+ K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
501
+
502
+ # scale eigenvectors (properly account for null-space for dot product)
503
+ non_zeros = np.flatnonzero(self.eigenvalues_)
504
+ scaled_alphas = np.zeros_like(self.eigenvectors_)
505
+ scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt(
506
+ self.eigenvalues_[non_zeros]
507
+ )
508
+
509
+ # Project with a scalar product between K and the scaled eigenvectors
510
+ return np.dot(K, scaled_alphas)
511
+
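A quick consistency check of the projection above against the `fit_transform` shortcut, on training data (assumes scikit-learn is installed; illustrative only):

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.decomposition import KernelPCA

    X, _ = load_iris(return_X_y=True)
    kpca = KernelPCA(n_components=3, kernel="rbf").fit(X)
    # On the training data, transform(X) agrees with eigenvectors_ * sqrt(eigenvalues_).
    np.testing.assert_allclose(kpca.transform(X), kpca.fit_transform(X), atol=1e-8)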
512
+ def inverse_transform(self, X):
513
+ """Transform X back to original space.
514
+
515
+ ``inverse_transform`` approximates the inverse transformation using
516
+ a learned pre-image. The pre-image is learned by kernel ridge
517
+ regression of the original data on their low-dimensional representation
518
+ vectors.
519
+
520
+ .. note::
521
+ :meth:`~sklearn.decomposition.KernelPCA.fit` internally uses a centered
522
+ kernel. As the centered kernel no longer contains the information
523
+ of the mean of kernel features, such information is not taken into
524
+ account in reconstruction.
525
+
526
+ .. note::
527
+ When users want to compute inverse transformation for 'linear'
528
+ kernel, it is recommended that they use
529
+ :class:`~sklearn.decomposition.PCA` instead. Unlike
530
+ :class:`~sklearn.decomposition.PCA`,
531
+ :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform``
532
+ does not reconstruct the mean of data when 'linear' kernel is used
533
+ due to the use of centered kernel.
534
+
535
+ Parameters
536
+ ----------
537
+ X : {array-like, sparse matrix} of shape (n_samples, n_components)
538
+ Low-dimensional representation, where `n_samples` is the number
539
+ of samples and `n_components` is the number of components.
540
+
541
+ Returns
542
+ -------
543
+ X_new : ndarray of shape (n_samples, n_features)
544
+ Reconstruction of X in the original space.
545
+
546
+ References
547
+ ----------
548
+ `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
549
+ "Learning to find pre-images."
550
+ Advances in neural information processing systems 16 (2004): 449-456.
551
+ <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
552
+ """
553
+ if not self.fit_inverse_transform:
554
+ raise NotFittedError(
555
+ "The fit_inverse_transform parameter was not"
556
+ " set to True when instantiating and hence "
557
+ "the inverse transform is not available."
558
+ )
559
+
560
+ K = self._get_kernel(X, self.X_transformed_fit_)
561
+ return np.dot(K, self.dual_coef_)
562
+
563
+ def _more_tags(self):
564
+ return {
565
+ "preserves_dtype": [np.float64, np.float32],
566
+ "pairwise": self.kernel == "precomputed",
567
+ }
568
+
569
+ @property
570
+ def _n_features_out(self):
571
+ """Number of transformed output features."""
572
+ return self.eigenvalues_.shape[0]
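A short usage sketch of the pre-image learning described in `inverse_transform` (assumes scikit-learn is installed; the parameter values are illustrative):

    from sklearn.datasets import load_digits
    from sklearn.decomposition import KernelPCA

    X, _ = load_digits(return_X_y=True)
    kpca = KernelPCA(n_components=7, kernel="rbf",
                     fit_inverse_transform=True, alpha=0.1)
    X_low = kpca.fit_transform(X)            # shape (1797, 7)
    X_back = kpca.inverse_transform(X_low)   # approximate pre-images, shape (1797, 64)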
venv/lib/python3.10/site-packages/sklearn/decomposition/_lda.py ADDED
@@ -0,0 +1,929 @@
1
+ """
2
+
3
+ =============================================================
4
+ Online Latent Dirichlet Allocation with variational inference
5
+ =============================================================
6
+
7
+ This implementation is modified from Matthew D. Hoffman's onlineldavb code
8
+ Link: https://github.com/blei-lab/onlineldavb
9
+ """
10
+
11
+ # Author: Chyi-Kwei Yau
12
+ # Author: Matthew D. Hoffman (original onlineldavb implementation)
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ import scipy.sparse as sp
17
+ from joblib import effective_n_jobs
18
+ from scipy.special import gammaln, logsumexp
19
+
20
+ from ..base import (
21
+ BaseEstimator,
22
+ ClassNamePrefixFeaturesOutMixin,
23
+ TransformerMixin,
24
+ _fit_context,
25
+ )
26
+ from ..utils import check_random_state, gen_batches, gen_even_slices
27
+ from ..utils._param_validation import Interval, StrOptions
28
+ from ..utils.parallel import Parallel, delayed
29
+ from ..utils.validation import check_is_fitted, check_non_negative
30
+ from ._online_lda_fast import (
31
+ _dirichlet_expectation_1d as cy_dirichlet_expectation_1d,
32
+ )
33
+ from ._online_lda_fast import (
34
+ _dirichlet_expectation_2d,
35
+ )
36
+ from ._online_lda_fast import (
37
+ mean_change as cy_mean_change,
38
+ )
39
+
40
+ EPS = np.finfo(float).eps
41
+
42
+
43
+ def _update_doc_distribution(
44
+ X,
45
+ exp_topic_word_distr,
46
+ doc_topic_prior,
47
+ max_doc_update_iter,
48
+ mean_change_tol,
49
+ cal_sstats,
50
+ random_state,
51
+ ):
52
+ """E-step: update document-topic distribution.
53
+
54
+ Parameters
55
+ ----------
56
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
57
+ Document word matrix.
58
+
59
+ exp_topic_word_distr : ndarray of shape (n_topics, n_features)
60
+ Exponential value of expectation of log topic word distribution.
61
+ In the literature, this is `exp(E[log(beta)])`.
62
+
63
+ doc_topic_prior : float
64
+ Prior of document topic distribution `theta`.
65
+
66
+ max_doc_update_iter : int
67
+ Max number of iterations for updating document topic distribution in
68
+ the E-step.
69
+
70
+ mean_change_tol : float
71
+ Stopping tolerance for updating document topic distribution in E-step.
72
+
73
+ cal_sstats : bool
74
+ Whether to calculate sufficient statistics or not.
75
+ Set `cal_sstats` to `True` when we need to run M-step.
76
+
77
+ random_state : RandomState instance or None
78
+ Controls how the document topic distribution is initialized.
79
+ Setting `random_state` to None initializes the document topic
80
+ distribution to a constant value.
81
+
82
+ Returns
83
+ -------
84
+ (doc_topic_distr, suff_stats) :
85
+ `doc_topic_distr` is the unnormalized topic distribution for each document.
86
+ In the literature, this is `gamma`. We can calculate `E[log(theta)]`
87
+ from it.
88
+ `suff_stats` is expected sufficient statistics for the M-step.
89
+ When `cal_sstats == False`, this will be None.
90
+
91
+ """
92
+ is_sparse_x = sp.issparse(X)
93
+ n_samples, n_features = X.shape
94
+ n_topics = exp_topic_word_distr.shape[0]
95
+
96
+ if random_state:
97
+ doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype(
98
+ X.dtype, copy=False
99
+ )
100
+ else:
101
+ doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype)
102
+
103
+ # In the literature, this is `exp(E[log(theta)])`
104
+ exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
105
+
106
+ # sufficient statistics for `components_` (only allocated when `cal_sstats` is True)
107
+ suff_stats = (
108
+ np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None
109
+ )
110
+
111
+ if is_sparse_x:
112
+ X_data = X.data
113
+ X_indices = X.indices
114
+ X_indptr = X.indptr
115
+
116
+ # These cython functions are called in a nested loop on usually very small arrays
117
+ # (length=n_topics). In that case, finding the appropriate signature of the
118
+ # fused-typed function can be more costly than its execution, hence the dispatch
119
+ # is done outside of the loop.
120
+ ctype = "float" if X.dtype == np.float32 else "double"
121
+ mean_change = cy_mean_change[ctype]
122
+ dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype]
123
+ eps = np.finfo(X.dtype).eps
124
+
125
+ for idx_d in range(n_samples):
126
+ if is_sparse_x:
127
+ ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
128
+ cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
129
+ else:
130
+ ids = np.nonzero(X[idx_d, :])[0]
131
+ cnts = X[idx_d, ids]
132
+
133
+ doc_topic_d = doc_topic_distr[idx_d, :]
134
+ # The next one is a copy, since the inner loop overwrites it.
135
+ exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
136
+ exp_topic_word_d = exp_topic_word_distr[:, ids]
137
+
138
+ # Iterate between `doc_topic_d` and `norm_phi` until convergence
139
+ for _ in range(0, max_doc_update_iter):
140
+ last_d = doc_topic_d
141
+
142
+ # The optimal phi_{dwk} is proportional to
143
+ # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
144
+ norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
145
+
146
+ doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T)
147
+ # Note: adds doc_topic_prior to doc_topic_d, in-place.
148
+ dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d)
149
+
150
+ if mean_change(last_d, doc_topic_d) < mean_change_tol:
151
+ break
152
+ doc_topic_distr[idx_d, :] = doc_topic_d
153
+
154
+ # Contribution of document d to the expected sufficient
155
+ # statistics for the M step.
156
+ if cal_sstats:
157
+ norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
158
+ suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
159
+
160
+ return (doc_topic_distr, suff_stats)
161
+
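The Cython helpers imported above compute the standard Dirichlet expectation, E[log theta_k] = psi(gamma_k) - psi(sum_j gamma_j). A pure NumPy/SciPy sketch of that formula (illustrative only; the function name here is hypothetical):

    import numpy as np
    from scipy.special import psi

    def dirichlet_expectation_2d(gamma):
        # Row-wise: psi(gamma_k) - psi(sum_j gamma_j)
        return psi(gamma) - psi(gamma.sum(axis=1, keepdims=True))

    gamma = np.random.default_rng(0).gamma(100.0, 0.01, size=(3, 4))
    exp_E_log_theta = np.exp(dirichlet_expectation_2d(gamma))   # exp(E[log(theta)])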
162
+
163
+ class LatentDirichletAllocation(
164
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
165
+ ):
166
+ """Latent Dirichlet Allocation with online variational Bayes algorithm.
167
+
168
+ The implementation is based on [1]_ and [2]_.
169
+
170
+ .. versionadded:: 0.17
171
+
172
+ Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
173
+
174
+ Parameters
175
+ ----------
176
+ n_components : int, default=10
177
+ Number of topics.
178
+
179
+ .. versionchanged:: 0.19
180
+ ``n_topics`` was renamed to ``n_components``
181
+
182
+ doc_topic_prior : float, default=None
183
+ Prior of document topic distribution `theta`. If the value is None,
184
+ defaults to `1 / n_components`.
185
+ In [1]_, this is called `alpha`.
186
+
187
+ topic_word_prior : float, default=None
188
+ Prior of topic word distribution `beta`. If the value is None, defaults
189
+ to `1 / n_components`.
190
+ In [1]_, this is called `eta`.
191
+
192
+ learning_method : {'batch', 'online'}, default='batch'
193
+ Method used to update `components_`. Only used in the :meth:`fit` method.
194
+ In general, if the data size is large, the online update will be much
195
+ faster than the batch update.
196
+
197
+ Valid options::
198
+
199
+ 'batch': Batch variational Bayes method. Use all training data in
200
+ each EM update.
201
+ Old `components_` will be overwritten in each iteration.
202
+ 'online': Online variational Bayes method. In each EM update, use
203
+ mini-batch of training data to update the ``components_``
204
+ variable incrementally. The learning rate is controlled by the
205
+ ``learning_decay`` and the ``learning_offset`` parameters.
206
+
207
+ .. versionchanged:: 0.20
208
+ The default learning method is now ``"batch"``.
209
+
210
+ learning_decay : float, default=0.7
211
+ A parameter that controls the learning rate in the online learning
212
+ method. The value should be set in the range (0.5, 1.0] to guarantee
213
+ asymptotic convergence. When the value is 0.0 and batch_size is
214
+ ``n_samples``, the update method is the same as batch learning. In the
215
+ literature, this is called kappa.
216
+
217
+ learning_offset : float, default=10.0
218
+ A (positive) parameter that downweights early iterations in online
219
+ learning. It should be greater than 1.0. In the literature, this is
220
+ called tau_0.
221
+
222
+ max_iter : int, default=10
223
+ The maximum number of passes over the training data (aka epochs).
224
+ It only impacts the behavior in the :meth:`fit` method, and not the
225
+ :meth:`partial_fit` method.
226
+
227
+ batch_size : int, default=128
228
+ Number of documents to use in each EM iteration. Only used in online
229
+ learning.
230
+
231
+ evaluate_every : int, default=-1
232
+ How often to evaluate perplexity. Only used in the `fit` method.
233
+ Set it to 0 or a negative number to not evaluate perplexity during
234
+ training at all. Evaluating perplexity can help you check convergence
235
+ during the training process, but it will also increase total training time.
236
+ Evaluating perplexity in every iteration might increase training time
237
+ up to two-fold.
238
+
239
+ total_samples : int, default=1e6
240
+ Total number of documents. Only used in the :meth:`partial_fit` method.
241
+
242
+ perp_tol : float, default=1e-1
243
+ Perplexity tolerance in batch learning. Only used when
244
+ ``evaluate_every`` is greater than 0.
245
+
246
+ mean_change_tol : float, default=1e-3
247
+ Stopping tolerance for updating document topic distribution in E-step.
248
+
249
+ max_doc_update_iter : int, default=100
250
+ Max number of iterations for updating document topic distribution in
251
+ the E-step.
252
+
253
+ n_jobs : int, default=None
254
+ The number of jobs to use in the E-step.
255
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
256
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
257
+ for more details.
258
+
259
+ verbose : int, default=0
260
+ Verbosity level.
261
+
262
+ random_state : int, RandomState instance or None, default=None
263
+ Pass an int for reproducible results across multiple function calls.
264
+ See :term:`Glossary <random_state>`.
265
+
266
+ Attributes
267
+ ----------
268
+ components_ : ndarray of shape (n_components, n_features)
269
+ Variational parameters for topic word distribution. Since the complete
270
+ conditional for topic word distribution is a Dirichlet,
271
+ ``components_[i, j]`` can be viewed as pseudocount that represents the
272
+ number of times word `j` was assigned to topic `i`.
273
+ It can also be viewed as distribution over the words for each topic
274
+ after normalization:
275
+ ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
276
+
277
+ exp_dirichlet_component_ : ndarray of shape (n_components, n_features)
278
+ Exponential value of expectation of log topic word distribution.
279
+ In the literature, this is `exp(E[log(beta)])`.
280
+
281
+ n_batch_iter_ : int
282
+ Number of iterations of the EM step.
283
+
284
+ n_features_in_ : int
285
+ Number of features seen during :term:`fit`.
286
+
287
+ .. versionadded:: 0.24
288
+
289
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
290
+ Names of features seen during :term:`fit`. Defined only when `X`
291
+ has feature names that are all strings.
292
+
293
+ .. versionadded:: 1.0
294
+
295
+ n_iter_ : int
296
+ Number of passes over the dataset.
297
+
298
+ bound_ : float
299
+ Final perplexity score on training set.
300
+
301
+ doc_topic_prior_ : float
302
+ Prior of document topic distribution `theta`. If the value is None,
303
+ it is `1 / n_components`.
304
+
305
+ random_state_ : RandomState instance
306
+ RandomState instance that is generated either from a seed, the random
307
+ number generator or by `np.random`.
308
+
309
+ topic_word_prior_ : float
310
+ Prior of topic word distribution `beta`. If the value is None, it is
311
+ `1 / n_components`.
312
+
313
+ See Also
314
+ --------
315
+ sklearn.discriminant_analysis.LinearDiscriminantAnalysis:
316
+ A classifier with a linear decision boundary, generated by fitting
317
+ class conditional densities to the data and using Bayes' rule.
318
+
319
+ References
320
+ ----------
321
+ .. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D.
322
+ Hoffman, David M. Blei, Francis Bach, 2010
323
+ https://github.com/blei-lab/onlineldavb
324
+
325
+ .. [2] "Stochastic Variational Inference", Matthew D. Hoffman,
326
+ David M. Blei, Chong Wang, John Paisley, 2013
327
+
328
+ Examples
329
+ --------
330
+ >>> from sklearn.decomposition import LatentDirichletAllocation
331
+ >>> from sklearn.datasets import make_multilabel_classification
332
+ >>> # This produces a feature matrix of token counts, similar to what
333
+ >>> # CountVectorizer would produce on text.
334
+ >>> X, _ = make_multilabel_classification(random_state=0)
335
+ >>> lda = LatentDirichletAllocation(n_components=5,
336
+ ... random_state=0)
337
+ >>> lda.fit(X)
338
+ LatentDirichletAllocation(...)
339
+ >>> # get topics for some given samples:
340
+ >>> lda.transform(X[-2:])
341
+ array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846],
342
+ [0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]])
343
+ """
344
+
345
+ _parameter_constraints: dict = {
346
+ "n_components": [Interval(Integral, 0, None, closed="neither")],
347
+ "doc_topic_prior": [None, Interval(Real, 0, 1, closed="both")],
348
+ "topic_word_prior": [None, Interval(Real, 0, 1, closed="both")],
349
+ "learning_method": [StrOptions({"batch", "online"})],
350
+ "learning_decay": [Interval(Real, 0, 1, closed="both")],
351
+ "learning_offset": [Interval(Real, 1.0, None, closed="left")],
352
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
353
+ "batch_size": [Interval(Integral, 0, None, closed="neither")],
354
+ "evaluate_every": [Interval(Integral, None, None, closed="neither")],
355
+ "total_samples": [Interval(Real, 0, None, closed="neither")],
356
+ "perp_tol": [Interval(Real, 0, None, closed="left")],
357
+ "mean_change_tol": [Interval(Real, 0, None, closed="left")],
358
+ "max_doc_update_iter": [Interval(Integral, 0, None, closed="left")],
359
+ "n_jobs": [None, Integral],
360
+ "verbose": ["verbose"],
361
+ "random_state": ["random_state"],
362
+ }
363
+
364
+ def __init__(
365
+ self,
366
+ n_components=10,
367
+ *,
368
+ doc_topic_prior=None,
369
+ topic_word_prior=None,
370
+ learning_method="batch",
371
+ learning_decay=0.7,
372
+ learning_offset=10.0,
373
+ max_iter=10,
374
+ batch_size=128,
375
+ evaluate_every=-1,
376
+ total_samples=1e6,
377
+ perp_tol=1e-1,
378
+ mean_change_tol=1e-3,
379
+ max_doc_update_iter=100,
380
+ n_jobs=None,
381
+ verbose=0,
382
+ random_state=None,
383
+ ):
384
+ self.n_components = n_components
385
+ self.doc_topic_prior = doc_topic_prior
386
+ self.topic_word_prior = topic_word_prior
387
+ self.learning_method = learning_method
388
+ self.learning_decay = learning_decay
389
+ self.learning_offset = learning_offset
390
+ self.max_iter = max_iter
391
+ self.batch_size = batch_size
392
+ self.evaluate_every = evaluate_every
393
+ self.total_samples = total_samples
394
+ self.perp_tol = perp_tol
395
+ self.mean_change_tol = mean_change_tol
396
+ self.max_doc_update_iter = max_doc_update_iter
397
+ self.n_jobs = n_jobs
398
+ self.verbose = verbose
399
+ self.random_state = random_state
400
+
401
+ def _init_latent_vars(self, n_features, dtype=np.float64):
402
+ """Initialize latent variables."""
403
+
404
+ self.random_state_ = check_random_state(self.random_state)
405
+ self.n_batch_iter_ = 1
406
+ self.n_iter_ = 0
407
+
408
+ if self.doc_topic_prior is None:
409
+ self.doc_topic_prior_ = 1.0 / self.n_components
410
+ else:
411
+ self.doc_topic_prior_ = self.doc_topic_prior
412
+
413
+ if self.topic_word_prior is None:
414
+ self.topic_word_prior_ = 1.0 / self.n_components
415
+ else:
416
+ self.topic_word_prior_ = self.topic_word_prior
417
+
418
+ init_gamma = 100.0
419
+ init_var = 1.0 / init_gamma
420
+ # In the literature, this is called `lambda`
421
+ self.components_ = self.random_state_.gamma(
422
+ init_gamma, init_var, (self.n_components, n_features)
423
+ ).astype(dtype, copy=False)
424
+
425
+ # In the literature, this is `exp(E[log(beta)])`
426
+ self.exp_dirichlet_component_ = np.exp(
427
+ _dirichlet_expectation_2d(self.components_)
428
+ )
429
+
430
+ def _e_step(self, X, cal_sstats, random_init, parallel=None):
431
+ """E-step in EM update.
432
+
433
+ Parameters
434
+ ----------
435
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
436
+ Document word matrix.
437
+
438
+ cal_sstats : bool
439
+ Parameter that indicates whether to calculate sufficient statistics
440
+ or not. Set ``cal_sstats`` to True when we need to run M-step.
441
+
442
+ random_init : bool
443
+ Parameter that indicates whether to initialize document topic
444
+ distribution randomly in the E-step. Set it to True in training
445
+ steps.
446
+
447
+ parallel : joblib.Parallel, default=None
448
+ Pre-initialized instance of joblib.Parallel.
449
+
450
+ Returns
451
+ -------
452
+ (doc_topic_distr, suff_stats) :
453
+ `doc_topic_distr` is unnormalized topic distribution for each
454
+ document. In the literature, this is called `gamma`.
455
+ `suff_stats` is expected sufficient statistics for the M-step.
456
+ When `cal_sstats == False`, it will be None.
457
+
458
+ """
459
+
460
+ # Run e-step in parallel
461
+ random_state = self.random_state_ if random_init else None
462
+
463
+ # TODO: make Parallel._effective_n_jobs public instead?
464
+ n_jobs = effective_n_jobs(self.n_jobs)
465
+ if parallel is None:
466
+ parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
467
+ results = parallel(
468
+ delayed(_update_doc_distribution)(
469
+ X[idx_slice, :],
470
+ self.exp_dirichlet_component_,
471
+ self.doc_topic_prior_,
472
+ self.max_doc_update_iter,
473
+ self.mean_change_tol,
474
+ cal_sstats,
475
+ random_state,
476
+ )
477
+ for idx_slice in gen_even_slices(X.shape[0], n_jobs)
478
+ )
479
+
480
+ # merge result
481
+ doc_topics, sstats_list = zip(*results)
482
+ doc_topic_distr = np.vstack(doc_topics)
483
+
484
+ if cal_sstats:
485
+ # This step finishes computing the sufficient statistics for the
486
+ # M-step.
487
+ suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
488
+ for sstats in sstats_list:
489
+ suff_stats += sstats
490
+ suff_stats *= self.exp_dirichlet_component_
491
+ else:
492
+ suff_stats = None
493
+
494
+ return (doc_topic_distr, suff_stats)
495
+
496
+ def _em_step(self, X, total_samples, batch_update, parallel=None):
497
+ """EM update for 1 iteration.
498
+
499
+ Update `components_` by batch VB or online VB.
500
+
501
+ Parameters
502
+ ----------
503
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
504
+ Document word matrix.
505
+
506
+ total_samples : int
507
+ Total number of documents. It is only used when
508
+ batch_update is `False`.
509
+
510
+ batch_update : bool
511
+ Parameter that controls updating method.
512
+ `True` for batch learning, `False` for online learning.
513
+
514
+ parallel : joblib.Parallel, default=None
515
+ Pre-initialized instance of joblib.Parallel
516
+
517
+ Returns
518
+ -------
519
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
520
+ Unnormalized document topic distribution.
521
+ """
522
+
523
+ # E-step
524
+ _, suff_stats = self._e_step(
525
+ X, cal_sstats=True, random_init=True, parallel=parallel
526
+ )
527
+
528
+ # M-step
529
+ if batch_update:
530
+ self.components_ = self.topic_word_prior_ + suff_stats
531
+ else:
532
+ # online update
533
+ # In the literature, the weight is `rho`
534
+ weight = np.power(
535
+ self.learning_offset + self.n_batch_iter_, -self.learning_decay
536
+ )
537
+ doc_ratio = float(total_samples) / X.shape[0]
538
+ self.components_ *= 1 - weight
539
+ self.components_ += weight * (
540
+ self.topic_word_prior_ + doc_ratio * suff_stats
541
+ )
542
+
543
+ # update `components_`-related variables
544
+ self.exp_dirichlet_component_ = np.exp(
545
+ _dirichlet_expectation_2d(self.components_)
546
+ )
547
+ self.n_batch_iter_ += 1
548
+ return
549
+
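A minimal sketch of the online M-step above: the weight rho_t = (learning_offset + t) ** (-learning_decay) mixes the old topic-word parameters with the mini-batch estimate (illustrative only; the function name is hypothetical):

    import numpy as np

    def online_m_step(components, suff_stats, topic_word_prior, n_batch_iter,
                      learning_offset=10.0, learning_decay=0.7, doc_ratio=1.0):
        rho = (learning_offset + n_batch_iter) ** (-learning_decay)
        # Convex combination of the previous parameters and the new estimate.
        return (1.0 - rho) * components + rho * (topic_word_prior + doc_ratio * suff_stats)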
550
+ def _more_tags(self):
551
+ return {
552
+ "preserves_dtype": [np.float64, np.float32],
553
+ "requires_positive_X": True,
554
+ }
555
+
556
+ def _check_non_neg_array(self, X, reset_n_features, whom):
557
+ """check X format
558
+
559
+ check X format and make sure no negative value in X.
560
+
561
+ Parameters
562
+ ----------
563
+ X : array-like or sparse matrix
564
+
565
+ """
566
+ dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype
567
+
568
+ X = self._validate_data(
569
+ X,
570
+ reset=reset_n_features,
571
+ accept_sparse="csr",
572
+ dtype=dtype,
573
+ )
574
+ check_non_negative(X, whom)
575
+
576
+ return X
577
+
578
+ @_fit_context(prefer_skip_nested_validation=True)
579
+ def partial_fit(self, X, y=None):
580
+ """Online VB with Mini-Batch update.
581
+
582
+ Parameters
583
+ ----------
584
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
585
+ Document word matrix.
586
+
587
+ y : Ignored
588
+ Not used, present here for API consistency by convention.
589
+
590
+ Returns
591
+ -------
592
+ self
593
+ Partially fitted estimator.
594
+ """
595
+ first_time = not hasattr(self, "components_")
596
+
597
+ X = self._check_non_neg_array(
598
+ X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit"
599
+ )
600
+ n_samples, n_features = X.shape
601
+ batch_size = self.batch_size
602
+
603
+ # initialize parameters or check
604
+ if first_time:
605
+ self._init_latent_vars(n_features, dtype=X.dtype)
606
+
607
+ if n_features != self.components_.shape[1]:
608
+ raise ValueError(
609
+ "The provided data has %d dimensions while "
610
+ "the model was trained with feature size %d."
611
+ % (n_features, self.components_.shape[1])
612
+ )
613
+
614
+ n_jobs = effective_n_jobs(self.n_jobs)
615
+ with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
616
+ for idx_slice in gen_batches(n_samples, batch_size):
617
+ self._em_step(
618
+ X[idx_slice, :],
619
+ total_samples=self.total_samples,
620
+ batch_update=False,
621
+ parallel=parallel,
622
+ )
623
+
624
+ return self
625
+
626
+ @_fit_context(prefer_skip_nested_validation=True)
627
+ def fit(self, X, y=None):
628
+ """Learn model for the data X with variational Bayes method.
629
+
630
+ When `learning_method` is 'online', use mini-batch update.
631
+ Otherwise, use batch update.
632
+
633
+ Parameters
634
+ ----------
635
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
636
+ Document word matrix.
637
+
638
+ y : Ignored
639
+ Not used, present here for API consistency by convention.
640
+
641
+ Returns
642
+ -------
643
+ self
644
+ Fitted estimator.
645
+ """
646
+ X = self._check_non_neg_array(
647
+ X, reset_n_features=True, whom="LatentDirichletAllocation.fit"
648
+ )
649
+ n_samples, n_features = X.shape
650
+ max_iter = self.max_iter
651
+ evaluate_every = self.evaluate_every
652
+ learning_method = self.learning_method
653
+
654
+ batch_size = self.batch_size
655
+
656
+ # initialize parameters
657
+ self._init_latent_vars(n_features, dtype=X.dtype)
658
+ # change to perplexity later
659
+ last_bound = None
660
+ n_jobs = effective_n_jobs(self.n_jobs)
661
+ with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
662
+ for i in range(max_iter):
663
+ if learning_method == "online":
664
+ for idx_slice in gen_batches(n_samples, batch_size):
665
+ self._em_step(
666
+ X[idx_slice, :],
667
+ total_samples=n_samples,
668
+ batch_update=False,
669
+ parallel=parallel,
670
+ )
671
+ else:
672
+ # batch update
673
+ self._em_step(
674
+ X, total_samples=n_samples, batch_update=True, parallel=parallel
675
+ )
676
+
677
+ # check perplexity
678
+ if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
679
+ doc_topics_distr, _ = self._e_step(
680
+ X, cal_sstats=False, random_init=False, parallel=parallel
681
+ )
682
+ bound = self._perplexity_precomp_distr(
683
+ X, doc_topics_distr, sub_sampling=False
684
+ )
685
+ if self.verbose:
686
+ print(
687
+ "iteration: %d of max_iter: %d, perplexity: %.4f"
688
+ % (i + 1, max_iter, bound)
689
+ )
690
+
691
+ if last_bound and abs(last_bound - bound) < self.perp_tol:
692
+ break
693
+ last_bound = bound
694
+
695
+ elif self.verbose:
696
+ print("iteration: %d of max_iter: %d" % (i + 1, max_iter))
697
+ self.n_iter_ += 1
698
+
699
+ # calculate final perplexity value on train set
700
+ doc_topics_distr, _ = self._e_step(
701
+ X, cal_sstats=False, random_init=False, parallel=parallel
702
+ )
703
+ self.bound_ = self._perplexity_precomp_distr(
704
+ X, doc_topics_distr, sub_sampling=False
705
+ )
706
+
707
+ return self
708
+
709
+ def _unnormalized_transform(self, X):
710
+ """Transform data X according to fitted model.
711
+
712
+ Parameters
713
+ ----------
714
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
715
+ Document word matrix.
716
+
717
+ Returns
718
+ -------
719
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
720
+ Document topic distribution for X.
721
+ """
722
+ doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False)
723
+
724
+ return doc_topic_distr
725
+
726
+ def transform(self, X):
727
+ """Transform data X according to the fitted model.
728
+
729
+ .. versionchanged:: 0.18
730
+ *doc_topic_distr* is now normalized
731
+
732
+ Parameters
733
+ ----------
734
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
735
+ Document word matrix.
736
+
737
+ Returns
738
+ -------
739
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
740
+ Document topic distribution for X.
741
+ """
742
+ check_is_fitted(self)
743
+ X = self._check_non_neg_array(
744
+ X, reset_n_features=False, whom="LatentDirichletAllocation.transform"
745
+ )
746
+ doc_topic_distr = self._unnormalized_transform(X)
747
+ doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
748
+ return doc_topic_distr
749
+
750
+ def _approx_bound(self, X, doc_topic_distr, sub_sampling):
751
+ """Estimate the variational bound.
752
+
753
+ Estimate the variational bound over "all documents" using only the
754
+ documents passed in as X. Since log-likelihood of each word cannot
755
+ be computed directly, we use this bound to estimate it.
756
+
757
+ Parameters
758
+ ----------
759
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
760
+ Document word matrix.
761
+
762
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
763
+ Document topic distribution. In the literature, this is called
764
+ gamma.
765
+
766
+ sub_sampling : bool, default=False
767
+ Compensate for subsampling of documents.
768
+ It is used when calculating the bound in online learning.
769
+
770
+ Returns
771
+ -------
772
+ score : float
773
+
774
+ """
775
+
776
+ def _loglikelihood(prior, distr, dirichlet_distr, size):
777
+ # calculate log-likelihood
778
+ score = np.sum((prior - distr) * dirichlet_distr)
779
+ score += np.sum(gammaln(distr) - gammaln(prior))
780
+ score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
781
+ return score
782
+
783
+ is_sparse_x = sp.issparse(X)
784
+ n_samples, n_components = doc_topic_distr.shape
785
+ n_features = self.components_.shape[1]
786
+ score = 0
787
+
788
+ dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
789
+ dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
790
+ doc_topic_prior = self.doc_topic_prior_
791
+ topic_word_prior = self.topic_word_prior_
792
+
793
+ if is_sparse_x:
794
+ X_data = X.data
795
+ X_indices = X.indices
796
+ X_indptr = X.indptr
797
+
798
+ # E[log p(docs | theta, beta)]
799
+ for idx_d in range(0, n_samples):
800
+ if is_sparse_x:
801
+ ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
802
+ cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
803
+ else:
804
+ ids = np.nonzero(X[idx_d, :])[0]
805
+ cnts = X[idx_d, ids]
806
+ temp = (
807
+ dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
808
+ )
809
+ norm_phi = logsumexp(temp, axis=0)
810
+ score += np.dot(cnts, norm_phi)
811
+
812
+ # compute E[log p(theta | alpha) - log q(theta | gamma)]
813
+ score += _loglikelihood(
814
+ doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components
815
+ )
816
+
817
+ # Compensate for the subsampling of the population of documents
818
+ if sub_sampling:
819
+ doc_ratio = float(self.total_samples) / n_samples
820
+ score *= doc_ratio
821
+
822
+ # E[log p(beta | eta) - log q (beta | lambda)]
823
+ score += _loglikelihood(
824
+ topic_word_prior, self.components_, dirichlet_component_, n_features
825
+ )
826
+
827
+ return score
828
+
829
+ def score(self, X, y=None):
830
+ """Calculate approximate log-likelihood as score.
831
+
832
+ Parameters
833
+ ----------
834
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
835
+ Document word matrix.
836
+
837
+ y : Ignored
838
+ Not used, present here for API consistency by convention.
839
+
840
+ Returns
841
+ -------
842
+ score : float
843
+ Use approximate bound as score.
844
+ """
845
+ check_is_fitted(self)
846
+ X = self._check_non_neg_array(
847
+ X, reset_n_features=False, whom="LatentDirichletAllocation.score"
848
+ )
849
+
850
+ doc_topic_distr = self._unnormalized_transform(X)
851
+ score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
852
+ return score
853
+
854
+ def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
855
+ """Calculate approximate perplexity for data X with ability to accept
856
+ precomputed doc_topic_distr
857
+
858
+ Perplexity is defined as exp(-1. * log-likelihood per word)
859
+
860
+ Parameters
861
+ ----------
862
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
863
+ Document word matrix.
864
+
865
+ doc_topic_distr : ndarray of shape (n_samples, n_components), \
866
+ default=None
867
+ Document topic distribution.
868
+ If it is None, it will be generated by applying transform on X.
869
+
870
+ Returns
871
+ -------
872
+ score : float
873
+ Perplexity score.
874
+ """
875
+ if doc_topic_distr is None:
876
+ doc_topic_distr = self._unnormalized_transform(X)
877
+ else:
878
+ n_samples, n_components = doc_topic_distr.shape
879
+ if n_samples != X.shape[0]:
880
+ raise ValueError(
881
+ "Number of samples in X and doc_topic_distr do not match."
882
+ )
883
+
884
+ if n_components != self.n_components:
885
+ raise ValueError("Number of topics does not match.")
886
+
887
+ current_samples = X.shape[0]
888
+ bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
889
+
890
+ if sub_sampling:
891
+ word_cnt = X.sum() * (float(self.total_samples) / current_samples)
892
+ else:
893
+ word_cnt = X.sum()
894
+ perword_bound = bound / word_cnt
895
+
896
+ return np.exp(-1.0 * perword_bound)
897
+
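The perplexity computed above reduces to a simple transform of the per-word bound; a one-line sketch (illustrative only; the function name is hypothetical):

    import numpy as np

    def perplexity_from_bound(bound, word_count):
        # exp(-1 * log-likelihood per word)
        return np.exp(-bound / word_count)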
898
+ def perplexity(self, X, sub_sampling=False):
899
+ """Calculate approximate perplexity for data X.
900
+
901
+ Perplexity is defined as exp(-1. * log-likelihood per word)
902
+
903
+ .. versionchanged:: 0.19
904
+ *doc_topic_distr* argument has been deprecated and is ignored
905
+ because the user no longer has access to the unnormalized distribution.
906
+
907
+ Parameters
908
+ ----------
909
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
910
+ Document word matrix.
911
+
912
+ sub_sampling : bool
913
+ Do sub-sampling or not.
914
+
915
+ Returns
916
+ -------
917
+ score : float
918
+ Perplexity score.
919
+ """
920
+ check_is_fitted(self)
921
+ X = self._check_non_neg_array(
922
+ X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity"
923
+ )
924
+ return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
925
+
926
+ @property
927
+ def _n_features_out(self):
928
+ """Number of transformed output features."""
929
+ return self.components_.shape[0]
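A short usage sketch of mini-batch training with `partial_fit` (assumes scikit-learn is installed; the toy data below is illustrative):

    import numpy as np
    from sklearn.decomposition import LatentDirichletAllocation

    rng = np.random.default_rng(0)
    X = rng.poisson(1.0, size=(500, 30))             # toy document-word counts
    lda = LatentDirichletAllocation(n_components=5, total_samples=500, random_state=0)
    for batch in np.array_split(X, 5):               # stream mini-batches
        lda.partial_fit(batch)
    doc_topics = lda.transform(X[:2])                # normalized topic proportions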
venv/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py ADDED
@@ -0,0 +1,2443 @@
1
+ """ Non-negative matrix factorization.
2
+ """
3
+ # Author: Vlad Niculae
4
+ # Lars Buitinck
5
+ # Mathieu Blondel <[email protected]>
6
+ # Tom Dupre la Tour
7
+ # License: BSD 3 clause
8
+
9
+ import itertools
10
+ import time
11
+ import warnings
12
+ from abc import ABC
13
+ from math import sqrt
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ import scipy.sparse as sp
18
+ from scipy import linalg
19
+
20
+ from .._config import config_context
21
+ from ..base import (
22
+ BaseEstimator,
23
+ ClassNamePrefixFeaturesOutMixin,
24
+ TransformerMixin,
25
+ _fit_context,
26
+ )
27
+ from ..exceptions import ConvergenceWarning
28
+ from ..utils import check_array, check_random_state, gen_batches, metadata_routing
29
+ from ..utils._param_validation import (
30
+ Hidden,
31
+ Interval,
32
+ StrOptions,
33
+ validate_params,
34
+ )
35
+ from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
36
+ from ..utils.validation import (
37
+ check_is_fitted,
38
+ check_non_negative,
39
+ )
40
+ from ._cdnmf_fast import _update_cdnmf_fast
41
+
42
+ EPSILON = np.finfo(np.float32).eps
43
+
44
+
45
+ def norm(x):
46
+ """Dot product-based Euclidean norm implementation.
47
+
48
+ See: http://fa.bianp.net/blog/2011/computing-the-vector-norm/
49
+
50
+ Parameters
51
+ ----------
52
+ x : array-like
53
+ Vector for which to compute the norm.
54
+ """
55
+ return sqrt(squared_norm(x))
56
+
57
+
58
+ def trace_dot(X, Y):
59
+ """Trace of np.dot(X, Y.T).
60
+
61
+ Parameters
62
+ ----------
63
+ X : array-like
64
+ First matrix.
65
+ Y : array-like
66
+ Second matrix.
67
+ """
68
+ return np.dot(X.ravel(), Y.ravel())
69
+
70
+
71
+ def _check_init(A, shape, whom):
72
+ A = check_array(A)
73
+ if shape[0] != "auto" and A.shape[0] != shape[0]:
74
+ raise ValueError(
75
+ f"Array with wrong first dimension passed to {whom}. Expected {shape[0]}, "
76
+ f"but got {A.shape[0]}."
77
+ )
78
+ if shape[1] != "auto" and A.shape[1] != shape[1]:
79
+ raise ValueError(
80
+ f"Array with wrong second dimension passed to {whom}. Expected {shape[1]}, "
81
+ f"but got {A.shape[1]}."
82
+ )
83
+ check_non_negative(A, whom)
84
+ if np.max(A) == 0:
85
+ raise ValueError(f"Array passed to {whom} is full of zeros.")
86
+
87
+
88
+ def _beta_divergence(X, W, H, beta, square_root=False):
89
+ """Compute the beta-divergence of X and dot(W, H).
90
+
91
+ Parameters
92
+ ----------
93
+ X : float or array-like of shape (n_samples, n_features)
94
+
95
+ W : float or array-like of shape (n_samples, n_components)
96
+
97
+ H : float or array-like of shape (n_components, n_features)
98
+
99
+ beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}
100
+ Parameter of the beta-divergence.
101
+ If beta == 2, this is half the Frobenius *squared* norm.
102
+ If beta == 1, this is the generalized Kullback-Leibler divergence.
103
+ If beta == 0, this is the Itakura-Saito divergence.
104
+ Else, this is the general beta-divergence.
105
+
106
+ square_root : bool, default=False
107
+ If True, return np.sqrt(2 * res)
108
+ For beta == 2, it corresponds to the Frobenius norm.
109
+
110
+ Returns
111
+ -------
112
+ res : float
113
+ Beta divergence of X and np.dot(W, H).
114
+ """
115
+ beta = _beta_loss_to_float(beta)
116
+
117
+ # The method can be called with scalars
118
+ if not sp.issparse(X):
119
+ X = np.atleast_2d(X)
120
+ W = np.atleast_2d(W)
121
+ H = np.atleast_2d(H)
122
+
123
+ # Frobenius norm
124
+ if beta == 2:
125
+ # Avoid the creation of the dense np.dot(W, H) if X is sparse.
126
+ if sp.issparse(X):
127
+ norm_X = np.dot(X.data, X.data)
128
+ norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
129
+ cross_prod = trace_dot((X @ H.T), W)
130
+ res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
131
+ else:
132
+ res = squared_norm(X - np.dot(W, H)) / 2.0
133
+
134
+ if square_root:
135
+ return np.sqrt(res * 2)
136
+ else:
137
+ return res
138
+
139
+ if sp.issparse(X):
140
+ # compute np.dot(W, H) only where X is nonzero
141
+ WH_data = _special_sparse_dot(W, H, X).data
142
+ X_data = X.data
143
+ else:
144
+ WH = np.dot(W, H)
145
+ WH_data = WH.ravel()
146
+ X_data = X.ravel()
147
+
148
+ # do not affect the zeros: here 0 ** (-1) = 0 and not infinity
149
+ indices = X_data > EPSILON
150
+ WH_data = WH_data[indices]
151
+ X_data = X_data[indices]
152
+
153
+ # used to avoid division by zero
154
+ WH_data[WH_data < EPSILON] = EPSILON
155
+
156
+ # generalized Kullback-Leibler divergence
157
+ if beta == 1:
158
+ # fast and memory efficient computation of np.sum(np.dot(W, H))
159
+ sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
160
+ # computes np.sum(X * log(X / WH)) only where X is nonzero
161
+ div = X_data / WH_data
162
+ res = np.dot(X_data, np.log(div))
163
+ # add full np.sum(np.dot(W, H)) - np.sum(X)
164
+ res += sum_WH - X_data.sum()
165
+
166
+ # Itakura-Saito divergence
167
+ elif beta == 0:
168
+ div = X_data / WH_data
169
+ res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
170
+
171
+ # beta-divergence, beta not in (0, 1, 2)
172
+ else:
173
+ if sp.issparse(X):
174
+ # slow loop, but memory efficient computation of :
175
+ # np.sum(np.dot(W, H) ** beta)
176
+ sum_WH_beta = 0
177
+ for i in range(X.shape[1]):
178
+ sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
179
+
180
+ else:
181
+ sum_WH_beta = np.sum(WH**beta)
182
+
183
+ sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
184
+ res = (X_data**beta).sum() - beta * sum_X_WH
185
+ res += sum_WH_beta * (beta - 1)
186
+ res /= beta * (beta - 1)
187
+
188
+ if square_root:
189
+ res = max(res, 0) # avoid negative number due to rounding errors
190
+ return np.sqrt(2 * res)
191
+ else:
192
+ return res
193
+
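For `beta == 2`, the divergence computed above is half the squared Frobenius norm of the residual; a minimal check (illustrative only):

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.random((6, 4))
    W = rng.random((6, 2))
    H = rng.random((2, 4))
    # Matches _beta_divergence(X, W, H, beta=2) up to floating-point error.
    half_frob_sq = 0.5 * np.linalg.norm(X - W @ H, "fro") ** 2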
194
+
195
+ def _special_sparse_dot(W, H, X):
196
+ """Computes np.dot(W, H), only where X is non zero."""
197
+ if sp.issparse(X):
198
+ ii, jj = X.nonzero()
199
+ n_vals = ii.shape[0]
200
+ dot_vals = np.empty(n_vals)
201
+ n_components = W.shape[1]
202
+
203
+ batch_size = max(n_components, n_vals // n_components)
204
+ for start in range(0, n_vals, batch_size):
205
+ batch = slice(start, start + batch_size)
206
+ dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(
207
+ axis=1
208
+ )
209
+
210
+ WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
211
+ return WH.tocsr()
212
+ else:
213
+ return np.dot(W, H)
214
+
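A sketch of what `_special_sparse_dot` computes for sparse `X`: the values of `W @ H` only at the nonzero positions of `X` (illustrative only):

    import numpy as np
    import scipy.sparse as sp

    rng = np.random.default_rng(0)
    W = rng.random((5, 2))
    H = rng.random((2, 4))
    X = sp.random(5, 4, density=0.3, random_state=0, format="csr")
    ii, jj = X.nonzero()
    # Row-wise dot products of W[ii] and H[:, jj] equal (W @ H)[ii, jj].
    vals = np.einsum("ij,ij->i", W[ii, :], H.T[jj, :])
    assert np.allclose(vals, (W @ H)[ii, jj])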
215
+
216
+ def _beta_loss_to_float(beta_loss):
217
+ """Convert string beta_loss to float."""
218
+ beta_loss_map = {"frobenius": 2, "kullback-leibler": 1, "itakura-saito": 0}
219
+ if isinstance(beta_loss, str):
220
+ beta_loss = beta_loss_map[beta_loss]
221
+ return beta_loss
222
+
223
+
224
+ def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None):
225
+ """Algorithms for NMF initialization.
226
+
227
+ Computes an initial guess for the non-negative
228
+ rank k matrix approximation for X: X = WH.
229
+
230
+ Parameters
231
+ ----------
232
+ X : array-like of shape (n_samples, n_features)
233
+ The data matrix to be decomposed.
234
+
235
+ n_components : int
236
+ The number of components desired in the approximation.
237
+
238
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None
239
+ Method used to initialize the procedure.
240
+ Valid options:
241
+
242
+ - None: 'nndsvda' if n_components <= min(n_samples, n_features),
243
+ otherwise 'random'.
244
+
245
+ - 'random': non-negative random matrices, scaled with:
246
+ sqrt(X.mean() / n_components)
247
+
248
+ - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
249
+ initialization (better for sparseness)
250
+
251
+ - 'nndsvda': NNDSVD with zeros filled with the average of X
252
+ (better when sparsity is not desired)
253
+
254
+ - 'nndsvdar': NNDSVD with zeros filled with small random values
255
+ (generally faster, less accurate alternative to NNDSVDa
256
+ for when sparsity is not desired)
257
+
258
+ - 'custom': use custom matrices W and H
259
+
260
+ .. versionchanged:: 1.1
261
+ When `init=None` and `n_components` is less than `n_samples` and
262
+ `n_features`, it defaults to `nndsvda` instead of `nndsvd`.
263
+
264
+ eps : float, default=1e-6
265
+ Truncate all values less than this in the output to zero.
266
+
267
+ random_state : int, RandomState instance or None, default=None
268
+ Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for
269
+ reproducible results across multiple function calls.
270
+ See :term:`Glossary <random_state>`.
271
+
272
+ Returns
273
+ -------
274
+ W : array-like of shape (n_samples, n_components)
275
+ Initial guesses for solving X ~= WH.
276
+
277
+ H : array-like of shape (n_components, n_features)
278
+ Initial guesses for solving X ~= WH.
279
+
280
+ References
281
+ ----------
282
+ C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
283
+ nonnegative matrix factorization - Pattern Recognition, 2008
284
+ http://tinyurl.com/nndsvd
285
+ """
286
+ check_non_negative(X, "NMF initialization")
287
+ n_samples, n_features = X.shape
288
+
289
+ if (
290
+ init is not None
291
+ and init != "random"
292
+ and n_components > min(n_samples, n_features)
293
+ ):
294
+ raise ValueError(
295
+ "init = '{}' can only be used when "
296
+ "n_components <= min(n_samples, n_features)".format(init)
297
+ )
298
+
299
+ if init is None:
300
+ if n_components <= min(n_samples, n_features):
301
+ init = "nndsvda"
302
+ else:
303
+ init = "random"
304
+
305
+ # Random initialization
306
+ if init == "random":
307
+ avg = np.sqrt(X.mean() / n_components)
308
+ rng = check_random_state(random_state)
309
+ H = avg * rng.standard_normal(size=(n_components, n_features)).astype(
310
+ X.dtype, copy=False
311
+ )
312
+ W = avg * rng.standard_normal(size=(n_samples, n_components)).astype(
313
+ X.dtype, copy=False
314
+ )
315
+ np.abs(H, out=H)
316
+ np.abs(W, out=W)
317
+ return W, H
318
+
319
+ # NNDSVD initialization
320
+ U, S, V = randomized_svd(X, n_components, random_state=random_state)
321
+ W = np.zeros_like(U)
322
+ H = np.zeros_like(V)
323
+
324
+ # The leading singular triplet is non-negative
325
+ # so it can be used as is for initialization.
326
+ W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
327
+ H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
328
+
329
+ for j in range(1, n_components):
330
+ x, y = U[:, j], V[j, :]
331
+
332
+ # extract positive and negative parts of column vectors
333
+ x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
334
+ x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
335
+
336
+ # and their norms
337
+ x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
338
+ x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
339
+
340
+ m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
341
+
342
+ # choose update
343
+ if m_p > m_n:
344
+ u = x_p / x_p_nrm
345
+ v = y_p / y_p_nrm
346
+ sigma = m_p
347
+ else:
348
+ u = x_n / x_n_nrm
349
+ v = y_n / y_n_nrm
350
+ sigma = m_n
351
+
352
+ lbd = np.sqrt(S[j] * sigma)
353
+ W[:, j] = lbd * u
354
+ H[j, :] = lbd * v
355
+
356
+ W[W < eps] = 0
357
+ H[H < eps] = 0
358
+
359
+ if init == "nndsvd":
360
+ pass
361
+ elif init == "nndsvda":
362
+ avg = X.mean()
363
+ W[W == 0] = avg
364
+ H[H == 0] = avg
365
+ elif init == "nndsvdar":
366
+ rng = check_random_state(random_state)
367
+ avg = X.mean()
368
+ W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100)
369
+ H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100)
370
+ else:
371
+ raise ValueError(
372
+ "Invalid init parameter: got %r instead of one of %r"
373
+ % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar"))
374
+ )
375
+
376
+ return W, H
377
+
378
+
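+ # Editorial sketch (illustrative, not part of the upstream module): exercising the
+ # initialization helper above on a small dense non-negative matrix. The private import
+ # path is an assumption; the public equivalent is NMF(init=...).
+ import numpy as np
+ from sklearn.decomposition._nmf import _initialize_nmf
+
+ X_demo = np.abs(np.random.RandomState(0).standard_normal((6, 4)))
+ W0, H0 = _initialize_nmf(X_demo, n_components=2, init="nndsvda", random_state=0)
+ # Both factors have the documented shapes and are non-negative by construction.
+ assert W0.shape == (6, 2) and H0.shape == (2, 4)
+ assert (W0 >= 0).all() and (H0 >= 0).all()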
379
+ def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state):
380
+ """Helper function for _fit_coordinate_descent.
381
+
382
+ Update W to minimize the objective function, iterating once over all
383
+ coordinates. By symmetry, to update H, one can call
384
+ _update_coordinate_descent(X.T, Ht, W, ...).
385
+
386
+ """
387
+ n_components = Ht.shape[1]
388
+
389
+ HHt = np.dot(Ht.T, Ht)
390
+ XHt = safe_sparse_dot(X, Ht)
391
+
392
+ # L2 regularization corresponds to increase of the diagonal of HHt
393
+ if l2_reg != 0.0:
394
+ # adds l2_reg only on the diagonal
395
+ HHt.flat[:: n_components + 1] += l2_reg
396
+ # L1 regularization corresponds to decrease of each element of XHt
397
+ if l1_reg != 0.0:
398
+ XHt -= l1_reg
399
+
400
+ if shuffle:
401
+ permutation = random_state.permutation(n_components)
402
+ else:
403
+ permutation = np.arange(n_components)
404
+ # The following seems to be required on 64-bit Windows w/ Python 3.5.
405
+ permutation = np.asarray(permutation, dtype=np.intp)
406
+ return _update_cdnmf_fast(W, HHt, XHt, permutation)
407
+
408
+
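+ # Editorial sketch (illustrative, not part of the upstream module): the
+ # ``HHt.flat[:: n_components + 1] += l2_reg`` line above adds ``l2_reg`` to the
+ # diagonal of the square Gram matrix only, without building an identity matrix.
+ import numpy as np
+
+ gram = np.ones((3, 3))          # stands in for HHt with n_components = 3
+ gram.flat[:: 3 + 1] += 0.5      # same strided-flat trick as in the helper above
+ assert np.allclose(gram, np.ones((3, 3)) + 0.5 * np.eye(3))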
409
+ def _fit_coordinate_descent(
410
+ X,
411
+ W,
412
+ H,
413
+ tol=1e-4,
414
+ max_iter=200,
415
+ l1_reg_W=0,
416
+ l1_reg_H=0,
417
+ l2_reg_W=0,
418
+ l2_reg_H=0,
419
+ update_H=True,
420
+ verbose=0,
421
+ shuffle=False,
422
+ random_state=None,
423
+ ):
424
+ """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
425
+
426
+ The objective function is minimized with an alternating minimization of W
427
+ and H. Each minimization is done with a cyclic (up to a permutation of the
428
+ features) Coordinate Descent.
429
+
430
+ Parameters
431
+ ----------
432
+ X : array-like of shape (n_samples, n_features)
433
+ Constant matrix.
434
+
435
+ W : array-like of shape (n_samples, n_components)
436
+ Initial guess for the solution.
437
+
438
+ H : array-like of shape (n_components, n_features)
439
+ Initial guess for the solution.
440
+
441
+ tol : float, default=1e-4
442
+ Tolerance of the stopping condition.
443
+
444
+ max_iter : int, default=200
445
+ Maximum number of iterations before timing out.
446
+
447
+ l1_reg_W : float, default=0.
448
+ L1 regularization parameter for W.
449
+
450
+ l1_reg_H : float, default=0.
451
+ L1 regularization parameter for H.
452
+
453
+ l2_reg_W : float, default=0.
454
+ L2 regularization parameter for W.
455
+
456
+ l2_reg_H : float, default=0.
457
+ L2 regularization parameter for H.
458
+
459
+ update_H : bool, default=True
460
+ If True, both W and H will be estimated from initial guesses.
+ If False, only W will be estimated.
462
+
463
+ verbose : int, default=0
464
+ The verbosity level.
465
+
466
+ shuffle : bool, default=False
467
+ If true, randomize the order of coordinates in the CD solver.
468
+
469
+ random_state : int, RandomState instance or None, default=None
470
+ Used to randomize the coordinates in the CD solver, when
471
+ ``shuffle`` is set to ``True``. Pass an int for reproducible
472
+ results across multiple function calls.
473
+ See :term:`Glossary <random_state>`.
474
+
475
+ Returns
476
+ -------
477
+ W : ndarray of shape (n_samples, n_components)
478
+ Solution to the non-negative least squares problem.
479
+
480
+ H : ndarray of shape (n_components, n_features)
481
+ Solution to the non-negative least squares problem.
482
+
483
+ n_iter : int
484
+ The number of iterations done by the algorithm.
485
+
486
+ References
487
+ ----------
488
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
489
+ factorizations" <10.1587/transfun.E92.A.708>`
490
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
491
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
492
+ """
493
+ # so W and Ht are both in C order in memory
494
+ Ht = check_array(H.T, order="C")
495
+ X = check_array(X, accept_sparse="csr")
496
+
497
+ rng = check_random_state(random_state)
498
+
499
+ for n_iter in range(1, max_iter + 1):
500
+ violation = 0.0
501
+
502
+ # Update W
503
+ violation += _update_coordinate_descent(
504
+ X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng
505
+ )
506
+ # Update H
507
+ if update_H:
508
+ violation += _update_coordinate_descent(
509
+ X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng
510
+ )
511
+
512
+ if n_iter == 1:
513
+ violation_init = violation
514
+
515
+ if violation_init == 0:
516
+ break
517
+
518
+ if verbose:
519
+ print("violation:", violation / violation_init)
520
+
521
+ if violation / violation_init <= tol:
522
+ if verbose:
523
+ print("Converged at iteration", n_iter + 1)
524
+ break
525
+
526
+ return W, Ht.T, n_iter
527
+
528
+
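+ # Editorial sketch (illustrative, not part of the upstream module): the coordinate
+ # descent solver above is normally reached through the public estimator with
+ # ``solver="cd"``; the toy matrix below is a hypothetical example.
+ import numpy as np
+ from sklearn.decomposition import NMF
+
+ X_toy = np.array([[1.0, 1.0], [2.0, 1.0], [3.0, 1.2], [4.0, 1.0], [5.0, 0.8], [6.0, 1.0]])
+ cd_model = NMF(n_components=2, solver="cd", init="nndsvda", random_state=0, max_iter=500)
+ W_cd = cd_model.fit_transform(X_toy)
+ assert W_cd.shape == (6, 2) and cd_model.components_.shape == (2, 2)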
529
+ def _multiplicative_update_w(
530
+ X,
531
+ W,
532
+ H,
533
+ beta_loss,
534
+ l1_reg_W,
535
+ l2_reg_W,
536
+ gamma,
537
+ H_sum=None,
538
+ HHt=None,
539
+ XHt=None,
540
+ update_H=True,
541
+ ):
542
+ """Update W in Multiplicative Update NMF."""
543
+ if beta_loss == 2:
544
+ # Numerator
545
+ if XHt is None:
546
+ XHt = safe_sparse_dot(X, H.T)
547
+ if update_H:
548
+ # avoid a copy of XHt, which will be re-computed (update_H=True)
549
+ numerator = XHt
550
+ else:
551
+ # preserve the XHt, which is not re-computed (update_H=False)
552
+ numerator = XHt.copy()
553
+
554
+ # Denominator
555
+ if HHt is None:
556
+ HHt = np.dot(H, H.T)
557
+ denominator = np.dot(W, HHt)
558
+
559
+ else:
560
+ # Numerator
561
+ # if X is sparse, compute WH only where X is non zero
562
+ WH_safe_X = _special_sparse_dot(W, H, X)
563
+ if sp.issparse(X):
564
+ WH_safe_X_data = WH_safe_X.data
565
+ X_data = X.data
566
+ else:
567
+ WH_safe_X_data = WH_safe_X
568
+ X_data = X
569
+ # copy used in the Denominator
570
+ WH = WH_safe_X.copy()
571
+ if beta_loss - 1.0 < 0:
572
+ WH[WH < EPSILON] = EPSILON
573
+
574
+ # to avoid taking a negative power of zero
575
+ if beta_loss - 2.0 < 0:
576
+ WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
577
+
578
+ if beta_loss == 1:
579
+ np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
580
+ elif beta_loss == 0:
581
+ # speeds up computation time
582
+ # refer to /numpy/numpy/issues/9363
583
+ WH_safe_X_data **= -1
584
+ WH_safe_X_data **= 2
585
+ # element-wise multiplication
586
+ WH_safe_X_data *= X_data
587
+ else:
588
+ WH_safe_X_data **= beta_loss - 2
589
+ # element-wise multiplication
590
+ WH_safe_X_data *= X_data
591
+
592
+ # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
593
+ numerator = safe_sparse_dot(WH_safe_X, H.T)
594
+
595
+ # Denominator
596
+ if beta_loss == 1:
597
+ if H_sum is None:
598
+ H_sum = np.sum(H, axis=1) # shape(n_components, )
599
+ denominator = H_sum[np.newaxis, :]
600
+
601
+ else:
602
+ # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T)
603
+ if sp.issparse(X):
604
+ # memory efficient computation
605
+ # (compute row by row, avoiding the dense matrix WH)
606
+ WHHt = np.empty(W.shape)
607
+ for i in range(X.shape[0]):
608
+ WHi = np.dot(W[i, :], H)
609
+ if beta_loss - 1 < 0:
610
+ WHi[WHi < EPSILON] = EPSILON
611
+ WHi **= beta_loss - 1
612
+ WHHt[i, :] = np.dot(WHi, H.T)
613
+ else:
614
+ WH **= beta_loss - 1
615
+ WHHt = np.dot(WH, H.T)
616
+ denominator = WHHt
617
+
618
+ # Add L1 and L2 regularization
619
+ if l1_reg_W > 0:
620
+ denominator += l1_reg_W
621
+ if l2_reg_W > 0:
622
+ denominator = denominator + l2_reg_W * W
623
+ denominator[denominator == 0] = EPSILON
624
+
625
+ numerator /= denominator
626
+ delta_W = numerator
627
+
628
+ # gamma is in ]0, 1]
629
+ if gamma != 1:
630
+ delta_W **= gamma
631
+
632
+ W *= delta_W
633
+
634
+ return W, H_sum, HHt, XHt
635
+
636
+
637
+ def _multiplicative_update_h(
638
+ X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None
639
+ ):
640
+ """update H in Multiplicative Update NMF."""
641
+ if beta_loss == 2:
642
+ numerator = safe_sparse_dot(W.T, X)
643
+ denominator = np.linalg.multi_dot([W.T, W, H])
644
+
645
+ else:
646
+ # Numerator
647
+ WH_safe_X = _special_sparse_dot(W, H, X)
648
+ if sp.issparse(X):
649
+ WH_safe_X_data = WH_safe_X.data
650
+ X_data = X.data
651
+ else:
652
+ WH_safe_X_data = WH_safe_X
653
+ X_data = X
654
+ # copy used in the Denominator
655
+ WH = WH_safe_X.copy()
656
+ if beta_loss - 1.0 < 0:
657
+ WH[WH < EPSILON] = EPSILON
658
+
659
+ # to avoid division by zero
660
+ if beta_loss - 2.0 < 0:
661
+ WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
662
+
663
+ if beta_loss == 1:
664
+ np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
665
+ elif beta_loss == 0:
666
+ # speeds up computation time
667
+ # refer to /numpy/numpy/issues/9363
668
+ WH_safe_X_data **= -1
669
+ WH_safe_X_data **= 2
670
+ # element-wise multiplication
671
+ WH_safe_X_data *= X_data
672
+ else:
673
+ WH_safe_X_data **= beta_loss - 2
674
+ # element-wise multiplication
675
+ WH_safe_X_data *= X_data
676
+
677
+ # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
678
+ numerator = safe_sparse_dot(W.T, WH_safe_X)
679
+
680
+ # Denominator
681
+ if beta_loss == 1:
682
+ W_sum = np.sum(W, axis=0) # shape(n_components, )
683
+ W_sum[W_sum == 0] = 1.0
684
+ denominator = W_sum[:, np.newaxis]
685
+
686
+ # beta_loss not in (1, 2)
687
+ else:
688
+ # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1)
689
+ if sp.issparse(X):
690
+ # memory efficient computation
691
+ # (compute column by column, avoiding the dense matrix WH)
692
+ WtWH = np.empty(H.shape)
693
+ for i in range(X.shape[1]):
694
+ WHi = np.dot(W, H[:, i])
695
+ if beta_loss - 1 < 0:
696
+ WHi[WHi < EPSILON] = EPSILON
697
+ WHi **= beta_loss - 1
698
+ WtWH[:, i] = np.dot(W.T, WHi)
699
+ else:
700
+ WH **= beta_loss - 1
701
+ WtWH = np.dot(W.T, WH)
702
+ denominator = WtWH
703
+
704
+ # Add L1 and L2 regularization
705
+ if l1_reg_H > 0:
706
+ denominator += l1_reg_H
707
+ if l2_reg_H > 0:
708
+ denominator = denominator + l2_reg_H * H
709
+ denominator[denominator == 0] = EPSILON
710
+
711
+ if A is not None and B is not None:
712
+ # Updates for the online nmf
713
+ if gamma != 1:
714
+ H **= 1 / gamma
715
+ numerator *= H
716
+ A *= rho
717
+ B *= rho
718
+ A += numerator
719
+ B += denominator
720
+ H = A / B
721
+
722
+ if gamma != 1:
723
+ H **= gamma
724
+ else:
725
+ delta_H = numerator
726
+ delta_H /= denominator
727
+ if gamma != 1:
728
+ delta_H **= gamma
729
+ H *= delta_H
730
+
731
+ return H
732
+
733
+
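+ # Editorial sketch (illustrative, not part of the upstream module): with no
+ # regularization and gamma == 1, the Frobenius branch (beta_loss == 2) above reduces
+ # to H <- H * (W.T @ X) / (W.T @ W @ H), and one such step never increases the
+ # squared reconstruction error (Lee & Seung, 2001).
+ import numpy as np
+
+ rng = np.random.RandomState(0)
+ X_mu = np.abs(rng.standard_normal((5, 4))) + 0.1
+ W_mu = np.abs(rng.standard_normal((5, 2))) + 0.1
+ H_mu = np.abs(rng.standard_normal((2, 4))) + 0.1
+ err_before = np.linalg.norm(X_mu - W_mu @ H_mu) ** 2
+ H_mu *= (W_mu.T @ X_mu) / (W_mu.T @ W_mu @ H_mu)
+ err_after = np.linalg.norm(X_mu - W_mu @ H_mu) ** 2
+ assert err_after <= err_before + 1e-12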
734
+ def _fit_multiplicative_update(
735
+ X,
736
+ W,
737
+ H,
738
+ beta_loss="frobenius",
739
+ max_iter=200,
740
+ tol=1e-4,
741
+ l1_reg_W=0,
742
+ l1_reg_H=0,
743
+ l2_reg_W=0,
744
+ l2_reg_H=0,
745
+ update_H=True,
746
+ verbose=0,
747
+ ):
748
+ """Compute Non-negative Matrix Factorization with Multiplicative Update.
749
+
750
+ The objective function is _beta_divergence(X, WH) and is minimized with an
751
+ alternating minimization of W and H. Each minimization is done with a
752
+ Multiplicative Update.
753
+
754
+ Parameters
755
+ ----------
756
+ X : array-like of shape (n_samples, n_features)
757
+ Constant input matrix.
758
+
759
+ W : array-like of shape (n_samples, n_components)
760
+ Initial guess for the solution.
761
+
762
+ H : array-like of shape (n_components, n_features)
763
+ Initial guess for the solution.
764
+
765
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
766
+ 'itakura-saito'}, default='frobenius'
767
+ String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
768
+ Beta divergence to be minimized, measuring the distance between X
769
+ and the dot product WH. Note that values different from 'frobenius'
770
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
771
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
772
+ matrix X cannot contain zeros.
773
+
774
+ max_iter : int, default=200
775
+ Number of iterations.
776
+
777
+ tol : float, default=1e-4
778
+ Tolerance of the stopping condition.
779
+
780
+ l1_reg_W : float, default=0.
781
+ L1 regularization parameter for W.
782
+
783
+ l1_reg_H : float, default=0.
784
+ L1 regularization parameter for H.
785
+
786
+ l2_reg_W : float, default=0.
787
+ L2 regularization parameter for W.
788
+
789
+ l2_reg_H : float, default=0.
790
+ L2 regularization parameter for H.
791
+
792
+ update_H : bool, default=True
793
+ If True, both W and H will be estimated from initial guesses.
+ If False, only W will be estimated.
795
+
796
+ verbose : int, default=0
797
+ The verbosity level.
798
+
799
+ Returns
800
+ -------
801
+ W : ndarray of shape (n_samples, n_components)
802
+ Solution to the non-negative least squares problem.
803
+
804
+ H : ndarray of shape (n_components, n_features)
805
+ Solution to the non-negative least squares problem.
806
+
807
+ n_iter : int
808
+ The number of iterations done by the algorithm.
809
+
810
+ References
811
+ ----------
812
+ Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix
813
+ Factorization. Adv. Neural Inform. Process. Syst.. 13.
814
+ Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
815
+ factorization with the beta-divergence. Neural Computation, 23(9).
816
+ """
817
+ start_time = time.time()
818
+
819
+ beta_loss = _beta_loss_to_float(beta_loss)
820
+
821
+ # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
822
+ if beta_loss < 1:
823
+ gamma = 1.0 / (2.0 - beta_loss)
824
+ elif beta_loss > 2:
825
+ gamma = 1.0 / (beta_loss - 1.0)
826
+ else:
827
+ gamma = 1.0
828
+
829
+ # used for the convergence criterion
830
+ error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
831
+ previous_error = error_at_init
832
+
833
+ H_sum, HHt, XHt = None, None, None
834
+ for n_iter in range(1, max_iter + 1):
835
+ # update W
836
+ # H_sum, HHt and XHt are saved and reused if not update_H
837
+ W, H_sum, HHt, XHt = _multiplicative_update_w(
838
+ X,
839
+ W,
840
+ H,
841
+ beta_loss=beta_loss,
842
+ l1_reg_W=l1_reg_W,
843
+ l2_reg_W=l2_reg_W,
844
+ gamma=gamma,
845
+ H_sum=H_sum,
846
+ HHt=HHt,
847
+ XHt=XHt,
848
+ update_H=update_H,
849
+ )
850
+
851
+ # necessary for stability with beta_loss < 1
852
+ if beta_loss < 1:
853
+ W[W < np.finfo(np.float64).eps] = 0.0
854
+
855
+ # update H (only at fit or fit_transform)
856
+ if update_H:
857
+ H = _multiplicative_update_h(
858
+ X,
859
+ W,
860
+ H,
861
+ beta_loss=beta_loss,
862
+ l1_reg_H=l1_reg_H,
863
+ l2_reg_H=l2_reg_H,
864
+ gamma=gamma,
865
+ )
866
+
867
+ # These values will be recomputed since H changed
868
+ H_sum, HHt, XHt = None, None, None
869
+
870
+ # necessary for stability with beta_loss < 1
871
+ if beta_loss <= 1:
872
+ H[H < np.finfo(np.float64).eps] = 0.0
873
+
874
+ # test convergence criterion every 10 iterations
875
+ if tol > 0 and n_iter % 10 == 0:
876
+ error = _beta_divergence(X, W, H, beta_loss, square_root=True)
877
+
878
+ if verbose:
879
+ iter_time = time.time()
880
+ print(
881
+ "Epoch %02d reached after %.3f seconds, error: %f"
882
+ % (n_iter, iter_time - start_time, error)
883
+ )
884
+
885
+ if (previous_error - error) / error_at_init < tol:
886
+ break
887
+ previous_error = error
888
+
889
+ # do not print if we have already printed in the convergence test
890
+ if verbose and (tol == 0 or n_iter % 10 != 0):
891
+ end_time = time.time()
892
+ print(
893
+ "Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)
894
+ )
895
+
896
+ return W, H, n_iter
897
+
898
+
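+ # Editorial sketch (illustrative, not part of the upstream module): the
+ # multiplicative-update loop above is selected through the public estimator with
+ # ``solver="mu"``; it is the only solver that supports beta losses other than
+ # Frobenius, e.g. 'kullback-leibler' below on hypothetical strictly positive data.
+ import numpy as np
+ from sklearn.decomposition import NMF
+
+ rng = np.random.RandomState(0)
+ X_kl = np.abs(rng.standard_normal((20, 8))) + 0.1
+ kl_model = NMF(n_components=3, solver="mu", beta_loss="kullback-leibler", init="nndsvda", random_state=0, max_iter=500)
+ W_kl = kl_model.fit_transform(X_kl)
+ assert W_kl.shape == (20, 3) and kl_model.reconstruction_err_ >= 0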
899
+ @validate_params(
900
+ {
901
+ "X": ["array-like", "sparse matrix"],
902
+ "W": ["array-like", None],
903
+ "H": ["array-like", None],
904
+ "update_H": ["boolean"],
905
+ },
906
+ prefer_skip_nested_validation=False,
907
+ )
908
+ def non_negative_factorization(
909
+ X,
910
+ W=None,
911
+ H=None,
912
+ n_components="warn",
913
+ *,
914
+ init=None,
915
+ update_H=True,
916
+ solver="cd",
917
+ beta_loss="frobenius",
918
+ tol=1e-4,
919
+ max_iter=200,
920
+ alpha_W=0.0,
921
+ alpha_H="same",
922
+ l1_ratio=0.0,
923
+ random_state=None,
924
+ verbose=0,
925
+ shuffle=False,
926
+ ):
927
+ """Compute Non-negative Matrix Factorization (NMF).
928
+
929
+ Find two non-negative matrices (W, H) whose product approximates the non-
930
+ negative matrix X. This factorization can be used for example for
931
+ dimensionality reduction, source separation or topic extraction.
932
+
933
+ The objective function is:
934
+
935
+ .. math::
936
+
937
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
938
+
939
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
940
+
941
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
942
+
943
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
944
+
945
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
946
+
947
+ Where:
948
+
949
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
950
+
951
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
952
+
953
+ The generic norm :math:`||X - WH||_{loss}^2` may represent
954
+ the Frobenius norm or another supported beta-divergence loss.
955
+ The choice between options is controlled by the `beta_loss` parameter.
956
+
957
+ The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
958
+ `H` to keep their impact balanced with respect to one another and to the data fit
959
+ term as independent as possible of the size `n_samples` of the training set.
960
+
961
+ The objective function is minimized with an alternating minimization of W
962
+ and H. If H is given and update_H=False, it solves for W only.
963
+
964
+ Note that the transformed data is named W and the components matrix is named H. In
965
+ the NMF literature, the naming convention is usually the opposite since the data
966
+ matrix X is transposed.
967
+
968
+ Parameters
969
+ ----------
970
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
971
+ Constant matrix.
972
+
973
+ W : array-like of shape (n_samples, n_components), default=None
974
+ If `init='custom'`, it is used as initial guess for the solution.
975
+ If `update_H=False`, it is initialised as an array of zeros, unless
976
+ `solver='mu'`, then it is filled with values calculated by
977
+ `np.sqrt(X.mean() / self._n_components)`.
978
+ If `None`, uses the initialisation method specified in `init`.
979
+
980
+ H : array-like of shape (n_components, n_features), default=None
981
+ If `init='custom'`, it is used as initial guess for the solution.
982
+ If `update_H=False`, it is used as a constant, to solve for W only.
983
+ If `None`, uses the initialisation method specified in `init`.
984
+
985
+ n_components : int or {'auto'} or None, default=None
986
+ Number of components, if n_components is not set all features
987
+ are kept.
988
+ If `n_components='auto'`, the number of components is automatically inferred
989
+ from `W` or `H` shapes.
990
+
991
+ .. versionchanged:: 1.4
992
+ Added `'auto'` value.
993
+
994
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
995
+ Method used to initialize the procedure.
996
+
997
+ Valid options:
998
+
999
+ - None: 'nndsvda' if n_components <= min(n_samples, n_features),
+ otherwise 'random'.
1000
+ - 'random': non-negative random matrices, scaled with:
1001
+ `sqrt(X.mean() / n_components)`
1002
+ - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
1003
+ initialization (better for sparseness)
1004
+ - 'nndsvda': NNDSVD with zeros filled with the average of X
1005
+ (better when sparsity is not desired)
1006
+ - 'nndsvdar': NNDSVD with zeros filled with small random values
1007
+ (generally faster, less accurate alternative to NNDSVDa
1008
+ for when sparsity is not desired)
1009
+ - 'custom': If `update_H=True`, use custom matrices W and H which must both
1010
+ be provided. If `update_H=False`, then only custom matrix H is used.
1011
+
1012
+ .. versionchanged:: 0.23
1013
+ The default value of `init` changed from 'random' to None in 0.23.
1014
+
1015
+ .. versionchanged:: 1.1
1016
+ When `init=None` and n_components is less than n_samples and n_features,
+ the default is `nndsvda` instead of `nndsvd`.
1018
+
1019
+ update_H : bool, default=True
1020
+ If True, both W and H will be estimated from initial guesses.
+ If False, only W will be estimated.
1022
+
1023
+ solver : {'cd', 'mu'}, default='cd'
1024
+ Numerical solver to use:
1025
+
1026
+ - 'cd' is a Coordinate Descent solver that uses Fast Hierarchical
1027
+ Alternating Least Squares (Fast HALS).
1028
+ - 'mu' is a Multiplicative Update solver.
1029
+
1030
+ .. versionadded:: 0.17
1031
+ Coordinate Descent solver.
1032
+
1033
+ .. versionadded:: 0.19
1034
+ Multiplicative Update solver.
1035
+
1036
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1037
+ 'itakura-saito'}, default='frobenius'
1038
+ Beta divergence to be minimized, measuring the distance between X
1039
+ and the dot product WH. Note that values different from 'frobenius'
1040
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1041
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
1042
+ matrix X cannot contain zeros. Used only in 'mu' solver.
1043
+
1044
+ .. versionadded:: 0.19
1045
+
1046
+ tol : float, default=1e-4
1047
+ Tolerance of the stopping condition.
1048
+
1049
+ max_iter : int, default=200
1050
+ Maximum number of iterations before timing out.
1051
+
1052
+ alpha_W : float, default=0.0
1053
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1054
+ (default) to have no regularization on `W`.
1055
+
1056
+ .. versionadded:: 1.0
1057
+
1058
+ alpha_H : float or "same", default="same"
1059
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1060
+ have no regularization on `H`. If "same" (default), it takes the same value as
1061
+ `alpha_W`.
1062
+
1063
+ .. versionadded:: 1.0
1064
+
1065
+ l1_ratio : float, default=0.0
1066
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1067
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1068
+ (aka Frobenius Norm).
1069
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1070
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1071
+
1072
+ random_state : int, RandomState instance or None, default=None
1073
+ Used for NMF initialisation (when ``init`` == 'nndsvdar' or
1074
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1075
+ results across multiple function calls.
1076
+ See :term:`Glossary <random_state>`.
1077
+
1078
+ verbose : int, default=0
1079
+ The verbosity level.
1080
+
1081
+ shuffle : bool, default=False
1082
+ If true, randomize the order of coordinates in the CD solver.
1083
+
1084
+ Returns
1085
+ -------
1086
+ W : ndarray of shape (n_samples, n_components)
1087
+ Solution to the non-negative least squares problem.
1088
+
1089
+ H : ndarray of shape (n_components, n_features)
1090
+ Solution to the non-negative least squares problem.
1091
+
1092
+ n_iter : int
1093
+ Actual number of iterations.
1094
+
1095
+ References
1096
+ ----------
1097
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1098
+ factorizations" <10.1587/transfun.E92.A.708>`
1099
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1100
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1101
+
1102
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1103
+ beta-divergence" <10.1162/NECO_a_00168>`
1104
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1105
+
1106
+ Examples
1107
+ --------
1108
+ >>> import numpy as np
1109
+ >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
1110
+ >>> from sklearn.decomposition import non_negative_factorization
1111
+ >>> W, H, n_iter = non_negative_factorization(
1112
+ ... X, n_components=2, init='random', random_state=0)
1113
+ """
1114
+ est = NMF(
1115
+ n_components=n_components,
1116
+ init=init,
1117
+ solver=solver,
1118
+ beta_loss=beta_loss,
1119
+ tol=tol,
1120
+ max_iter=max_iter,
1121
+ random_state=random_state,
1122
+ alpha_W=alpha_W,
1123
+ alpha_H=alpha_H,
1124
+ l1_ratio=l1_ratio,
1125
+ verbose=verbose,
1126
+ shuffle=shuffle,
1127
+ )
1128
+ est._validate_params()
1129
+
1130
+ X = check_array(X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32])
1131
+
1132
+ with config_context(assume_finite=True):
1133
+ W, H, n_iter = est._fit_transform(X, W=W, H=H, update_H=update_H)
1134
+
1135
+ return W, H, n_iter
1136
+
1137
+
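+ # Editorial sketch (illustrative, not part of the upstream module): as documented
+ # above, passing H with ``update_H=False`` keeps H fixed and solves for W only;
+ # H_fixed below is a hypothetical non-negative components matrix matching X's dtype.
+ import numpy as np
+ from sklearn.decomposition import non_negative_factorization
+
+ X_nnf = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]], dtype=np.float64)
+ H_fixed = np.array([[1.0, 0.2], [0.5, 1.0]])
+ W_only, H_out, _ = non_negative_factorization(X_nnf, H=H_fixed, n_components=2, update_H=False, random_state=0)
+ assert np.allclose(H_out, H_fixed)   # H is returned unchanged when update_H=False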
1138
+ class _BaseNMF(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, ABC):
1139
+ """Base class for NMF and MiniBatchNMF."""
1140
+
1141
+ # This prevents ``set_split_inverse_transform`` to be generated for the
1142
+ # non-standard ``W`` arg on ``inverse_transform``.
1143
+ # TODO: remove when W is removed in v1.5 for inverse_transform
1144
+ __metadata_request__inverse_transform = {"W": metadata_routing.UNUSED}
1145
+
1146
+ _parameter_constraints: dict = {
1147
+ "n_components": [
1148
+ Interval(Integral, 1, None, closed="left"),
1149
+ None,
1150
+ StrOptions({"auto"}),
1151
+ Hidden(StrOptions({"warn"})),
1152
+ ],
1153
+ "init": [
1154
+ StrOptions({"random", "nndsvd", "nndsvda", "nndsvdar", "custom"}),
1155
+ None,
1156
+ ],
1157
+ "beta_loss": [
1158
+ StrOptions({"frobenius", "kullback-leibler", "itakura-saito"}),
1159
+ Real,
1160
+ ],
1161
+ "tol": [Interval(Real, 0, None, closed="left")],
1162
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
1163
+ "random_state": ["random_state"],
1164
+ "alpha_W": [Interval(Real, 0, None, closed="left")],
1165
+ "alpha_H": [Interval(Real, 0, None, closed="left"), StrOptions({"same"})],
1166
+ "l1_ratio": [Interval(Real, 0, 1, closed="both")],
1167
+ "verbose": ["verbose"],
1168
+ }
1169
+
1170
+ def __init__(
1171
+ self,
1172
+ n_components="warn",
1173
+ *,
1174
+ init=None,
1175
+ beta_loss="frobenius",
1176
+ tol=1e-4,
1177
+ max_iter=200,
1178
+ random_state=None,
1179
+ alpha_W=0.0,
1180
+ alpha_H="same",
1181
+ l1_ratio=0.0,
1182
+ verbose=0,
1183
+ ):
1184
+ self.n_components = n_components
1185
+ self.init = init
1186
+ self.beta_loss = beta_loss
1187
+ self.tol = tol
1188
+ self.max_iter = max_iter
1189
+ self.random_state = random_state
1190
+ self.alpha_W = alpha_W
1191
+ self.alpha_H = alpha_H
1192
+ self.l1_ratio = l1_ratio
1193
+ self.verbose = verbose
1194
+
1195
+ def _check_params(self, X):
1196
+ # n_components
1197
+ self._n_components = self.n_components
1198
+ if self.n_components == "warn":
1199
+ warnings.warn(
1200
+ (
1201
+ "The default value of `n_components` will change from `None` to"
1202
+ " `'auto'` in 1.6. Set the value of `n_components` to `None`"
1203
+ " explicitly to suppress the warning."
1204
+ ),
1205
+ FutureWarning,
1206
+ )
1207
+ self._n_components = None # Keeping the old default value
1208
+ if self._n_components is None:
1209
+ self._n_components = X.shape[1]
1210
+
1211
+ # beta_loss
1212
+ self._beta_loss = _beta_loss_to_float(self.beta_loss)
1213
+
1214
+ def _check_w_h(self, X, W, H, update_H):
1215
+ """Check W and H, or initialize them."""
1216
+ n_samples, n_features = X.shape
1217
+
1218
+ if self.init == "custom" and update_H:
1219
+ _check_init(H, (self._n_components, n_features), "NMF (input H)")
1220
+ _check_init(W, (n_samples, self._n_components), "NMF (input W)")
1221
+ if self._n_components == "auto":
1222
+ self._n_components = H.shape[0]
1223
+
1224
+ if H.dtype != X.dtype or W.dtype != X.dtype:
1225
+ raise TypeError(
1226
+ "H and W should have the same dtype as X. Got "
1227
+ "H.dtype = {} and W.dtype = {}.".format(H.dtype, W.dtype)
1228
+ )
1229
+
1230
+ elif not update_H:
1231
+ if W is not None:
1232
+ warnings.warn(
1233
+ "When update_H=False, the provided initial W is not used.",
1234
+ RuntimeWarning,
1235
+ )
1236
+
1237
+ _check_init(H, (self._n_components, n_features), "NMF (input H)")
1238
+ if self._n_components == "auto":
1239
+ self._n_components = H.shape[0]
1240
+
1241
+ if H.dtype != X.dtype:
1242
+ raise TypeError(
1243
+ "H should have the same dtype as X. Got H.dtype = {}.".format(
1244
+ H.dtype
1245
+ )
1246
+ )
1247
+
1248
+ # 'mu' solver should not be initialized by zeros
1249
+ if self.solver == "mu":
1250
+ avg = np.sqrt(X.mean() / self._n_components)
1251
+ W = np.full((n_samples, self._n_components), avg, dtype=X.dtype)
1252
+ else:
1253
+ W = np.zeros((n_samples, self._n_components), dtype=X.dtype)
1254
+
1255
+ else:
1256
+ if W is not None or H is not None:
1257
+ warnings.warn(
1258
+ (
1259
+ "When init!='custom', provided W or H are ignored. Set "
1260
+ " init='custom' to use them as initialization."
1261
+ ),
1262
+ RuntimeWarning,
1263
+ )
1264
+
1265
+ if self._n_components == "auto":
1266
+ self._n_components = X.shape[1]
1267
+
1268
+ W, H = _initialize_nmf(
1269
+ X, self._n_components, init=self.init, random_state=self.random_state
1270
+ )
1271
+
1272
+ return W, H
1273
+
1274
+ def _compute_regularization(self, X):
1275
+ """Compute scaled regularization terms."""
1276
+ n_samples, n_features = X.shape
1277
+ alpha_W = self.alpha_W
1278
+ alpha_H = self.alpha_W if self.alpha_H == "same" else self.alpha_H
1279
+
1280
+ l1_reg_W = n_features * alpha_W * self.l1_ratio
1281
+ l1_reg_H = n_samples * alpha_H * self.l1_ratio
1282
+ l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio)
1283
+ l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio)
1284
+
1285
+ return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
1286
+
1287
+ def fit(self, X, y=None, **params):
1288
+ """Learn a NMF model for the data X.
1289
+
1290
+ Parameters
1291
+ ----------
1292
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1293
+ Training vector, where `n_samples` is the number of samples
1294
+ and `n_features` is the number of features.
1295
+
1296
+ y : Ignored
1297
+ Not used, present for API consistency by convention.
1298
+
1299
+ **params : kwargs
1300
+ Parameters (keyword arguments) and values passed to
1301
+ the fit_transform instance.
1302
+
1303
+ Returns
1304
+ -------
1305
+ self : object
1306
+ Returns the instance itself.
1307
+ """
1308
+ # param validation is done in fit_transform
1309
+
1310
+ self.fit_transform(X, **params)
1311
+ return self
1312
+
1313
+ def inverse_transform(self, Xt=None, W=None):
1314
+ """Transform data back to its original space.
1315
+
1316
+ .. versionadded:: 0.18
1317
+
1318
+ Parameters
1319
+ ----------
1320
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_components)
1321
+ Transformed data matrix.
1322
+
1323
+ W : deprecated
1324
+ Use `Xt` instead.
1325
+
1326
+ .. deprecated:: 1.3
1327
+
1328
+ Returns
1329
+ -------
1330
+ X : ndarray of shape (n_samples, n_features)
1331
+ Returns a data matrix of the original shape.
1332
+ """
1333
+ if Xt is None and W is None:
1334
+ raise TypeError("Missing required positional argument: Xt")
1335
+
1336
+ if W is not None and Xt is not None:
1337
+ raise ValueError("Please provide only `Xt`, and not `W`.")
1338
+
1339
+ if W is not None:
1340
+ warnings.warn(
1341
+ (
1342
+ "Input argument `W` was renamed to `Xt` in v1.3 and will be removed"
1343
+ " in v1.5."
1344
+ ),
1345
+ FutureWarning,
1346
+ )
1347
+ Xt = W
1348
+
1349
+ check_is_fitted(self)
1350
+ return Xt @ self.components_
1351
+
1352
+ @property
1353
+ def _n_features_out(self):
1354
+ """Number of transformed output features."""
1355
+ return self.components_.shape[0]
1356
+
1357
+ def _more_tags(self):
1358
+ return {
1359
+ "requires_positive_X": True,
1360
+ "preserves_dtype": [np.float64, np.float32],
1361
+ }
1362
+
1363
+
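+ # Editorial sketch (illustrative, not part of the upstream module): a worked instance
+ # of the scaling performed by ``_compute_regularization`` above, with hypothetical
+ # shapes and hyperparameters chosen so every product is exact.
+ n_samples_demo, n_features_demo = 100, 20
+ alpha_W_demo = alpha_H_demo = 0.5
+ l1_ratio_demo = 0.25
+ l1_reg_W_demo = n_features_demo * alpha_W_demo * l1_ratio_demo          # 20 * 0.5 * 0.25 = 2.5
+ l2_reg_W_demo = n_features_demo * alpha_W_demo * (1.0 - l1_ratio_demo)  # 20 * 0.5 * 0.75 = 7.5
+ l1_reg_H_demo = n_samples_demo * alpha_H_demo * l1_ratio_demo           # 100 * 0.5 * 0.25 = 12.5
+ l2_reg_H_demo = n_samples_demo * alpha_H_demo * (1.0 - l1_ratio_demo)   # 100 * 0.5 * 0.75 = 37.5
+ assert (l1_reg_W_demo, l2_reg_W_demo, l1_reg_H_demo, l2_reg_H_demo) == (2.5, 7.5, 12.5, 37.5)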
1364
+ class NMF(_BaseNMF):
1365
+ """Non-Negative Matrix Factorization (NMF).
1366
+
1367
+ Find two non-negative matrices, i.e. matrices with all non-negative elements, (W, H)
1368
+ whose product approximates the non-negative matrix X. This factorization can be used
1369
+ for example for dimensionality reduction, source separation or topic extraction.
1370
+
1371
+ The objective function is:
1372
+
1373
+ .. math::
1374
+
1375
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
1376
+
1377
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
1378
+
1379
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
1380
+
1381
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
1382
+
1383
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
1384
+
1385
+ Where:
1386
+
1387
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
1388
+
1389
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
1390
+
1391
+ The generic norm :math:`||X - WH||_{loss}` may represent
1392
+ the Frobenius norm or another supported beta-divergence loss.
1393
+ The choice between options is controlled by the `beta_loss` parameter.
1394
+
1395
+ The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
1396
+ `H` to keep their impact balanced with respect to one another and to the data fit
1397
+ term as independent as possible of the size `n_samples` of the training set.
1398
+
1399
+ The objective function is minimized with an alternating minimization of W
1400
+ and H.
1401
+
1402
+ Note that the transformed data is named W and the components matrix is named H. In
1403
+ the NMF literature, the naming convention is usually the opposite since the data
1404
+ matrix X is transposed.
1405
+
1406
+ Read more in the :ref:`User Guide <NMF>`.
1407
+
1408
+ Parameters
1409
+ ----------
1410
+ n_components : int or {'auto'} or None, default=None
1411
+ Number of components, if n_components is not set all features
1412
+ are kept.
1413
+ If `n_components='auto'`, the number of components is automatically inferred
1414
+ from W or H shapes.
1415
+
1416
+ .. versionchanged:: 1.4
1417
+ Added `'auto'` value.
1418
+
1419
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
1420
+ Method used to initialize the procedure.
1421
+ Valid options:
1422
+
1423
+ - `None`: 'nndsvda' if n_components <= min(n_samples, n_features),
1424
+ otherwise random.
1425
+
1426
+ - `'random'`: non-negative random matrices, scaled with:
1427
+ `sqrt(X.mean() / n_components)`
1428
+
1429
+ - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)
1430
+ initialization (better for sparseness)
1431
+
1432
+ - `'nndsvda'`: NNDSVD with zeros filled with the average of X
1433
+ (better when sparsity is not desired)
1434
+
1435
+ - `'nndsvdar'` NNDSVD with zeros filled with small random values
1436
+ (generally faster, less accurate alternative to NNDSVDa
1437
+ for when sparsity is not desired)
1438
+
1439
+ - `'custom'`: Use custom matrices `W` and `H` which must both be provided.
1440
+
1441
+ .. versionchanged:: 1.1
1442
+ When `init=None` and n_components is less than n_samples and n_features,
+ the default is `nndsvda` instead of `nndsvd`.
1444
+
1445
+ solver : {'cd', 'mu'}, default='cd'
1446
+ Numerical solver to use:
1447
+
1448
+ - 'cd' is a Coordinate Descent solver.
1449
+ - 'mu' is a Multiplicative Update solver.
1450
+
1451
+ .. versionadded:: 0.17
1452
+ Coordinate Descent solver.
1453
+
1454
+ .. versionadded:: 0.19
1455
+ Multiplicative Update solver.
1456
+
1457
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1458
+ 'itakura-saito'}, default='frobenius'
1459
+ Beta divergence to be minimized, measuring the distance between X
1460
+ and the dot product WH. Note that values different from 'frobenius'
1461
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1462
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
1463
+ matrix X cannot contain zeros. Used only in 'mu' solver.
1464
+
1465
+ .. versionadded:: 0.19
1466
+
1467
+ tol : float, default=1e-4
1468
+ Tolerance of the stopping condition.
1469
+
1470
+ max_iter : int, default=200
1471
+ Maximum number of iterations before timing out.
1472
+
1473
+ random_state : int, RandomState instance or None, default=None
1474
+ Used for initialisation (when ``init`` == 'nndsvdar' or
1475
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1476
+ results across multiple function calls.
1477
+ See :term:`Glossary <random_state>`.
1478
+
1479
+ alpha_W : float, default=0.0
1480
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1481
+ (default) to have no regularization on `W`.
1482
+
1483
+ .. versionadded:: 1.0
1484
+
1485
+ alpha_H : float or "same", default="same"
1486
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1487
+ have no regularization on `H`. If "same" (default), it takes the same value as
1488
+ `alpha_W`.
1489
+
1490
+ .. versionadded:: 1.0
1491
+
1492
+ l1_ratio : float, default=0.0
1493
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1494
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1495
+ (aka Frobenius Norm).
1496
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1497
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1498
+
1499
+ .. versionadded:: 0.17
1500
+ Regularization parameter *l1_ratio* used in the Coordinate Descent
1501
+ solver.
1502
+
1503
+ verbose : int, default=0
1504
+ Whether to be verbose.
1505
+
1506
+ shuffle : bool, default=False
1507
+ If true, randomize the order of coordinates in the CD solver.
1508
+
1509
+ .. versionadded:: 0.17
1510
+ *shuffle* parameter used in the Coordinate Descent solver.
1511
+
1512
+ Attributes
1513
+ ----------
1514
+ components_ : ndarray of shape (n_components, n_features)
1515
+ Factorization matrix, sometimes called 'dictionary'.
1516
+
1517
+ n_components_ : int
1518
+ The number of components. It is the same as the `n_components` parameter
+ if it was given. Otherwise, it will be the same as the number of
1520
+ features.
1521
+
1522
+ reconstruction_err_ : float
1523
+ Frobenius norm of the matrix difference, or beta-divergence, between
1524
+ the training data ``X`` and the reconstructed data ``WH`` from
1525
+ the fitted model.
1526
+
1527
+ n_iter_ : int
1528
+ Actual number of iterations.
1529
+
1530
+ n_features_in_ : int
1531
+ Number of features seen during :term:`fit`.
1532
+
1533
+ .. versionadded:: 0.24
1534
+
1535
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1536
+ Names of features seen during :term:`fit`. Defined only when `X`
1537
+ has feature names that are all strings.
1538
+
1539
+ .. versionadded:: 1.0
1540
+
1541
+ See Also
1542
+ --------
1543
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1544
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1545
+ PCA : Principal component analysis.
1546
+ SparseCoder : Find a sparse representation of data from a fixed,
1547
+ precomputed dictionary.
1548
+ SparsePCA : Sparse Principal Components Analysis.
1549
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
1550
+
1551
+ References
1552
+ ----------
1553
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1554
+ factorizations" <10.1587/transfun.E92.A.708>`
1555
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1556
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1557
+
1558
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1559
+ beta-divergence" <10.1162/NECO_a_00168>`
1560
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1561
+
1562
+ Examples
1563
+ --------
1564
+ >>> import numpy as np
1565
+ >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
1566
+ >>> from sklearn.decomposition import NMF
1567
+ >>> model = NMF(n_components=2, init='random', random_state=0)
1568
+ >>> W = model.fit_transform(X)
1569
+ >>> H = model.components_
1570
+ """
1571
+
1572
+ _parameter_constraints: dict = {
1573
+ **_BaseNMF._parameter_constraints,
1574
+ "solver": [StrOptions({"mu", "cd"})],
1575
+ "shuffle": ["boolean"],
1576
+ }
1577
+
1578
+ def __init__(
1579
+ self,
1580
+ n_components="warn",
1581
+ *,
1582
+ init=None,
1583
+ solver="cd",
1584
+ beta_loss="frobenius",
1585
+ tol=1e-4,
1586
+ max_iter=200,
1587
+ random_state=None,
1588
+ alpha_W=0.0,
1589
+ alpha_H="same",
1590
+ l1_ratio=0.0,
1591
+ verbose=0,
1592
+ shuffle=False,
1593
+ ):
1594
+ super().__init__(
1595
+ n_components=n_components,
1596
+ init=init,
1597
+ beta_loss=beta_loss,
1598
+ tol=tol,
1599
+ max_iter=max_iter,
1600
+ random_state=random_state,
1601
+ alpha_W=alpha_W,
1602
+ alpha_H=alpha_H,
1603
+ l1_ratio=l1_ratio,
1604
+ verbose=verbose,
1605
+ )
1606
+
1607
+ self.solver = solver
1608
+ self.shuffle = shuffle
1609
+
1610
+ def _check_params(self, X):
1611
+ super()._check_params(X)
1612
+
1613
+ # solver
1614
+ if self.solver != "mu" and self.beta_loss not in (2, "frobenius"):
1615
+ # 'mu' is the only solver that handles other beta losses than 'frobenius'
1616
+ raise ValueError(
1617
+ f"Invalid beta_loss parameter: solver {self.solver!r} does not handle "
1618
+ f"beta_loss = {self.beta_loss!r}"
1619
+ )
1620
+ if self.solver == "mu" and self.init == "nndsvd":
1621
+ warnings.warn(
1622
+ (
1623
+ "The multiplicative update ('mu') solver cannot update "
1624
+ "zeros present in the initialization, and so leads to "
1625
+ "poorer results when used jointly with init='nndsvd'. "
1626
+ "You may try init='nndsvda' or init='nndsvdar' instead."
1627
+ ),
1628
+ UserWarning,
1629
+ )
1630
+
1631
+ return self
1632
+
1633
+ @_fit_context(prefer_skip_nested_validation=True)
1634
+ def fit_transform(self, X, y=None, W=None, H=None):
1635
+ """Learn a NMF model for the data X and returns the transformed data.
1636
+
1637
+ This is more efficient than calling fit followed by transform.
1638
+
1639
+ Parameters
1640
+ ----------
1641
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1642
+ Training vector, where `n_samples` is the number of samples
1643
+ and `n_features` is the number of features.
1644
+
1645
+ y : Ignored
1646
+ Not used, present for API consistency by convention.
1647
+
1648
+ W : array-like of shape (n_samples, n_components), default=None
1649
+ If `init='custom'`, it is used as initial guess for the solution.
1650
+ If `None`, uses the initialisation method specified in `init`.
1651
+
1652
+ H : array-like of shape (n_components, n_features), default=None
1653
+ If `init='custom'`, it is used as initial guess for the solution.
1654
+ If `None`, uses the initialisation method specified in `init`.
1655
+
1656
+ Returns
1657
+ -------
1658
+ W : ndarray of shape (n_samples, n_components)
1659
+ Transformed data.
1660
+ """
1661
+ X = self._validate_data(
1662
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
1663
+ )
1664
+
1665
+ with config_context(assume_finite=True):
1666
+ W, H, n_iter = self._fit_transform(X, W=W, H=H)
1667
+
1668
+ self.reconstruction_err_ = _beta_divergence(
1669
+ X, W, H, self._beta_loss, square_root=True
1670
+ )
1671
+
1672
+ self.n_components_ = H.shape[0]
1673
+ self.components_ = H
1674
+ self.n_iter_ = n_iter
1675
+
1676
+ return W
1677
+
1678
+ def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
1679
+ """Learn a NMF model for the data X and returns the transformed data.
1680
+
1681
+ Parameters
1682
+ ----------
1683
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1684
+ Data matrix to be decomposed
1685
+
1686
+ y : Ignored
1687
+
1688
+ W : array-like of shape (n_samples, n_components), default=None
1689
+ If `init='custom'`, it is used as initial guess for the solution.
1690
+ If `update_H=False`, it is initialised as an array of zeros, unless
1691
+ `solver='mu'`, then it is filled with values calculated by
1692
+ `np.sqrt(X.mean() / self._n_components)`.
1693
+ If `None`, uses the initialisation method specified in `init`.
1694
+
1695
+ H : array-like of shape (n_components, n_features), default=None
1696
+ If `init='custom'`, it is used as initial guess for the solution.
1697
+ If `update_H=False`, it is used as a constant, to solve for W only.
1698
+ If `None`, uses the initialisation method specified in `init`.
1699
+
1700
+ update_H : bool, default=True
1701
+ If True, both W and H will be estimated from initial guesses,
1702
+ this corresponds to a call to the 'fit_transform' method.
1703
+ If False, only W will be estimated, this corresponds to a call
1704
+ to the 'transform' method.
1705
+
1706
+ Returns
1707
+ -------
1708
+ W : ndarray of shape (n_samples, n_components)
1709
+ Transformed data.
1710
+
1711
+ H : ndarray of shape (n_components, n_features)
1712
+ Factorization matrix, sometimes called 'dictionary'.
1713
+
1714
+ n_iter_ : int
1715
+ Actual number of iterations.
1716
+ """
1717
+ check_non_negative(X, "NMF (input X)")
1718
+
1719
+ # check parameters
1720
+ self._check_params(X)
1721
+
1722
+ if X.min() == 0 and self._beta_loss <= 0:
1723
+ raise ValueError(
1724
+ "When beta_loss <= 0 and X contains zeros, "
1725
+ "the solver may diverge. Please add small values "
1726
+ "to X, or use a positive beta_loss."
1727
+ )
1728
+
1729
+ # initialize or check W and H
1730
+ W, H = self._check_w_h(X, W, H, update_H)
1731
+
1732
+ # scale the regularization terms
1733
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
1734
+
1735
+ if self.solver == "cd":
1736
+ W, H, n_iter = _fit_coordinate_descent(
1737
+ X,
1738
+ W,
1739
+ H,
1740
+ self.tol,
1741
+ self.max_iter,
1742
+ l1_reg_W,
1743
+ l1_reg_H,
1744
+ l2_reg_W,
1745
+ l2_reg_H,
1746
+ update_H=update_H,
1747
+ verbose=self.verbose,
1748
+ shuffle=self.shuffle,
1749
+ random_state=self.random_state,
1750
+ )
1751
+ elif self.solver == "mu":
1752
+ W, H, n_iter, *_ = _fit_multiplicative_update(
1753
+ X,
1754
+ W,
1755
+ H,
1756
+ self._beta_loss,
1757
+ self.max_iter,
1758
+ self.tol,
1759
+ l1_reg_W,
1760
+ l1_reg_H,
1761
+ l2_reg_W,
1762
+ l2_reg_H,
1763
+ update_H,
1764
+ self.verbose,
1765
+ )
1766
+ else:
1767
+ raise ValueError("Invalid solver parameter '%s'." % self.solver)
1768
+
1769
+ if n_iter == self.max_iter and self.tol > 0:
1770
+ warnings.warn(
1771
+ "Maximum number of iterations %d reached. Increase "
1772
+ "it to improve convergence."
1773
+ % self.max_iter,
1774
+ ConvergenceWarning,
1775
+ )
1776
+
1777
+ return W, H, n_iter
1778
+
1779
+ def transform(self, X):
1780
+ """Transform the data X according to the fitted NMF model.
1781
+
1782
+ Parameters
1783
+ ----------
1784
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1785
+ Training vector, where `n_samples` is the number of samples
1786
+ and `n_features` is the number of features.
1787
+
1788
+ Returns
1789
+ -------
1790
+ W : ndarray of shape (n_samples, n_components)
1791
+ Transformed data.
1792
+ """
1793
+ check_is_fitted(self)
1794
+ X = self._validate_data(
1795
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False
1796
+ )
1797
+
1798
+ with config_context(assume_finite=True):
1799
+ W, *_ = self._fit_transform(X, H=self.components_, update_H=False)
1800
+
1801
+ return W
1802
+
1803
+
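+ # Editorial sketch (illustrative, not part of the upstream module): for the default
+ # Frobenius loss, the ``reconstruction_err_`` attribute set in ``fit_transform`` above
+ # equals the Frobenius norm of ``X - W @ H``, which can be checked on a toy matrix.
+ import numpy as np
+ from sklearn.decomposition import NMF
+
+ X_chk = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
+ nmf_chk = NMF(n_components=2, init="nndsvda", random_state=0, max_iter=1000)
+ W_chk = nmf_chk.fit_transform(X_chk)
+ H_chk = nmf_chk.components_
+ assert np.isclose(nmf_chk.reconstruction_err_, np.linalg.norm(X_chk - W_chk @ H_chk))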
1804
+ class MiniBatchNMF(_BaseNMF):
1805
+ """Mini-Batch Non-Negative Matrix Factorization (NMF).
1806
+
1807
+ .. versionadded:: 1.1
1808
+
1809
+ Find two non-negative matrices, i.e. matrices with all non-negative elements,
1810
+ (`W`, `H`) whose product approximates the non-negative matrix `X`. This
1811
+ factorization can be used for example for dimensionality reduction, source
1812
+ separation or topic extraction.
1813
+
1814
+ The objective function is:
1815
+
1816
+ .. math::
1817
+
1818
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
1819
+
1820
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
1821
+
1822
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
1823
+
1824
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
1825
+
1826
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
1827
+
1828
+ Where:
1829
+
1830
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
1831
+
1832
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
1833
+
1834
+ The generic norm :math:`||X - WH||_{loss}^2` may represent
1835
+ the Frobenius norm or another supported beta-divergence loss.
1836
+ The choice between options is controlled by the `beta_loss` parameter.
1837
+
1838
+ The objective function is minimized with an alternating minimization of `W`
1839
+ and `H`.
1840
+
1841
+ Note that the transformed data is named `W` and the components matrix is
1842
+ named `H`. In the NMF literature, the naming convention is usually the opposite
1843
+ since the data matrix `X` is transposed.
1844
+
1845
+ Read more in the :ref:`User Guide <MiniBatchNMF>`.
1846
+
1847
+ Parameters
1848
+ ----------
1849
+ n_components : int or {'auto'} or None, default=None
1850
+ Number of components, if `n_components` is not set all features
1851
+ are kept.
1852
+ If `n_components='auto'`, the number of components is automatically inferred
1853
+ from W or H shapes.
1854
+
1855
+ .. versionchanged:: 1.4
1856
+ Added `'auto'` value.
1857
+
1858
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
1859
+ Method used to initialize the procedure.
1860
+ Valid options:
1861
+
1862
+ - `None`: 'nndsvda' if `n_components <= min(n_samples, n_features)`,
1863
+ otherwise random.
1864
+
1865
+ - `'random'`: non-negative random matrices, scaled with:
1866
+ `sqrt(X.mean() / n_components)`
1867
+
1868
+ - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)
1869
+ initialization (better for sparseness).
1870
+
1871
+ - `'nndsvda'`: NNDSVD with zeros filled with the average of X
1872
+ (better when sparsity is not desired).
1873
+
1874
+ - `'nndsvdar'` NNDSVD with zeros filled with small random values
1875
+ (generally faster, less accurate alternative to NNDSVDa
1876
+ for when sparsity is not desired).
1877
+
1878
+ - `'custom'`: Use custom matrices `W` and `H` which must both be provided.
1879
+
1880
+ batch_size : int, default=1024
1881
+ Number of samples in each mini-batch. Large batch sizes
1882
+ give better long-term convergence at the cost of a slower start.
1883
+
1884
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1885
+ 'itakura-saito'}, default='frobenius'
1886
+ Beta divergence to be minimized, measuring the distance between `X`
1887
+ and the dot product `WH`. Note that values different from 'frobenius'
1888
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1889
+ fits. Note that for `beta_loss <= 0` (or 'itakura-saito'), the input
1890
+ matrix `X` cannot contain zeros.
1891
+
1892
+ tol : float, default=1e-4
1893
+ Control early stopping based on the norm of the differences in `H`
1894
+ between 2 steps. To disable early stopping based on changes in `H`, set
1895
+ `tol` to 0.0.
1896
+
1897
+ max_no_improvement : int, default=10
1898
+ Control early stopping based on the number of consecutive mini-batches
+ that do not yield an improvement on the smoothed cost function.
1900
+ To disable convergence detection based on cost function, set
1901
+ `max_no_improvement` to None.
1902
+
1903
+ max_iter : int, default=200
1904
+ Maximum number of iterations over the complete dataset before
1905
+ timing out.
1906
+
1907
+ alpha_W : float, default=0.0
1908
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1909
+ (default) to have no regularization on `W`.
1910
+
1911
+ alpha_H : float or "same", default="same"
1912
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1913
+ have no regularization on `H`. If "same" (default), it takes the same value as
1914
+ `alpha_W`.
1915
+
1916
+ l1_ratio : float, default=0.0
1917
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1918
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1919
+ (aka Frobenius Norm).
1920
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1921
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1922
+
1923
+ forget_factor : float, default=0.7
1924
+ Amount of rescaling of past information. Its value could be 1 with
1925
+ finite datasets. Choosing values < 1 is recommended with online
1926
+ learning as more recent batches will carry more weight than past batches.
1927
+
1928
+ fresh_restarts : bool, default=False
1929
+ Whether to completely solve for W at each step. Doing fresh restarts will likely
1930
+ lead to a better solution for the same number of iterations, but it is much slower.
1931
+
1932
+ fresh_restarts_max_iter : int, default=30
1933
+ Maximum number of iterations when solving for W at each step. Only used when
1934
+ doing fresh restarts. These iterations may be stopped early based on a small
1935
+ change of W controlled by `tol`.
1936
+
1937
+ transform_max_iter : int, default=None
1938
+ Maximum number of iterations when solving for W at transform time.
1939
+ If None, it defaults to `max_iter`.
1940
+
1941
+ random_state : int, RandomState instance or None, default=None
1942
+ Used for initialisation (when ``init`` == 'nndsvdar' or
1943
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1944
+ results across multiple function calls.
1945
+ See :term:`Glossary <random_state>`.
1946
+
1947
+ verbose : bool, default=False
1948
+ Whether to be verbose.
1949
+
1950
+ Attributes
1951
+ ----------
1952
+ components_ : ndarray of shape (n_components, n_features)
1953
+ Factorization matrix, sometimes called 'dictionary'.
1954
+
1955
+ n_components_ : int
1956
+ The number of components. It is the same as the `n_components` parameter
+ if it was given. Otherwise, it will be the same as the number of
1958
+ features.
1959
+
1960
+ reconstruction_err_ : float
1961
+ Frobenius norm of the matrix difference, or beta-divergence, between
1962
+ the training data `X` and the reconstructed data `WH` from
1963
+ the fitted model.
1964
+
1965
+ n_iter_ : int
1966
+ Actual number of started iterations over the whole dataset.
1967
+
1968
+ n_steps_ : int
1969
+ Number of mini-batches processed.
1970
+
1971
+ n_features_in_ : int
1972
+ Number of features seen during :term:`fit`.
1973
+
1974
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1975
+ Names of features seen during :term:`fit`. Defined only when `X`
1976
+ has feature names that are all strings.
1977
+
1978
+ See Also
1979
+ --------
1980
+ NMF : Non-negative matrix factorization.
1981
+ MiniBatchDictionaryLearning : Finds a dictionary that can best be used to represent
1982
+ data using a sparse code.
1983
+
1984
+ References
1985
+ ----------
1986
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1987
+ factorizations" <10.1587/transfun.E92.A.708>`
1988
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1989
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1990
+
1991
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1992
+ beta-divergence" <10.1162/NECO_a_00168>`
1993
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1994
+
1995
+ .. [3] :doi:`"Online algorithms for nonnegative matrix factorization with the
1996
+ Itakura-Saito divergence" <10.1109/ASPAA.2011.6082314>`
1997
+ Lefevre, A., Bach, F., Fevotte, C. (2011). WASPA.
1998
+
1999
+ Examples
2000
+ --------
2001
+ >>> import numpy as np
2002
+ >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
2003
+ >>> from sklearn.decomposition import MiniBatchNMF
2004
+ >>> model = MiniBatchNMF(n_components=2, init='random', random_state=0)
2005
+ >>> W = model.fit_transform(X)
2006
+ >>> H = model.components_
2007
+ """
2008
+
2009
+ _parameter_constraints: dict = {
2010
+ **_BaseNMF._parameter_constraints,
2011
+ "max_no_improvement": [Interval(Integral, 1, None, closed="left"), None],
2012
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
2013
+ "forget_factor": [Interval(Real, 0, 1, closed="both")],
2014
+ "fresh_restarts": ["boolean"],
2015
+ "fresh_restarts_max_iter": [Interval(Integral, 1, None, closed="left")],
2016
+ "transform_max_iter": [Interval(Integral, 1, None, closed="left"), None],
2017
+ }
2018
+
2019
+ def __init__(
2020
+ self,
2021
+ n_components="warn",
2022
+ *,
2023
+ init=None,
2024
+ batch_size=1024,
2025
+ beta_loss="frobenius",
2026
+ tol=1e-4,
2027
+ max_no_improvement=10,
2028
+ max_iter=200,
2029
+ alpha_W=0.0,
2030
+ alpha_H="same",
2031
+ l1_ratio=0.0,
2032
+ forget_factor=0.7,
2033
+ fresh_restarts=False,
2034
+ fresh_restarts_max_iter=30,
2035
+ transform_max_iter=None,
2036
+ random_state=None,
2037
+ verbose=0,
2038
+ ):
2039
+ super().__init__(
2040
+ n_components=n_components,
2041
+ init=init,
2042
+ beta_loss=beta_loss,
2043
+ tol=tol,
2044
+ max_iter=max_iter,
2045
+ random_state=random_state,
2046
+ alpha_W=alpha_W,
2047
+ alpha_H=alpha_H,
2048
+ l1_ratio=l1_ratio,
2049
+ verbose=verbose,
2050
+ )
2051
+
2052
+ self.max_no_improvement = max_no_improvement
2053
+ self.batch_size = batch_size
2054
+ self.forget_factor = forget_factor
2055
+ self.fresh_restarts = fresh_restarts
2056
+ self.fresh_restarts_max_iter = fresh_restarts_max_iter
2057
+ self.transform_max_iter = transform_max_iter
2058
+
2059
+ def _check_params(self, X):
2060
+ super()._check_params(X)
2061
+
2062
+ # batch_size
2063
+ self._batch_size = min(self.batch_size, X.shape[0])
2064
+
2065
+ # forget_factor
2066
+ self._rho = self.forget_factor ** (self._batch_size / X.shape[0])
2067
+
2068
+ # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
2069
+ if self._beta_loss < 1:
2070
+ self._gamma = 1.0 / (2.0 - self._beta_loss)
2071
+ elif self._beta_loss > 2:
2072
+ self._gamma = 1.0 / (self._beta_loss - 1.0)
2073
+ else:
2074
+ self._gamma = 1.0
2075
+
2076
+ # transform_max_iter
2077
+ self._transform_max_iter = (
2078
+ self.max_iter
2079
+ if self.transform_max_iter is None
2080
+ else self.transform_max_iter
2081
+ )
2082
+
2083
+ return self
2084
+
2085
+ def _solve_W(self, X, H, max_iter):
2086
+ """Minimize the objective function w.r.t W.
2087
+
2088
+ Update W with H being fixed, until convergence. This is the heart
2089
+ of `transform` but it's also used during `fit` when doing fresh restarts.
2090
+ """
2091
+ avg = np.sqrt(X.mean() / self._n_components)
2092
+ W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
2093
+ W_buffer = W.copy()
2094
+
2095
+ # Get scaled regularization terms. Done for each minibatch to take into account
2096
+ # variable sizes of minibatches.
2097
+ l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X)
2098
+
2099
+ for _ in range(max_iter):
2100
+ W, *_ = _multiplicative_update_w(
2101
+ X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
2102
+ )
2103
+
2104
+ W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
2105
+ if self.tol > 0 and W_diff <= self.tol:
2106
+ break
2107
+
2108
+ W_buffer[:] = W
2109
+
2110
+ return W
2111
+
2112
+ def _minibatch_step(self, X, W, H, update_H):
2113
+ """Perform the update of W and H for one minibatch."""
2114
+ batch_size = X.shape[0]
2115
+
2116
+ # get scaled regularization terms. Done for each minibatch to take into account
2117
+ # variable sizes of minibatches.
2118
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
2119
+
2120
+ # update W
2121
+ if self.fresh_restarts or W is None:
2122
+ W = self._solve_W(X, H, self.fresh_restarts_max_iter)
2123
+ else:
2124
+ W, *_ = _multiplicative_update_w(
2125
+ X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
2126
+ )
2127
+
2128
+ # necessary for stability with beta_loss < 1
2129
+ if self._beta_loss < 1:
2130
+ W[W < np.finfo(np.float64).eps] = 0.0
2131
+
2132
+ batch_cost = (
2133
+ _beta_divergence(X, W, H, self._beta_loss)
2134
+ + l1_reg_W * W.sum()
2135
+ + l1_reg_H * H.sum()
2136
+ + l2_reg_W * (W**2).sum()
2137
+ + l2_reg_H * (H**2).sum()
2138
+ ) / batch_size
2139
+
2140
+ # update H (only at fit or fit_transform)
2141
+ if update_H:
2142
+ H[:] = _multiplicative_update_h(
2143
+ X,
2144
+ W,
2145
+ H,
2146
+ beta_loss=self._beta_loss,
2147
+ l1_reg_H=l1_reg_H,
2148
+ l2_reg_H=l2_reg_H,
2149
+ gamma=self._gamma,
2150
+ A=self._components_numerator,
2151
+ B=self._components_denominator,
2152
+ rho=self._rho,
2153
+ )
2154
+
2155
+ # necessary for stability with beta_loss < 1
2156
+ if self._beta_loss <= 1:
2157
+ H[H < np.finfo(np.float64).eps] = 0.0
2158
+
2159
+ return batch_cost
2160
+
2161
+ def _minibatch_convergence(
2162
+ self, X, batch_cost, H, H_buffer, n_samples, step, n_steps
2163
+ ):
2164
+ """Helper function to encapsulate the early stopping logic"""
2165
+ batch_size = X.shape[0]
2166
+
2167
+ # counts steps starting from 1 for user friendly verbose mode.
2168
+ step = step + 1
2169
+
2170
+ # Ignore first iteration because H is not updated yet.
2171
+ if step == 1:
2172
+ if self.verbose:
2173
+ print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
2174
+ return False
2175
+
2176
+ # Compute an Exponentially Weighted Average of the cost function to
2177
+ # monitor the convergence while discarding minibatch-local stochastic
2178
+ # variability: https://en.wikipedia.org/wiki/Moving_average
2179
+ if self._ewa_cost is None:
2180
+ self._ewa_cost = batch_cost
2181
+ else:
2182
+ alpha = batch_size / (n_samples + 1)
2183
+ alpha = min(alpha, 1)
2184
+ self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
2185
+
2186
+ # Log progress to be able to monitor convergence
2187
+ if self.verbose:
2188
+ print(
2189
+ f"Minibatch step {step}/{n_steps}: mean batch cost: "
2190
+ f"{batch_cost}, ewa cost: {self._ewa_cost}"
2191
+ )
2192
+
2193
+ # Early stopping based on change of H
2194
+ H_diff = linalg.norm(H - H_buffer) / linalg.norm(H)
2195
+ if self.tol > 0 and H_diff <= self.tol:
2196
+ if self.verbose:
2197
+ print(f"Converged (small H change) at step {step}/{n_steps}")
2198
+ return True
2199
+
2200
+ # Early stopping heuristic due to lack of improvement on smoothed
2201
+ # cost function
2202
+ if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
2203
+ self._no_improvement = 0
2204
+ self._ewa_cost_min = self._ewa_cost
2205
+ else:
2206
+ self._no_improvement += 1
2207
+
2208
+ if (
2209
+ self.max_no_improvement is not None
2210
+ and self._no_improvement >= self.max_no_improvement
2211
+ ):
2212
+ if self.verbose:
2213
+ print(
2214
+ "Converged (lack of improvement in objective function) "
2215
+ f"at step {step}/{n_steps}"
2216
+ )
2217
+ return True
2218
+
2219
+ return False
2220
+
2221
+ @_fit_context(prefer_skip_nested_validation=True)
2222
+ def fit_transform(self, X, y=None, W=None, H=None):
2223
+ """Learn a NMF model for the data X and returns the transformed data.
2224
+
2225
+ This is more efficient than calling fit followed by transform.
2226
+
2227
+ Parameters
2228
+ ----------
2229
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2230
+ Data matrix to be decomposed.
2231
+
2232
+ y : Ignored
2233
+ Not used, present here for API consistency by convention.
2234
+
2235
+ W : array-like of shape (n_samples, n_components), default=None
2236
+ If `init='custom'`, it is used as initial guess for the solution.
2237
+ If `None`, uses the initialisation method specified in `init`.
2238
+
2239
+ H : array-like of shape (n_components, n_features), default=None
2240
+ If `init='custom'`, it is used as initial guess for the solution.
2241
+ If `None`, uses the initialisation method specified in `init`.
2242
+
2243
+ Returns
2244
+ -------
2245
+ W : ndarray of shape (n_samples, n_components)
2246
+ Transformed data.
2247
+ """
2248
+ X = self._validate_data(
2249
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
2250
+ )
2251
+
2252
+ with config_context(assume_finite=True):
2253
+ W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H)
2254
+
2255
+ self.reconstruction_err_ = _beta_divergence(
2256
+ X, W, H, self._beta_loss, square_root=True
2257
+ )
2258
+
2259
+ self.n_components_ = H.shape[0]
2260
+ self.components_ = H
2261
+ self.n_iter_ = n_iter
2262
+ self.n_steps_ = n_steps
2263
+
2264
+ return W
2265
+
2266
+ def _fit_transform(self, X, W=None, H=None, update_H=True):
2267
+ """Learn a NMF model for the data X and returns the transformed data.
2268
+
2269
+ Parameters
2270
+ ----------
2271
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
2272
+ Data matrix to be decomposed.
2273
+
2274
+ W : array-like of shape (n_samples, n_components), default=None
2275
+ If `init='custom'`, it is used as initial guess for the solution.
2276
+ If `update_H=False`, it is initialised as an array of zeros, unless
2277
+ `solver='mu'`, then it is filled with values calculated by
2278
+ `np.sqrt(X.mean() / self._n_components)`.
2279
+ If `None`, uses the initialisation method specified in `init`.
2280
+
2281
+ H : array-like of shape (n_components, n_features), default=None
2282
+ If `init='custom'`, it is used as initial guess for the solution.
2283
+ If `update_H=False`, it is used as a constant, to solve for W only.
2284
+ If `None`, uses the initialisation method specified in `init`.
2285
+
2286
+ update_H : bool, default=True
2287
+ If True, both W and H will be estimated from initial guesses,
2288
+ this corresponds to a call to the `fit_transform` method.
2289
+ If False, only W will be estimated, this corresponds to a call
2290
+ to the `transform` method.
2291
+
2292
+ Returns
2293
+ -------
2294
+ W : ndarray of shape (n_samples, n_components)
2295
+ Transformed data.
2296
+
2297
+ H : ndarray of shape (n_components, n_features)
2298
+ Factorization matrix, sometimes called 'dictionary'.
2299
+
2300
+ n_iter : int
2301
+ Actual number of started iterations over the whole dataset.
2302
+
2303
+ n_steps : int
2304
+ Number of mini-batches processed.
2305
+ """
2306
+ check_non_negative(X, "MiniBatchNMF (input X)")
2307
+ self._check_params(X)
2308
+
2309
+ if X.min() == 0 and self._beta_loss <= 0:
2310
+ raise ValueError(
2311
+ "When beta_loss <= 0 and X contains zeros, "
2312
+ "the solver may diverge. Please add small values "
2313
+ "to X, or use a positive beta_loss."
2314
+ )
2315
+
2316
+ n_samples = X.shape[0]
2317
+
2318
+ # initialize or check W and H
2319
+ W, H = self._check_w_h(X, W, H, update_H)
2320
+ H_buffer = H.copy()
2321
+
2322
+ # Initialize auxiliary matrices
2323
+ self._components_numerator = H.copy()
2324
+ self._components_denominator = np.ones(H.shape, dtype=H.dtype)
2325
+
2326
+ # Attributes to monitor the convergence
2327
+ self._ewa_cost = None
2328
+ self._ewa_cost_min = None
2329
+ self._no_improvement = 0
2330
+
2331
+ batches = gen_batches(n_samples, self._batch_size)
2332
+ batches = itertools.cycle(batches)
2333
+ n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
2334
+ n_steps = self.max_iter * n_steps_per_iter
2335
+
2336
+ for i, batch in zip(range(n_steps), batches):
2337
+ batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H)
2338
+
2339
+ if update_H and self._minibatch_convergence(
2340
+ X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps
2341
+ ):
2342
+ break
2343
+
2344
+ H_buffer[:] = H
2345
+
2346
+ if self.fresh_restarts:
2347
+ W = self._solve_W(X, H, self._transform_max_iter)
2348
+
2349
+ n_steps = i + 1
2350
+ n_iter = int(np.ceil(n_steps / n_steps_per_iter))
2351
+
2352
+ if n_iter == self.max_iter and self.tol > 0:
2353
+ warnings.warn(
2354
+ (
2355
+ f"Maximum number of iterations {self.max_iter} reached. "
2356
+ "Increase it to improve convergence."
2357
+ ),
2358
+ ConvergenceWarning,
2359
+ )
2360
+
2361
+ return W, H, n_iter, n_steps
2362
+
2363
+ def transform(self, X):
2364
+ """Transform the data X according to the fitted MiniBatchNMF model.
2365
+
2366
+ Parameters
2367
+ ----------
2368
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2369
+ Data matrix to be transformed by the model.
2370
+
2371
+ Returns
2372
+ -------
2373
+ W : ndarray of shape (n_samples, n_components)
2374
+ Transformed data.
2375
+ """
2376
+ check_is_fitted(self)
2377
+ X = self._validate_data(
2378
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False
2379
+ )
2380
+
2381
+ W = self._solve_W(X, self.components_, self._transform_max_iter)
2382
+
2383
+ return W
2384
+
2385
+ @_fit_context(prefer_skip_nested_validation=True)
2386
+ def partial_fit(self, X, y=None, W=None, H=None):
2387
+ """Update the model using the data in `X` as a mini-batch.
2388
+
2389
+ This method is expected to be called several times consecutively
2390
+ on different chunks of a dataset so as to implement out-of-core
2391
+ or online learning.
2392
+
2393
+ This is especially useful when the whole dataset is too big to fit in
2394
+ memory at once (see :ref:`scaling_strategies`).
2395
+
2396
+ Parameters
2397
+ ----------
2398
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2399
+ Data matrix to be decomposed.
2400
+
2401
+ y : Ignored
2402
+ Not used, present here for API consistency by convention.
2403
+
2404
+ W : array-like of shape (n_samples, n_components), default=None
2405
+ If `init='custom'`, it is used as initial guess for the solution.
2406
+ Only used for the first call to `partial_fit`.
2407
+
2408
+ H : array-like of shape (n_components, n_features), default=None
2409
+ If `init='custom'`, it is used as initial guess for the solution.
2410
+ Only used for the first call to `partial_fit`.
2411
+
2412
+ Returns
2413
+ -------
2414
+ self
2415
+ Returns the instance itself.
2416
+ """
2417
+ has_components = hasattr(self, "components_")
2418
+
2419
+ X = self._validate_data(
2420
+ X,
2421
+ accept_sparse=("csr", "csc"),
2422
+ dtype=[np.float64, np.float32],
2423
+ reset=not has_components,
2424
+ )
2425
+
2426
+ if not has_components:
2427
+ # This instance has not been fitted yet (fit or partial_fit)
2428
+ self._check_params(X)
2429
+ _, H = self._check_w_h(X, W=W, H=H, update_H=True)
2430
+
2431
+ self._components_numerator = H.copy()
2432
+ self._components_denominator = np.ones(H.shape, dtype=H.dtype)
2433
+ self.n_steps_ = 0
2434
+ else:
2435
+ H = self.components_
2436
+
2437
+ self._minibatch_step(X, None, H, update_H=True)
2438
+
2439
+ self.n_components_ = H.shape[0]
2440
+ self.components_ = H
2441
+ self.n_steps_ += 1
2442
+
2443
+ return self
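Editor's aside, not part of the diff above: the `partial_fit` method that closes the class is what enables out-of-core use of `MiniBatchNMF`, since each call performs a single minibatch update of the dictionary `H`. A minimal sketch (assuming NumPy and this scikit-learn build are importable; the chunk size of 128 is arbitrary):

import numpy as np
from sklearn.decomposition import MiniBatchNMF

rng = np.random.RandomState(0)
X = np.abs(rng.randn(1000, 20))          # NMF requires non-negative data

est = MiniBatchNMF(n_components=5, batch_size=128, random_state=0)
for start in range(0, X.shape[0], 128):  # stream the data chunk by chunk
    est.partial_fit(X[start:start + 128])

W = est.transform(X)                     # per-sample activations, shape (1000, 5)
H = est.components_                      # learned dictionary, shape (5, 20)

Calling `fit_transform(X)` on the full matrix would instead cycle over minibatches internally until the early-stopping checks in `_minibatch_convergence` trigger.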
venv/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (307 kB).
 
venv/lib/python3.10/site-packages/sklearn/decomposition/_pca.py ADDED
@@ -0,0 +1,747 @@
1
+ """ Principal Component Analysis.
2
+ """
3
+
4
+ # Author: Alexandre Gramfort <[email protected]>
5
+ # Olivier Grisel <[email protected]>
6
+ # Mathieu Blondel <[email protected]>
7
+ # Denis A. Engemann <[email protected]>
8
+ # Michael Eickenberg <[email protected]>
9
+ # Giorgio Patrini <[email protected]>
10
+ #
11
+ # License: BSD 3 clause
12
+
13
+ from math import log, sqrt
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ from scipy import linalg
18
+ from scipy.sparse import issparse
19
+ from scipy.sparse.linalg import svds
20
+ from scipy.special import gammaln
21
+
22
+ from ..base import _fit_context
23
+ from ..utils import check_random_state
24
+ from ..utils._arpack import _init_arpack_v0
25
+ from ..utils._array_api import _convert_to_numpy, get_namespace
26
+ from ..utils._param_validation import Interval, RealNotInt, StrOptions
27
+ from ..utils.extmath import fast_logdet, randomized_svd, stable_cumsum, svd_flip
28
+ from ..utils.sparsefuncs import _implicit_column_offset, mean_variance_axis
29
+ from ..utils.validation import check_is_fitted
30
+ from ._base import _BasePCA
31
+
32
+
33
+ def _assess_dimension(spectrum, rank, n_samples):
34
+ """Compute the log-likelihood of a rank ``rank`` dataset.
35
+
36
+ The dataset is assumed to be embedded in gaussian noise of shape(n,
37
+ dimf) having spectrum ``spectrum``. This implements the method of
38
+ T. P. Minka.
39
+
40
+ Parameters
41
+ ----------
42
+ spectrum : ndarray of shape (n_features,)
43
+ Data spectrum.
44
+ rank : int
45
+ Tested rank value. It should be strictly lower than n_features,
46
+ otherwise the method isn't specified (division by zero in equation
47
+ (31) from the paper).
48
+ n_samples : int
49
+ Number of samples.
50
+
51
+ Returns
52
+ -------
53
+ ll : float
54
+ The log-likelihood.
55
+
56
+ References
57
+ ----------
58
+ This implements the method of `Thomas P. Minka:
59
+ Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
60
+ <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
61
+ """
62
+ xp, _ = get_namespace(spectrum)
63
+
64
+ n_features = spectrum.shape[0]
65
+ if not 1 <= rank < n_features:
66
+ raise ValueError("the tested rank should be in [1, n_features - 1]")
67
+
68
+ eps = 1e-15
69
+
70
+ if spectrum[rank - 1] < eps:
71
+ # When the tested rank is associated with a small eigenvalue, there's
72
+ # no point in computing the log-likelihood: it's going to be very
73
+ # small and won't be the max anyway. Also, it can lead to numerical
74
+ # issues below when computing pa, in particular in log((spectrum[i] -
75
+ # spectrum[j]) because this will take the log of something very small.
76
+ return -xp.inf
77
+
78
+ pu = -rank * log(2.0)
79
+ for i in range(1, rank + 1):
80
+ pu += (
81
+ gammaln((n_features - i + 1) / 2.0)
82
+ - log(xp.pi) * (n_features - i + 1) / 2.0
83
+ )
84
+
85
+ pl = xp.sum(xp.log(spectrum[:rank]))
86
+ pl = -pl * n_samples / 2.0
87
+
88
+ v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank))
89
+ pv = -log(v) * n_samples * (n_features - rank) / 2.0
90
+
91
+ m = n_features * rank - rank * (rank + 1.0) / 2.0
92
+ pp = log(2.0 * xp.pi) * (m + rank) / 2.0
93
+
94
+ pa = 0.0
95
+ spectrum_ = xp.asarray(spectrum, copy=True)
96
+ spectrum_[rank:n_features] = v
97
+ for i in range(rank):
98
+ for j in range(i + 1, spectrum.shape[0]):
99
+ pa += log(
100
+ (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])
101
+ ) + log(n_samples)
102
+
103
+ ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0
104
+
105
+ return ll
106
+
107
+
108
+ def _infer_dimension(spectrum, n_samples):
109
+ """Infers the dimension of a dataset with a given spectrum.
110
+
111
+ The returned value will be in [1, n_features - 1].
112
+ """
113
+ xp, _ = get_namespace(spectrum)
114
+
115
+ ll = xp.empty_like(spectrum)
116
+ ll[0] = -xp.inf # we don't want to return n_components = 0
117
+ for rank in range(1, spectrum.shape[0]):
118
+ ll[rank] = _assess_dimension(spectrum, rank, n_samples)
119
+ return xp.argmax(ll)
120
+
121
+
122
+ class PCA(_BasePCA):
123
+ """Principal component analysis (PCA).
124
+
125
+ Linear dimensionality reduction using Singular Value Decomposition of the
126
+ data to project it to a lower dimensional space. The input data is centered
127
+ but not scaled for each feature before applying the SVD.
128
+
129
+ It uses the LAPACK implementation of the full SVD or a randomized truncated
130
+ SVD by the method of Halko et al. 2009, depending on the shape of the input
131
+ data and the number of components to extract.
132
+
133
+ It can also use the scipy.sparse.linalg ARPACK implementation of the
134
+ truncated SVD.
135
+
136
+ Notice that this class does not support sparse input. See
137
+ :class:`TruncatedSVD` for an alternative with sparse data.
138
+
139
+ For a usage example, see
140
+ :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py`
141
+
142
+ Read more in the :ref:`User Guide <PCA>`.
143
+
144
+ Parameters
145
+ ----------
146
+ n_components : int, float or 'mle', default=None
147
+ Number of components to keep.
148
+ if n_components is not set all components are kept::
149
+
150
+ n_components == min(n_samples, n_features)
151
+
152
+ If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
153
+ MLE is used to guess the dimension. Use of ``n_components == 'mle'``
154
+ will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
155
+
156
+ If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
157
+ number of components such that the amount of variance that needs to be
158
+ explained is greater than the percentage specified by n_components.
159
+
160
+ If ``svd_solver == 'arpack'``, the number of components must be
161
+ strictly less than the minimum of n_features and n_samples.
162
+
163
+ Hence, the None case results in::
164
+
165
+ n_components == min(n_samples, n_features) - 1
166
+
167
+ copy : bool, default=True
168
+ If False, data passed to fit are overwritten and running
169
+ fit(X).transform(X) will not yield the expected results,
170
+ use fit_transform(X) instead.
171
+
172
+ whiten : bool, default=False
173
+ When True (False by default) the `components_` vectors are multiplied
174
+ by the square root of n_samples and then divided by the singular values
175
+ to ensure uncorrelated outputs with unit component-wise variances.
176
+
177
+ Whitening will remove some information from the transformed signal
178
+ (the relative variance scales of the components) but can sometime
179
+ improve the predictive accuracy of the downstream estimators by
180
+ making their data respect some hard-wired assumptions.
181
+
182
+ svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto'
183
+ If auto :
184
+ The solver is selected by a default policy based on `X.shape` and
185
+ `n_components`: if the input data is larger than 500x500 and the
186
+ number of components to extract is lower than 80% of the smallest
187
+ dimension of the data, then the more efficient 'randomized'
188
+ method is enabled. Otherwise the exact full SVD is computed and
189
+ optionally truncated afterwards.
190
+ If full :
191
+ run exact full SVD calling the standard LAPACK solver via
192
+ `scipy.linalg.svd` and select the components by postprocessing
193
+ If arpack :
194
+ run SVD truncated to n_components calling ARPACK solver via
195
+ `scipy.sparse.linalg.svds`. It requires strictly
196
+ 0 < n_components < min(X.shape)
197
+ If randomized :
198
+ run randomized SVD by the method of Halko et al.
199
+
200
+ .. versionadded:: 0.18.0
201
+
202
+ tol : float, default=0.0
203
+ Tolerance for singular values computed by svd_solver == 'arpack'.
204
+ Must be of range [0.0, infinity).
205
+
206
+ .. versionadded:: 0.18.0
207
+
208
+ iterated_power : int or 'auto', default='auto'
209
+ Number of iterations for the power method computed by
210
+ svd_solver == 'randomized'.
211
+ Must be of range [0, infinity).
212
+
213
+ .. versionadded:: 0.18.0
214
+
215
+ n_oversamples : int, default=10
216
+ This parameter is only relevant when `svd_solver="randomized"`.
217
+ It corresponds to the additional number of random vectors to sample the
218
+ range of `X` so as to ensure proper conditioning. See
219
+ :func:`~sklearn.utils.extmath.randomized_svd` for more details.
220
+
221
+ .. versionadded:: 1.1
222
+
223
+ power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
224
+ Power iteration normalizer for randomized SVD solver.
225
+ Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
226
+ for more details.
227
+
228
+ .. versionadded:: 1.1
229
+
230
+ random_state : int, RandomState instance or None, default=None
231
+ Used when the 'arpack' or 'randomized' solvers are used. Pass an int
232
+ for reproducible results across multiple function calls.
233
+ See :term:`Glossary <random_state>`.
234
+
235
+ .. versionadded:: 0.18.0
236
+
237
+ Attributes
238
+ ----------
239
+ components_ : ndarray of shape (n_components, n_features)
240
+ Principal axes in feature space, representing the directions of
241
+ maximum variance in the data. Equivalently, the right singular
242
+ vectors of the centered input data, parallel to its eigenvectors.
243
+ The components are sorted by decreasing ``explained_variance_``.
244
+
245
+ explained_variance_ : ndarray of shape (n_components,)
246
+ The amount of variance explained by each of the selected components.
247
+ The variance estimation uses `n_samples - 1` degrees of freedom.
248
+
249
+ Equal to n_components largest eigenvalues
250
+ of the covariance matrix of X.
251
+
252
+ .. versionadded:: 0.18
253
+
254
+ explained_variance_ratio_ : ndarray of shape (n_components,)
255
+ Percentage of variance explained by each of the selected components.
256
+
257
+ If ``n_components`` is not set then all components are stored and the
258
+ sum of the ratios is equal to 1.0.
259
+
260
+ singular_values_ : ndarray of shape (n_components,)
261
+ The singular values corresponding to each of the selected components.
262
+ The singular values are equal to the 2-norms of the ``n_components``
263
+ variables in the lower-dimensional space.
264
+
265
+ .. versionadded:: 0.19
266
+
267
+ mean_ : ndarray of shape (n_features,)
268
+ Per-feature empirical mean, estimated from the training set.
269
+
270
+ Equal to `X.mean(axis=0)`.
271
+
272
+ n_components_ : int
273
+ The estimated number of components. When n_components is set
274
+ to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
275
+ number is estimated from input data. Otherwise it equals the parameter
276
+ n_components, or the lesser value of n_features and n_samples
277
+ if n_components is None.
278
+
279
+ n_samples_ : int
280
+ Number of samples in the training data.
281
+
282
+ noise_variance_ : float
283
+ The estimated noise covariance following the Probabilistic PCA model
284
+ from Tipping and Bishop 1999. See "Pattern Recognition and
285
+ Machine Learning" by C. Bishop, 12.2.1 p. 574 or
286
+ http://www.miketipping.com/papers/met-mppca.pdf. It is required to
287
+ compute the estimated data covariance and score samples.
288
+
289
+ Equal to the average of (min(n_features, n_samples) - n_components)
290
+ smallest eigenvalues of the covariance matrix of X.
291
+
292
+ n_features_in_ : int
293
+ Number of features seen during :term:`fit`.
294
+
295
+ .. versionadded:: 0.24
296
+
297
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
298
+ Names of features seen during :term:`fit`. Defined only when `X`
299
+ has feature names that are all strings.
300
+
301
+ .. versionadded:: 1.0
302
+
303
+ See Also
304
+ --------
305
+ KernelPCA : Kernel Principal Component Analysis.
306
+ SparsePCA : Sparse Principal Component Analysis.
307
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
308
+ IncrementalPCA : Incremental Principal Component Analysis.
309
+
310
+ References
311
+ ----------
312
+ For n_components == 'mle', this class uses the method from:
313
+ `Minka, T. P.. "Automatic choice of dimensionality for PCA".
314
+ In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
315
+
316
+ Implements the probabilistic PCA model from:
317
+ `Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
318
+ component analysis". Journal of the Royal Statistical Society:
319
+ Series B (Statistical Methodology), 61(3), 611-622.
320
+ <http://www.miketipping.com/papers/met-mppca.pdf>`_
321
+ via the score and score_samples methods.
322
+
323
+ For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
324
+
325
+ For svd_solver == 'randomized', see:
326
+ :doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
327
+ "Finding structure with randomness: Probabilistic algorithms for
328
+ constructing approximate matrix decompositions".
329
+ SIAM review, 53(2), 217-288.
330
+ <10.1137/090771806>`
331
+ and also
332
+ :doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
333
+ "A randomized algorithm for the decomposition of matrices".
334
+ Applied and Computational Harmonic Analysis, 30(1), 47-68.
335
+ <10.1016/j.acha.2010.02.003>`
336
+
337
+ Examples
338
+ --------
339
+ >>> import numpy as np
340
+ >>> from sklearn.decomposition import PCA
341
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
342
+ >>> pca = PCA(n_components=2)
343
+ >>> pca.fit(X)
344
+ PCA(n_components=2)
345
+ >>> print(pca.explained_variance_ratio_)
346
+ [0.9924... 0.0075...]
347
+ >>> print(pca.singular_values_)
348
+ [6.30061... 0.54980...]
349
+
350
+ >>> pca = PCA(n_components=2, svd_solver='full')
351
+ >>> pca.fit(X)
352
+ PCA(n_components=2, svd_solver='full')
353
+ >>> print(pca.explained_variance_ratio_)
354
+ [0.9924... 0.00755...]
355
+ >>> print(pca.singular_values_)
356
+ [6.30061... 0.54980...]
357
+
358
+ >>> pca = PCA(n_components=1, svd_solver='arpack')
359
+ >>> pca.fit(X)
360
+ PCA(n_components=1, svd_solver='arpack')
361
+ >>> print(pca.explained_variance_ratio_)
362
+ [0.99244...]
363
+ >>> print(pca.singular_values_)
364
+ [6.30061...]
365
+ """
366
+
367
+ _parameter_constraints: dict = {
368
+ "n_components": [
369
+ Interval(Integral, 0, None, closed="left"),
370
+ Interval(RealNotInt, 0, 1, closed="neither"),
371
+ StrOptions({"mle"}),
372
+ None,
373
+ ],
374
+ "copy": ["boolean"],
375
+ "whiten": ["boolean"],
376
+ "svd_solver": [StrOptions({"auto", "full", "arpack", "randomized"})],
377
+ "tol": [Interval(Real, 0, None, closed="left")],
378
+ "iterated_power": [
379
+ StrOptions({"auto"}),
380
+ Interval(Integral, 0, None, closed="left"),
381
+ ],
382
+ "n_oversamples": [Interval(Integral, 1, None, closed="left")],
383
+ "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
384
+ "random_state": ["random_state"],
385
+ }
386
+
387
+ def __init__(
388
+ self,
389
+ n_components=None,
390
+ *,
391
+ copy=True,
392
+ whiten=False,
393
+ svd_solver="auto",
394
+ tol=0.0,
395
+ iterated_power="auto",
396
+ n_oversamples=10,
397
+ power_iteration_normalizer="auto",
398
+ random_state=None,
399
+ ):
400
+ self.n_components = n_components
401
+ self.copy = copy
402
+ self.whiten = whiten
403
+ self.svd_solver = svd_solver
404
+ self.tol = tol
405
+ self.iterated_power = iterated_power
406
+ self.n_oversamples = n_oversamples
407
+ self.power_iteration_normalizer = power_iteration_normalizer
408
+ self.random_state = random_state
409
+
410
+ @_fit_context(prefer_skip_nested_validation=True)
411
+ def fit(self, X, y=None):
412
+ """Fit the model with X.
413
+
414
+ Parameters
415
+ ----------
416
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
417
+ Training data, where `n_samples` is the number of samples
418
+ and `n_features` is the number of features.
419
+
420
+ y : Ignored
421
+ Ignored.
422
+
423
+ Returns
424
+ -------
425
+ self : object
426
+ Returns the instance itself.
427
+ """
428
+ self._fit(X)
429
+ return self
430
+
431
+ @_fit_context(prefer_skip_nested_validation=True)
432
+ def fit_transform(self, X, y=None):
433
+ """Fit the model with X and apply the dimensionality reduction on X.
434
+
435
+ Parameters
436
+ ----------
437
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
438
+ Training data, where `n_samples` is the number of samples
439
+ and `n_features` is the number of features.
440
+
441
+ y : Ignored
442
+ Ignored.
443
+
444
+ Returns
445
+ -------
446
+ X_new : ndarray of shape (n_samples, n_components)
447
+ Transformed values.
448
+
449
+ Notes
450
+ -----
451
+ This method returns a Fortran-ordered array. To convert it to a
452
+ C-ordered array, use 'np.ascontiguousarray'.
453
+ """
454
+ U, S, Vt = self._fit(X)
455
+ U = U[:, : self.n_components_]
456
+
457
+ if self.whiten:
458
+ # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
459
+ U *= sqrt(X.shape[0] - 1)
460
+ else:
461
+ # X_new = X * V = U * S * Vt * V = U * S
462
+ U *= S[: self.n_components_]
463
+
464
+ return U
465
+
466
+ def _fit(self, X):
467
+ """Dispatch to the right submethod depending on the chosen solver."""
468
+ xp, is_array_api_compliant = get_namespace(X)
469
+
470
+ # Raise an error for sparse input and unsupported svd_solver
471
+ if issparse(X) and self.svd_solver != "arpack":
472
+ raise TypeError(
473
+ 'PCA only support sparse inputs with the "arpack" solver, while '
474
+ f'"{self.svd_solver}" was passed. See TruncatedSVD for a possible'
475
+ " alternative."
476
+ )
477
+ # Raise an error for non-Numpy input and arpack solver.
478
+ if self.svd_solver == "arpack" and is_array_api_compliant:
479
+ raise ValueError(
480
+ "PCA with svd_solver='arpack' is not supported for Array API inputs."
481
+ )
482
+
483
+ X = self._validate_data(
484
+ X,
485
+ dtype=[xp.float64, xp.float32],
486
+ accept_sparse=("csr", "csc"),
487
+ ensure_2d=True,
488
+ copy=self.copy,
489
+ )
490
+
491
+ # Handle n_components==None
492
+ if self.n_components is None:
493
+ if self.svd_solver != "arpack":
494
+ n_components = min(X.shape)
495
+ else:
496
+ n_components = min(X.shape) - 1
497
+ else:
498
+ n_components = self.n_components
499
+
500
+ # Handle svd_solver
501
+ self._fit_svd_solver = self.svd_solver
502
+ if self._fit_svd_solver == "auto":
503
+ # Small problem or n_components == 'mle', just call full PCA
504
+ if max(X.shape) <= 500 or n_components == "mle":
505
+ self._fit_svd_solver = "full"
506
+ elif 1 <= n_components < 0.8 * min(X.shape):
507
+ self._fit_svd_solver = "randomized"
508
+ # This is also the case of n_components in (0,1)
509
+ else:
510
+ self._fit_svd_solver = "full"
511
+
512
+ # Call different fits for either full or truncated SVD
513
+ if self._fit_svd_solver == "full":
514
+ return self._fit_full(X, n_components)
515
+ elif self._fit_svd_solver in ["arpack", "randomized"]:
516
+ return self._fit_truncated(X, n_components, self._fit_svd_solver)
517
+
518
+ def _fit_full(self, X, n_components):
519
+ """Fit the model by computing full SVD on X."""
520
+ xp, is_array_api_compliant = get_namespace(X)
521
+
522
+ n_samples, n_features = X.shape
523
+
524
+ if n_components == "mle":
525
+ if n_samples < n_features:
526
+ raise ValueError(
527
+ "n_components='mle' is only supported if n_samples >= n_features"
528
+ )
529
+ elif not 0 <= n_components <= min(n_samples, n_features):
530
+ raise ValueError(
531
+ "n_components=%r must be between 0 and "
532
+ "min(n_samples, n_features)=%r with "
533
+ "svd_solver='full'" % (n_components, min(n_samples, n_features))
534
+ )
535
+
536
+ # Center data
537
+ self.mean_ = xp.mean(X, axis=0)
538
+ X -= self.mean_
539
+
540
+ if not is_array_api_compliant:
541
+ # Use scipy.linalg with NumPy/SciPy inputs for the sake of not
542
+ # introducing unanticipated behavior changes. In the long run we
543
+ # could instead decide to always use xp.linalg.svd for all inputs,
544
+ # but that would make this code rely on numpy's SVD instead of
545
+ # scipy's. It's not 100% clear whether they use the same LAPACK
546
+ # solver by default though (assuming both are built against the
547
+ # same BLAS).
548
+ U, S, Vt = linalg.svd(X, full_matrices=False)
549
+ else:
550
+ U, S, Vt = xp.linalg.svd(X, full_matrices=False)
551
+ # flip eigenvectors' sign to enforce deterministic output
552
+ U, Vt = svd_flip(U, Vt)
553
+
554
+ components_ = Vt
555
+
556
+ # Get variance explained by singular values
557
+ explained_variance_ = (S**2) / (n_samples - 1)
558
+ total_var = xp.sum(explained_variance_)
559
+ explained_variance_ratio_ = explained_variance_ / total_var
560
+ singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
561
+
562
+ # Postprocess the number of components required
563
+ if n_components == "mle":
564
+ n_components = _infer_dimension(explained_variance_, n_samples)
565
+ elif 0 < n_components < 1.0:
566
+ # number of components for which the cumulated explained
567
+ # variance percentage is superior to the desired threshold
568
+ # side='right' ensures that number of features selected
569
+ # their variance is always greater than n_components float
570
+ # passed. More discussion in issue: #15669
571
+ if is_array_api_compliant:
572
+ # Convert to numpy as xp.cumsum and xp.searchsorted are not
573
+ # part of the Array API standard yet:
574
+ #
575
+ # https://github.com/data-apis/array-api/issues/597
576
+ # https://github.com/data-apis/array-api/issues/688
577
+ #
578
+ # Furthermore, it's not always safe to call them for namespaces
579
+ # that already implement them: for instance as
580
+ # cupy.searchsorted does not accept a float as second argument.
581
+ explained_variance_ratio_np = _convert_to_numpy(
582
+ explained_variance_ratio_, xp=xp
583
+ )
584
+ else:
585
+ explained_variance_ratio_np = explained_variance_ratio_
586
+ ratio_cumsum = stable_cumsum(explained_variance_ratio_np)
587
+ n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1
588
+
589
+ # Compute noise covariance using Probabilistic PCA model
590
+ # The sigma2 maximum likelihood (cf. eq. 12.46)
591
+ if n_components < min(n_features, n_samples):
592
+ self.noise_variance_ = xp.mean(explained_variance_[n_components:])
593
+ else:
594
+ self.noise_variance_ = 0.0
595
+
596
+ self.n_samples_ = n_samples
597
+ self.components_ = components_[:n_components, :]
598
+ self.n_components_ = n_components
599
+ self.explained_variance_ = explained_variance_[:n_components]
600
+ self.explained_variance_ratio_ = explained_variance_ratio_[:n_components]
601
+ self.singular_values_ = singular_values_[:n_components]
602
+
603
+ return U, S, Vt
604
+
605
+ def _fit_truncated(self, X, n_components, svd_solver):
606
+ """Fit the model by computing truncated SVD (by ARPACK or randomized)
607
+ on X.
608
+ """
609
+ xp, _ = get_namespace(X)
610
+
611
+ n_samples, n_features = X.shape
612
+
613
+ if isinstance(n_components, str):
614
+ raise ValueError(
615
+ "n_components=%r cannot be a string with svd_solver='%s'"
616
+ % (n_components, svd_solver)
617
+ )
618
+ elif not 1 <= n_components <= min(n_samples, n_features):
619
+ raise ValueError(
620
+ "n_components=%r must be between 1 and "
621
+ "min(n_samples, n_features)=%r with "
622
+ "svd_solver='%s'"
623
+ % (n_components, min(n_samples, n_features), svd_solver)
624
+ )
625
+ elif svd_solver == "arpack" and n_components == min(n_samples, n_features):
626
+ raise ValueError(
627
+ "n_components=%r must be strictly less than "
628
+ "min(n_samples, n_features)=%r with "
629
+ "svd_solver='%s'"
630
+ % (n_components, min(n_samples, n_features), svd_solver)
631
+ )
632
+
633
+ random_state = check_random_state(self.random_state)
634
+
635
+ # Center data
636
+ total_var = None
637
+ if issparse(X):
638
+ self.mean_, var = mean_variance_axis(X, axis=0)
639
+ total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1
640
+ X = _implicit_column_offset(X, self.mean_)
641
+ else:
642
+ self.mean_ = xp.mean(X, axis=0)
643
+ X -= self.mean_
644
+
645
+ if svd_solver == "arpack":
646
+ v0 = _init_arpack_v0(min(X.shape), random_state)
647
+ U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0)
648
+ # svds doesn't abide by scipy.linalg.svd/randomized_svd
649
+ # conventions, so reverse its outputs.
650
+ S = S[::-1]
651
+ # flip eigenvectors' sign to enforce deterministic output
652
+ U, Vt = svd_flip(U[:, ::-1], Vt[::-1])
653
+
654
+ elif svd_solver == "randomized":
655
+ # sign flipping is done inside
656
+ U, S, Vt = randomized_svd(
657
+ X,
658
+ n_components=n_components,
659
+ n_oversamples=self.n_oversamples,
660
+ n_iter=self.iterated_power,
661
+ power_iteration_normalizer=self.power_iteration_normalizer,
662
+ flip_sign=True,
663
+ random_state=random_state,
664
+ )
665
+
666
+ self.n_samples_ = n_samples
667
+ self.components_ = Vt
668
+ self.n_components_ = n_components
669
+
670
+ # Get variance explained by singular values
671
+ self.explained_variance_ = (S**2) / (n_samples - 1)
672
+
673
+ # Workaround in-place variance calculation since at the time numpy
674
+ # did not have a way to calculate variance in-place.
675
+ #
676
+ # TODO: update this code to either:
677
+ # * Use the array-api variance calculation, unless memory usage suffers
678
+ # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api
679
+ # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991
680
+ if total_var is None:
681
+ N = X.shape[0] - 1
682
+ X **= 2
683
+ total_var = xp.sum(X) / N
684
+
685
+ self.explained_variance_ratio_ = self.explained_variance_ / total_var
686
+ self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
687
+
688
+ if self.n_components_ < min(n_features, n_samples):
689
+ self.noise_variance_ = total_var - xp.sum(self.explained_variance_)
690
+ self.noise_variance_ /= min(n_features, n_samples) - n_components
691
+ else:
692
+ self.noise_variance_ = 0.0
693
+
694
+ return U, S, Vt
695
+
696
+ def score_samples(self, X):
697
+ """Return the log-likelihood of each sample.
698
+
699
+ See "Pattern Recognition and Machine Learning"
700
+ by C. Bishop, 12.2.1 p. 574
701
+ or http://www.miketipping.com/papers/met-mppca.pdf
702
+
703
+ Parameters
704
+ ----------
705
+ X : array-like of shape (n_samples, n_features)
706
+ The data.
707
+
708
+ Returns
709
+ -------
710
+ ll : ndarray of shape (n_samples,)
711
+ Log-likelihood of each sample under the current model.
712
+ """
713
+ check_is_fitted(self)
714
+ xp, _ = get_namespace(X)
715
+ X = self._validate_data(X, dtype=[xp.float64, xp.float32], reset=False)
716
+ Xr = X - self.mean_
717
+ n_features = X.shape[1]
718
+ precision = self.get_precision()
719
+ log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1)
720
+ log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
721
+ return log_like
722
+
723
+ def score(self, X, y=None):
724
+ """Return the average log-likelihood of all samples.
725
+
726
+ See "Pattern Recognition and Machine Learning"
727
+ by C. Bishop, 12.2.1 p. 574
728
+ or http://www.miketipping.com/papers/met-mppca.pdf
729
+
730
+ Parameters
731
+ ----------
732
+ X : array-like of shape (n_samples, n_features)
733
+ The data.
734
+
735
+ y : Ignored
736
+ Ignored.
737
+
738
+ Returns
739
+ -------
740
+ ll : float
741
+ Average log-likelihood of the samples under the current model.
742
+ """
743
+ xp, _ = get_namespace(X)
744
+ return float(xp.mean(self.score_samples(X)))
745
+
746
+ def _more_tags(self):
747
+ return {"preserves_dtype": [np.float64, np.float32], "array_api_support": True}
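Editor's aside, not part of the file above: two behaviours implemented in `_fit_full` and `score` are easy to miss when reading the code. A float `n_components` keeps just enough components to exceed that fraction of explained variance (via `stable_cumsum` and `searchsorted`), and `score` returns the average probabilistic-PCA log-likelihood. A small illustrative sketch (assuming NumPy and scikit-learn are importable; the data are synthetic):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 10) @ rng.randn(10, 10)      # correlated synthetic features

pca = PCA(n_components=0.95, svd_solver="full").fit(X)
print(pca.n_components_)                        # chosen from the variance threshold
print(pca.explained_variance_ratio_.sum())      # strictly greater than 0.95
print(pca.score(X))                             # mean log-likelihood per sample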
venv/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py ADDED
@@ -0,0 +1,551 @@
1
+ """Matrix factorization with Sparse PCA."""
2
+ # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Integral, Real
6
+
7
+ import numpy as np
8
+
9
+ from ..base import (
10
+ BaseEstimator,
11
+ ClassNamePrefixFeaturesOutMixin,
12
+ TransformerMixin,
13
+ _fit_context,
14
+ )
15
+ from ..linear_model import ridge_regression
16
+ from ..utils import check_random_state
17
+ from ..utils._param_validation import Hidden, Interval, StrOptions
18
+ from ..utils.extmath import svd_flip
19
+ from ..utils.validation import check_array, check_is_fitted
20
+ from ._dict_learning import MiniBatchDictionaryLearning, dict_learning
21
+
22
+
23
+ class _BaseSparsePCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
24
+ """Base class for SparsePCA and MiniBatchSparsePCA"""
25
+
26
+ _parameter_constraints: dict = {
27
+ "n_components": [None, Interval(Integral, 1, None, closed="left")],
28
+ "alpha": [Interval(Real, 0.0, None, closed="left")],
29
+ "ridge_alpha": [Interval(Real, 0.0, None, closed="left")],
30
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
31
+ "tol": [Interval(Real, 0.0, None, closed="left")],
32
+ "method": [StrOptions({"lars", "cd"})],
33
+ "n_jobs": [Integral, None],
34
+ "verbose": ["verbose"],
35
+ "random_state": ["random_state"],
36
+ }
37
+
38
+ def __init__(
39
+ self,
40
+ n_components=None,
41
+ *,
42
+ alpha=1,
43
+ ridge_alpha=0.01,
44
+ max_iter=1000,
45
+ tol=1e-8,
46
+ method="lars",
47
+ n_jobs=None,
48
+ verbose=False,
49
+ random_state=None,
50
+ ):
51
+ self.n_components = n_components
52
+ self.alpha = alpha
53
+ self.ridge_alpha = ridge_alpha
54
+ self.max_iter = max_iter
55
+ self.tol = tol
56
+ self.method = method
57
+ self.n_jobs = n_jobs
58
+ self.verbose = verbose
59
+ self.random_state = random_state
60
+
61
+ @_fit_context(prefer_skip_nested_validation=True)
62
+ def fit(self, X, y=None):
63
+ """Fit the model from data in X.
64
+
65
+ Parameters
66
+ ----------
67
+ X : array-like of shape (n_samples, n_features)
68
+ Training vector, where `n_samples` is the number of samples
69
+ and `n_features` is the number of features.
70
+
71
+ y : Ignored
72
+ Not used, present here for API consistency by convention.
73
+
74
+ Returns
75
+ -------
76
+ self : object
77
+ Returns the instance itself.
78
+ """
79
+ random_state = check_random_state(self.random_state)
80
+ X = self._validate_data(X)
81
+
82
+ self.mean_ = X.mean(axis=0)
83
+ X = X - self.mean_
84
+
85
+ if self.n_components is None:
86
+ n_components = X.shape[1]
87
+ else:
88
+ n_components = self.n_components
89
+
90
+ return self._fit(X, n_components, random_state)
91
+
92
+ def transform(self, X):
93
+ """Least Squares projection of the data onto the sparse components.
94
+
95
+ To avoid instability issues in case the system is under-determined,
96
+ regularization can be applied (Ridge regression) via the
97
+ `ridge_alpha` parameter.
98
+
99
+ Note that Sparse PCA components orthogonality is not enforced as in PCA
100
+ hence one cannot use a simple linear projection.
101
+
102
+ Parameters
103
+ ----------
104
+ X : ndarray of shape (n_samples, n_features)
105
+ Test data to be transformed, must have the same number of
106
+ features as the data used to train the model.
107
+
108
+ Returns
109
+ -------
110
+ X_new : ndarray of shape (n_samples, n_components)
111
+ Transformed data.
112
+ """
113
+ check_is_fitted(self)
114
+
115
+ X = self._validate_data(X, reset=False)
116
+ X = X - self.mean_
117
+
118
+ U = ridge_regression(
119
+ self.components_.T, X.T, self.ridge_alpha, solver="cholesky"
120
+ )
121
+
122
+ return U
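Editor's aside, not part of the file above: as the docstring notes, `transform` is a ridge projection of the centered data onto the sparse components rather than a plain linear projection. A short check that reproduces the call by hand (assuming NumPy and scikit-learn are importable):

import numpy as np
from sklearn.decomposition import SparsePCA
from sklearn.linear_model import ridge_regression

rng = np.random.RandomState(0)
X = rng.randn(50, 8)

spca = SparsePCA(n_components=3, random_state=0).fit(X)
T = spca.transform(X)

# Same computation as the method above: ridge regression of the centered
# data onto the learned components.
T_manual = ridge_regression(
    spca.components_.T, (X - spca.mean_).T, spca.ridge_alpha, solver="cholesky"
)
print(np.allclose(T, T_manual))                 # expected: True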
123
+
124
+ def inverse_transform(self, X):
125
+ """Transform data from the latent space to the original space.
126
+
127
+ This inversion is an approximation due to the loss of information
128
+ induced by the forward decomposition.
129
+
130
+ .. versionadded:: 1.2
131
+
132
+ Parameters
133
+ ----------
134
+ X : ndarray of shape (n_samples, n_components)
135
+ Data in the latent space.
136
+
137
+ Returns
138
+ -------
139
+ X_original : ndarray of shape (n_samples, n_features)
140
+ Reconstructed data in the original space.
141
+ """
142
+ check_is_fitted(self)
143
+ X = check_array(X)
144
+
145
+ return (X @ self.components_) + self.mean_
146
+
147
+ @property
148
+ def _n_features_out(self):
149
+ """Number of transformed output features."""
150
+ return self.components_.shape[0]
151
+
152
+ def _more_tags(self):
153
+ return {
154
+ "preserves_dtype": [np.float64, np.float32],
155
+ }
156
+
157
+
158
+ class SparsePCA(_BaseSparsePCA):
159
+ """Sparse Principal Components Analysis (SparsePCA).
160
+
161
+ Finds the set of sparse components that can optimally reconstruct
162
+ the data. The amount of sparseness is controllable by the coefficient
163
+ of the L1 penalty, given by the parameter alpha.
164
+
165
+ Read more in the :ref:`User Guide <SparsePCA>`.
166
+
167
+ Parameters
168
+ ----------
169
+ n_components : int, default=None
170
+ Number of sparse atoms to extract. If None, then ``n_components``
171
+ is set to ``n_features``.
172
+
173
+ alpha : float, default=1
174
+ Sparsity controlling parameter. Higher values lead to sparser
175
+ components.
176
+
177
+ ridge_alpha : float, default=0.01
178
+ Amount of ridge shrinkage to apply in order to improve
179
+ conditioning when calling the transform method.
180
+
181
+ max_iter : int, default=1000
182
+ Maximum number of iterations to perform.
183
+
184
+ tol : float, default=1e-8
185
+ Tolerance for the stopping condition.
186
+
187
+ method : {'lars', 'cd'}, default='lars'
188
+ Method to be used for optimization.
189
+ lars: uses the least angle regression method to solve the lasso problem
190
+ (linear_model.lars_path)
191
+ cd: uses the coordinate descent method to compute the
192
+ Lasso solution (linear_model.Lasso). Lars will be faster if
193
+ the estimated components are sparse.
194
+
195
+ n_jobs : int, default=None
196
+ Number of parallel jobs to run.
197
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
198
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
199
+ for more details.
200
+
201
+ U_init : ndarray of shape (n_samples, n_components), default=None
202
+ Initial values for the loadings for warm restart scenarios. Only used
203
+ if `U_init` and `V_init` are not None.
204
+
205
+ V_init : ndarray of shape (n_components, n_features), default=None
206
+ Initial values for the components for warm restart scenarios. Only used
207
+ if `U_init` and `V_init` are not None.
208
+
209
+ verbose : int or bool, default=False
210
+ Controls the verbosity; the higher, the more messages. Defaults to 0.
211
+
212
+ random_state : int, RandomState instance or None, default=None
213
+ Used during dictionary learning. Pass an int for reproducible results
214
+ across multiple function calls.
215
+ See :term:`Glossary <random_state>`.
216
+
217
+ Attributes
218
+ ----------
219
+ components_ : ndarray of shape (n_components, n_features)
220
+ Sparse components extracted from the data.
221
+
222
+ error_ : ndarray
223
+ Vector of errors at each iteration.
224
+
225
+ n_components_ : int
226
+ Estimated number of components.
227
+
228
+ .. versionadded:: 0.23
229
+
230
+ n_iter_ : int
231
+ Number of iterations run.
232
+
233
+ mean_ : ndarray of shape (n_features,)
234
+ Per-feature empirical mean, estimated from the training set.
235
+ Equal to ``X.mean(axis=0)``.
236
+
237
+ n_features_in_ : int
238
+ Number of features seen during :term:`fit`.
239
+
240
+ .. versionadded:: 0.24
241
+
242
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
243
+ Names of features seen during :term:`fit`. Defined only when `X`
244
+ has feature names that are all strings.
245
+
246
+ .. versionadded:: 1.0
247
+
248
+ See Also
249
+ --------
250
+ PCA : Principal Component Analysis implementation.
251
+ MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less
252
+ accurate.
253
+ DictionaryLearning : Generic dictionary learning problem using a sparse code.
254
+
255
+ Examples
256
+ --------
257
+ >>> import numpy as np
258
+ >>> from sklearn.datasets import make_friedman1
259
+ >>> from sklearn.decomposition import SparsePCA
260
+ >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
261
+ >>> transformer = SparsePCA(n_components=5, random_state=0)
262
+ >>> transformer.fit(X)
263
+ SparsePCA(...)
264
+ >>> X_transformed = transformer.transform(X)
265
+ >>> X_transformed.shape
266
+ (200, 5)
267
+ >>> # most values in the components_ are zero (sparsity)
268
+ >>> np.mean(transformer.components_ == 0)
269
+ 0.9666...
270
+ """
271
+
272
+ _parameter_constraints: dict = {
273
+ **_BaseSparsePCA._parameter_constraints,
274
+ "U_init": [None, np.ndarray],
275
+ "V_init": [None, np.ndarray],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ n_components=None,
281
+ *,
282
+ alpha=1,
283
+ ridge_alpha=0.01,
284
+ max_iter=1000,
285
+ tol=1e-8,
286
+ method="lars",
287
+ n_jobs=None,
288
+ U_init=None,
289
+ V_init=None,
290
+ verbose=False,
291
+ random_state=None,
292
+ ):
293
+ super().__init__(
294
+ n_components=n_components,
295
+ alpha=alpha,
296
+ ridge_alpha=ridge_alpha,
297
+ max_iter=max_iter,
298
+ tol=tol,
299
+ method=method,
300
+ n_jobs=n_jobs,
301
+ verbose=verbose,
302
+ random_state=random_state,
303
+ )
304
+ self.U_init = U_init
305
+ self.V_init = V_init
306
+
307
+ def _fit(self, X, n_components, random_state):
308
+ """Specialized `fit` for SparsePCA."""
309
+
310
+ code_init = self.V_init.T if self.V_init is not None else None
311
+ dict_init = self.U_init.T if self.U_init is not None else None
312
+ code, dictionary, E, self.n_iter_ = dict_learning(
313
+ X.T,
314
+ n_components,
315
+ alpha=self.alpha,
316
+ tol=self.tol,
317
+ max_iter=self.max_iter,
318
+ method=self.method,
319
+ n_jobs=self.n_jobs,
320
+ verbose=self.verbose,
321
+ random_state=random_state,
322
+ code_init=code_init,
323
+ dict_init=dict_init,
324
+ return_n_iter=True,
325
+ )
326
+ # flip eigenvectors' sign to enforce deterministic output
327
+ code, dictionary = svd_flip(code, dictionary, u_based_decision=False)
328
+ self.components_ = code.T
329
+ components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
330
+ components_norm[components_norm == 0] = 1
331
+ self.components_ /= components_norm
332
+ self.n_components_ = len(self.components_)
333
+
334
+ self.error_ = E
335
+ return self
336
+
337
+
338
+ class MiniBatchSparsePCA(_BaseSparsePCA):
339
+ """Mini-batch Sparse Principal Components Analysis.
340
+
341
+ Finds the set of sparse components that can optimally reconstruct
342
+ the data. The amount of sparseness is controllable by the coefficient
343
+ of the L1 penalty, given by the parameter alpha.
344
+
345
+ For an example comparing sparse PCA to PCA, see
346
+ :ref:`sphx_glr_auto_examples_decomposition_plot_faces_decomposition.py`
347
+
348
+ Read more in the :ref:`User Guide <SparsePCA>`.
349
+
350
+ Parameters
351
+ ----------
352
+ n_components : int, default=None
353
+ Number of sparse atoms to extract. If None, then ``n_components``
354
+ is set to ``n_features``.
355
+
356
+ alpha : int, default=1
357
+ Sparsity controlling parameter. Higher values lead to sparser
358
+ components.
359
+
360
+ ridge_alpha : float, default=0.01
361
+ Amount of ridge shrinkage to apply in order to improve
362
+ conditioning when calling the transform method.
363
+
364
+ max_iter : int, default=1_000
365
+ Maximum number of iterations over the complete dataset before
366
+ stopping independently of any early stopping criterion heuristics.
367
+
368
+ .. versionadded:: 1.2
369
+
370
+ .. deprecated:: 1.4
371
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
372
+ Use the default value (i.e. `1_000`) instead.
373
+
374
+ callback : callable, default=None
375
+ Callable that gets invoked every five iterations.
376
+
377
+ batch_size : int, default=3
378
+ The number of features to take in each mini batch.
379
+
380
+ verbose : int or bool, default=False
381
+ Controls the verbosity; the higher, the more messages.
382
+
383
+ shuffle : bool, default=True
384
+ Whether to shuffle the data before splitting it in batches.
385
+
386
+ n_jobs : int, default=None
387
+ Number of parallel jobs to run.
388
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
389
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
390
+ for more details.
391
+
392
+ method : {'lars', 'cd'}, default='lars'
393
+ Method to be used for optimization.
394
+ lars: uses the least angle regression method to solve the lasso problem
395
+ (linear_model.lars_path)
396
+ cd: uses the coordinate descent method to compute the
397
+ Lasso solution (linear_model.Lasso). Lars will be faster if
398
+ the estimated components are sparse.
399
+
400
+ random_state : int, RandomState instance or None, default=None
401
+ Used for random shuffling when ``shuffle`` is set to ``True``,
402
+ during online dictionary learning. Pass an int for reproducible results
403
+ across multiple function calls.
404
+ See :term:`Glossary <random_state>`.
405
+
406
+ tol : float, default=1e-3
407
+ Control early stopping based on the norm of the differences in the
408
+ dictionary between 2 steps.
409
+
410
+ To disable early stopping based on changes in the dictionary, set
411
+ `tol` to 0.0.
412
+
413
+ .. versionadded:: 1.1
414
+
415
+ max_no_improvement : int or None, default=10
416
+ Control early stopping based on the consecutive number of mini batches
417
+ that does not yield an improvement on the smoothed cost function.
418
+
419
+ To disable convergence detection based on cost function, set
420
+ `max_no_improvement` to `None`.
421
+
422
+ .. versionadded:: 1.1
423
+
424
+ Attributes
425
+ ----------
426
+ components_ : ndarray of shape (n_components, n_features)
427
+ Sparse components extracted from the data.
428
+
429
+ n_components_ : int
430
+ Estimated number of components.
431
+
432
+ .. versionadded:: 0.23
433
+
434
+ n_iter_ : int
435
+ Number of iterations run.
436
+
437
+ mean_ : ndarray of shape (n_features,)
438
+ Per-feature empirical mean, estimated from the training set.
439
+ Equal to ``X.mean(axis=0)``.
440
+
441
+ n_features_in_ : int
442
+ Number of features seen during :term:`fit`.
443
+
444
+ .. versionadded:: 0.24
445
+
446
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
447
+ Names of features seen during :term:`fit`. Defined only when `X`
448
+ has feature names that are all strings.
449
+
450
+ .. versionadded:: 1.0
451
+
452
+ See Also
453
+ --------
454
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
455
+ IncrementalPCA : Incremental principal components analysis.
456
+ PCA : Principal component analysis.
457
+ SparsePCA : Sparse Principal Components Analysis.
458
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
459
+
460
+ Examples
461
+ --------
462
+ >>> import numpy as np
463
+ >>> from sklearn.datasets import make_friedman1
464
+ >>> from sklearn.decomposition import MiniBatchSparsePCA
465
+ >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
466
+ >>> transformer = MiniBatchSparsePCA(n_components=5, batch_size=50,
467
+ ... max_iter=10, random_state=0)
468
+ >>> transformer.fit(X)
469
+ MiniBatchSparsePCA(...)
470
+ >>> X_transformed = transformer.transform(X)
471
+ >>> X_transformed.shape
472
+ (200, 5)
473
+ >>> # most values in the components_ are zero (sparsity)
474
+ >>> np.mean(transformer.components_ == 0)
475
+ 0.9...
476
+ """
477
+
478
+ _parameter_constraints: dict = {
479
+ **_BaseSparsePCA._parameter_constraints,
480
+ "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)],
481
+ "callback": [None, callable],
482
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
483
+ "shuffle": ["boolean"],
484
+ "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
485
+ }
486
+
487
+ def __init__(
488
+ self,
489
+ n_components=None,
490
+ *,
491
+ alpha=1,
492
+ ridge_alpha=0.01,
493
+ max_iter=1_000,
494
+ callback=None,
495
+ batch_size=3,
496
+ verbose=False,
497
+ shuffle=True,
498
+ n_jobs=None,
499
+ method="lars",
500
+ random_state=None,
501
+ tol=1e-3,
502
+ max_no_improvement=10,
503
+ ):
504
+ super().__init__(
505
+ n_components=n_components,
506
+ alpha=alpha,
507
+ ridge_alpha=ridge_alpha,
508
+ max_iter=max_iter,
509
+ tol=tol,
510
+ method=method,
511
+ n_jobs=n_jobs,
512
+ verbose=verbose,
513
+ random_state=random_state,
514
+ )
515
+ self.callback = callback
516
+ self.batch_size = batch_size
517
+ self.shuffle = shuffle
518
+ self.max_no_improvement = max_no_improvement
519
+
520
+ def _fit(self, X, n_components, random_state):
521
+ """Specialized `fit` for MiniBatchSparsePCA."""
522
+
523
+ transform_algorithm = "lasso_" + self.method
524
+ est = MiniBatchDictionaryLearning(
525
+ n_components=n_components,
526
+ alpha=self.alpha,
527
+ max_iter=self.max_iter,
528
+ dict_init=None,
529
+ batch_size=self.batch_size,
530
+ shuffle=self.shuffle,
531
+ n_jobs=self.n_jobs,
532
+ fit_algorithm=self.method,
533
+ random_state=random_state,
534
+ transform_algorithm=transform_algorithm,
535
+ transform_alpha=self.alpha,
536
+ verbose=self.verbose,
537
+ callback=self.callback,
538
+ tol=self.tol,
539
+ max_no_improvement=self.max_no_improvement,
540
+ )
541
+ est.set_output(transform="default")
542
+ est.fit(X.T)
543
+
544
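+ # Note (added comment): the dictionary learner was fit on X.T, so
+ # transforming X.T yields one sparse code per feature; its transpose is
+ # used as the components.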
+ self.components_, self.n_iter_ = est.transform(X.T).T, est.n_iter_
545
+
546
+ components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
547
+ components_norm[components_norm == 0] = 1
548
+ self.components_ /= components_norm
549
+ self.n_components_ = len(self.components_)
550
+
551
+ return self
venv/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py ADDED
@@ -0,0 +1,319 @@
1
+ """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
2
+ """
3
+
4
+ # Author: Lars Buitinck
5
+ # Olivier Grisel <[email protected]>
6
+ # Michael Becker <[email protected]>
7
+ # License: 3-clause BSD.
8
+
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ import scipy.sparse as sp
13
+ from scipy.sparse.linalg import svds
14
+
15
+ from ..base import (
16
+ BaseEstimator,
17
+ ClassNamePrefixFeaturesOutMixin,
18
+ TransformerMixin,
19
+ _fit_context,
20
+ )
21
+ from ..utils import check_array, check_random_state
22
+ from ..utils._arpack import _init_arpack_v0
23
+ from ..utils._param_validation import Interval, StrOptions
24
+ from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
25
+ from ..utils.sparsefuncs import mean_variance_axis
26
+ from ..utils.validation import check_is_fitted
27
+
28
+ __all__ = ["TruncatedSVD"]
29
+
30
+
31
+ class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
32
+ """Dimensionality reduction using truncated SVD (aka LSA).
33
+
34
+ This transformer performs linear dimensionality reduction by means of
35
+ truncated singular value decomposition (SVD). Contrary to PCA, this
36
+ estimator does not center the data before computing the singular value
37
+ decomposition. This means it can work with sparse matrices
38
+ efficiently.
39
+
40
+ In particular, truncated SVD works on term count/tf-idf matrices as
41
+ returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In
42
+ that context, it is known as latent semantic analysis (LSA).
43
+
44
+ This estimator supports two algorithms: a fast randomized SVD solver, and
45
+ a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or
46
+ `X.T * X`, whichever is more efficient.
47
+
48
+ Read more in the :ref:`User Guide <LSA>`.
49
+
50
+ Parameters
51
+ ----------
52
+ n_components : int, default=2
53
+ Desired dimensionality of output data.
54
+ If algorithm='arpack', must be strictly less than the number of features.
55
+ If algorithm='randomized', must be less than or equal to the number of features.
56
+ The default value is useful for visualisation. For LSA, a value of
57
+ 100 is recommended.
58
+
59
+ algorithm : {'arpack', 'randomized'}, default='randomized'
60
+ SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
61
+ (scipy.sparse.linalg.svds), or "randomized" for the randomized
62
+ algorithm due to Halko (2009).
63
+
64
+ n_iter : int, default=5
65
+ Number of iterations for randomized SVD solver. Not used by ARPACK. The
66
+ default is larger than the default in
67
+ :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse
68
+ matrices that may have large slowly decaying spectrum.
69
+
70
+ n_oversamples : int, default=10
71
+ Number of oversamples for randomized SVD solver. Not used by ARPACK.
72
+ See :func:`~sklearn.utils.extmath.randomized_svd` for a complete
73
+ description.
74
+
75
+ .. versionadded:: 1.1
76
+
77
+ power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
78
+ Power iteration normalizer for randomized SVD solver.
79
+ Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
80
+ for more details.
81
+
82
+ .. versionadded:: 1.1
83
+
84
+ random_state : int, RandomState instance or None, default=None
85
+ Used during randomized svd. Pass an int for reproducible results across
86
+ multiple function calls.
87
+ See :term:`Glossary <random_state>`.
88
+
89
+ tol : float, default=0.0
90
+ Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
91
+ SVD solver.
92
+
93
+ Attributes
94
+ ----------
95
+ components_ : ndarray of shape (n_components, n_features)
96
+ The right singular vectors of the input data.
97
+
98
+ explained_variance_ : ndarray of shape (n_components,)
99
+ The variance of the training samples transformed by a projection to
100
+ each component.
101
+
102
+ explained_variance_ratio_ : ndarray of shape (n_components,)
103
+ Percentage of variance explained by each of the selected components.
104
+
105
+ singular_values_ : ndarray of shape (n_components,)
106
+ The singular values corresponding to each of the selected components.
107
+ The singular values are equal to the 2-norms of the ``n_components``
108
+ variables in the lower-dimensional space.
109
+
110
+ n_features_in_ : int
111
+ Number of features seen during :term:`fit`.
112
+
113
+ .. versionadded:: 0.24
114
+
115
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
116
+ Names of features seen during :term:`fit`. Defined only when `X`
117
+ has feature names that are all strings.
118
+
119
+ .. versionadded:: 1.0
120
+
121
+ See Also
122
+ --------
123
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
124
+ FactorAnalysis : A simple linear generative model with
125
+ Gaussian latent variables.
126
+ IncrementalPCA : Incremental principal components analysis.
127
+ KernelPCA : Kernel Principal component analysis.
128
+ NMF : Non-Negative Matrix Factorization.
129
+ PCA : Principal component analysis.
130
+
131
+ Notes
132
+ -----
133
+ SVD suffers from a problem called "sign indeterminacy", which means the
134
+ sign of the ``components_`` and the output from transform depend on the
135
+ algorithm and random state. To work around this, fit instances of this
136
+ class to data once, then keep the instance around to do transformations.
137
+
138
+ References
139
+ ----------
140
+ :arxiv:`Halko, et al. (2009). "Finding structure with randomness:
141
+ Stochastic algorithms for constructing approximate matrix decompositions"
142
+ <0909.4061>`
143
+
144
+ Examples
145
+ --------
146
+ >>> from sklearn.decomposition import TruncatedSVD
147
+ >>> from scipy.sparse import csr_matrix
148
+ >>> import numpy as np
149
+ >>> np.random.seed(0)
150
+ >>> X_dense = np.random.rand(100, 100)
151
+ >>> X_dense[:, 2 * np.arange(50)] = 0
152
+ >>> X = csr_matrix(X_dense)
153
+ >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
154
+ >>> svd.fit(X)
155
+ TruncatedSVD(n_components=5, n_iter=7, random_state=42)
156
+ >>> print(svd.explained_variance_ratio_)
157
+ [0.0157... 0.0512... 0.0499... 0.0479... 0.0453...]
158
+ >>> print(svd.explained_variance_ratio_.sum())
159
+ 0.2102...
160
+ >>> print(svd.singular_values_)
161
+ [35.2410... 4.5981... 4.5420... 4.4486... 4.3288...]
162
+ """
163
+
164
+ _parameter_constraints: dict = {
165
+ "n_components": [Interval(Integral, 1, None, closed="left")],
166
+ "algorithm": [StrOptions({"arpack", "randomized"})],
167
+ "n_iter": [Interval(Integral, 0, None, closed="left")],
168
+ "n_oversamples": [Interval(Integral, 1, None, closed="left")],
169
+ "power_iteration_normalizer": [StrOptions({"auto", "OR", "LU", "none"})],
170
+ "random_state": ["random_state"],
171
+ "tol": [Interval(Real, 0, None, closed="left")],
172
+ }
173
+
174
+ def __init__(
175
+ self,
176
+ n_components=2,
177
+ *,
178
+ algorithm="randomized",
179
+ n_iter=5,
180
+ n_oversamples=10,
181
+ power_iteration_normalizer="auto",
182
+ random_state=None,
183
+ tol=0.0,
184
+ ):
185
+ self.algorithm = algorithm
186
+ self.n_components = n_components
187
+ self.n_iter = n_iter
188
+ self.n_oversamples = n_oversamples
189
+ self.power_iteration_normalizer = power_iteration_normalizer
190
+ self.random_state = random_state
191
+ self.tol = tol
192
+
193
+ def fit(self, X, y=None):
194
+ """Fit model on training data X.
195
+
196
+ Parameters
197
+ ----------
198
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
199
+ Training data.
200
+
201
+ y : Ignored
202
+ Not used, present here for API consistency by convention.
203
+
204
+ Returns
205
+ -------
206
+ self : object
207
+ Returns the transformer object.
208
+ """
209
+ self.fit_transform(X)
210
+ return self
211
+
212
+ @_fit_context(prefer_skip_nested_validation=True)
213
+ def fit_transform(self, X, y=None):
214
+ """Fit model to X and perform dimensionality reduction on X.
215
+
216
+ Parameters
217
+ ----------
218
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
219
+ Training data.
220
+
221
+ y : Ignored
222
+ Not used, present here for API consistency by convention.
223
+
224
+ Returns
225
+ -------
226
+ X_new : ndarray of shape (n_samples, n_components)
227
+ Reduced version of X. This will always be a dense array.
228
+ """
229
+ X = self._validate_data(X, accept_sparse=["csr", "csc"], ensure_min_features=2)
230
+ random_state = check_random_state(self.random_state)
231
+
232
+ if self.algorithm == "arpack":
233
+ v0 = _init_arpack_v0(min(X.shape), random_state)
234
+ U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0)
235
+ # svds doesn't abide by scipy.linalg.svd/randomized_svd
236
+ # conventions, so reverse its outputs.
237
+ Sigma = Sigma[::-1]
238
+ U, VT = svd_flip(U[:, ::-1], VT[::-1])
239
+
240
+ elif self.algorithm == "randomized":
241
+ if self.n_components > X.shape[1]:
242
+ raise ValueError(
243
+ f"n_components({self.n_components}) must be <="
244
+ f" n_features({X.shape[1]})."
245
+ )
246
+ U, Sigma, VT = randomized_svd(
247
+ X,
248
+ self.n_components,
249
+ n_iter=self.n_iter,
250
+ n_oversamples=self.n_oversamples,
251
+ power_iteration_normalizer=self.power_iteration_normalizer,
252
+ random_state=random_state,
253
+ )
254
+
255
+ self.components_ = VT
256
+
257
+ # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T,
258
+ # X @ V is not the same as U @ Sigma
259
+ if self.algorithm == "randomized" or (
260
+ self.algorithm == "arpack" and self.tol > 0
261
+ ):
262
+ X_transformed = safe_sparse_dot(X, self.components_.T)
263
+ else:
264
+ X_transformed = U * Sigma
265
+
266
+ # Calculate explained variance & explained variance ratio
267
+ self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
268
+ if sp.issparse(X):
269
+ _, full_var = mean_variance_axis(X, axis=0)
270
+ full_var = full_var.sum()
271
+ else:
272
+ full_var = np.var(X, axis=0).sum()
273
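+ # Note (added comment): full_var is the total per-feature variance of the
+ # input, so with n_components < n_features these ratios generally sum to
+ # less than 1.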
+ self.explained_variance_ratio_ = exp_var / full_var
274
+ self.singular_values_ = Sigma # Store the singular values.
275
+
276
+ return X_transformed
277
+
278
+ def transform(self, X):
279
+ """Perform dimensionality reduction on X.
280
+
281
+ Parameters
282
+ ----------
283
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
284
+ New data.
285
+
286
+ Returns
287
+ -------
288
+ X_new : ndarray of shape (n_samples, n_components)
289
+ Reduced version of X. This will always be a dense array.
290
+ """
291
+ check_is_fitted(self)
292
+ X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False)
293
+ return safe_sparse_dot(X, self.components_.T)
294
+
295
+ def inverse_transform(self, X):
296
+ """Transform X back to its original space.
297
+
298
+ Returns an array X_original whose transform would be X.
299
+
300
+ Parameters
301
+ ----------
302
+ X : array-like of shape (n_samples, n_components)
303
+ New data.
304
+
305
+ Returns
306
+ -------
307
+ X_original : ndarray of shape (n_samples, n_features)
308
+ Note that this is always a dense array.
309
+ """
310
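+ # Note (added comment): reconstruction is X @ components_; the original
+ # data is recovered only up to the truncation error of the retained
+ # singular vectors.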
+ X = check_array(X)
311
+ return np.dot(X, self.components_)
312
+
313
+ def _more_tags(self):
314
+ return {"preserves_dtype": [np.float64, np.float32]}
315
+
316
+ @property
317
+ def _n_features_out(self):
318
+ """Number of transformed output features."""
319
+ return self.components_.shape[0]
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc ADDED
Binary file (22.6 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc ADDED
Binary file (2.95 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc ADDED
Binary file (24.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc ADDED
Binary file (5.92 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py ADDED
@@ -0,0 +1,116 @@
1
+ # Author: Christian Osendorfer <[email protected]>
2
+ # Alexandre Gramfort <[email protected]>
3
+ # License: BSD3
4
+
5
+ from itertools import combinations
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from sklearn.decomposition import FactorAnalysis
11
+ from sklearn.decomposition._factor_analysis import _ortho_rotation
12
+ from sklearn.exceptions import ConvergenceWarning
13
+ from sklearn.utils._testing import (
14
+ assert_almost_equal,
15
+ assert_array_almost_equal,
16
+ ignore_warnings,
17
+ )
18
+
19
+
20
+ # Ignore warnings from switching to more power iterations in randomized_svd
21
+ @ignore_warnings
22
+ def test_factor_analysis():
23
+ # Test FactorAnalysis ability to recover the data covariance structure
24
+ rng = np.random.RandomState(0)
25
+ n_samples, n_features, n_components = 20, 5, 3
26
+
27
+ # Some random settings for the generative model
28
+ W = rng.randn(n_components, n_features)
29
+ # latent variable of dim 3, 20 of it
30
+ h = rng.randn(n_samples, n_components)
31
+ # using gamma to model different noise variance
32
+ # per component
33
+ noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
34
+
35
+ # generate observations
36
+ # wlog, mean is 0
37
+ X = np.dot(h, W) + noise
38
+
39
+ fas = []
40
+ for method in ["randomized", "lapack"]:
41
+ fa = FactorAnalysis(n_components=n_components, svd_method=method)
42
+ fa.fit(X)
43
+ fas.append(fa)
44
+
45
+ X_t = fa.transform(X)
46
+ assert X_t.shape == (n_samples, n_components)
47
+
48
+ assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
49
+ assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
50
+
51
+ diff = np.all(np.diff(fa.loglike_) > 0)
52
+ assert diff, "Log likelihood did not increase"
53
+
54
+ # Sample Covariance
55
+ scov = np.cov(X, rowvar=0.0, bias=1.0)
56
+
57
+ # Model Covariance
58
+ mcov = fa.get_covariance()
59
+ diff = np.sum(np.abs(scov - mcov)) / W.size
60
+ assert diff < 0.1, "Mean absolute difference is %f" % diff
61
+ fa = FactorAnalysis(
62
+ n_components=n_components, noise_variance_init=np.ones(n_features)
63
+ )
64
+ with pytest.raises(ValueError):
65
+ fa.fit(X[:, :2])
66
+
67
+ def f(x, y):
68
+ return np.abs(getattr(x, y)) # sign will not be equal
69
+
70
+ fa1, fa2 = fas
71
+ for attr in ["loglike_", "components_", "noise_variance_"]:
72
+ assert_almost_equal(f(fa1, attr), f(fa2, attr))
73
+
74
+ fa1.max_iter = 1
75
+ fa1.verbose = True
76
+ with pytest.warns(ConvergenceWarning):
77
+ fa1.fit(X)
78
+
79
+ # Test get_covariance and get_precision with n_components == n_features
80
+ # with n_components < n_features and with n_components == 0
81
+ for n_components in [0, 2, X.shape[1]]:
82
+ fa.n_components = n_components
83
+ fa.fit(X)
84
+ cov = fa.get_covariance()
85
+ precision = fa.get_precision()
86
+ assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
87
+
88
+ # test rotation
89
+ n_components = 2
90
+
91
+ results, projections = {}, {}
92
+ for method in (None, "varimax", "quartimax"):
93
+ fa_var = FactorAnalysis(n_components=n_components, rotation=method)
94
+ results[method] = fa_var.fit_transform(X)
95
+ projections[method] = fa_var.get_covariance()
96
+ for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2):
97
+ assert not np.allclose(results[rot1], results[rot2])
98
+ assert np.allclose(projections[rot1], projections[rot2], atol=3)
99
+
100
+ # test against R's psych::principal with rotate="varimax"
101
+ # (i.e., the values below stem from rotating the components in R)
102
+ # R's factor analysis returns quite different values; therefore, we only
103
+ # test the rotation itself
104
+ factors = np.array(
105
+ [
106
+ [0.89421016, -0.35854928, -0.27770122, 0.03773647],
107
+ [-0.45081822, -0.89132754, 0.0932195, -0.01787973],
108
+ [0.99500666, -0.02031465, 0.05426497, -0.11539407],
109
+ [0.96822861, -0.06299656, 0.24411001, 0.07540887],
110
+ ]
111
+ )
112
+ r_solution = np.array(
113
+ [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]]
114
+ )
115
+ rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T
116
+ assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3)
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py ADDED
@@ -0,0 +1,451 @@
1
+ """
2
+ Test the fastica algorithm.
3
+ """
4
+ import itertools
5
+ import os
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytest
10
+ from scipy import stats
11
+
12
+ from sklearn.decomposition import PCA, FastICA, fastica
13
+ from sklearn.decomposition._fastica import _gs_decorrelation
14
+ from sklearn.exceptions import ConvergenceWarning
15
+ from sklearn.utils._testing import assert_allclose
16
+
17
+
18
+ def center_and_norm(x, axis=-1):
19
+ """Centers and norms x **in place**
20
+
21
+ Parameters
22
+ -----------
23
+ x: ndarray
24
+ Array with an axis of observations (statistical units) measured on
25
+ random variables.
26
+ axis: int, optional
27
+ Axis along which the mean and variance are calculated.
28
+ """
29
+ x = np.rollaxis(x, axis)
30
+ x -= x.mean(axis=0)
31
+ x /= x.std(axis=0)
32
+
33
+
34
+ def test_gs():
35
+ # Test gram schmidt orthonormalization
36
+ # generate a random orthogonal matrix
37
+ rng = np.random.RandomState(0)
38
+ W, _, _ = np.linalg.svd(rng.randn(10, 10))
39
+ w = rng.randn(10)
40
+ _gs_decorrelation(w, W, 10)
41
+ assert (w**2).sum() < 1.0e-10
42
+ w = rng.randn(10)
43
+ u = _gs_decorrelation(w, W, 5)
44
+ tmp = np.dot(u, W.T)
45
+ assert (tmp[:5] ** 2).sum() < 1.0e-10
46
+
47
+
48
+ def test_fastica_attributes_dtypes(global_dtype):
49
+ rng = np.random.RandomState(0)
50
+ X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
51
+ fica = FastICA(
52
+ n_components=5, max_iter=1000, whiten="unit-variance", random_state=0
53
+ ).fit(X)
54
+ assert fica.components_.dtype == global_dtype
55
+ assert fica.mixing_.dtype == global_dtype
56
+ assert fica.mean_.dtype == global_dtype
57
+ assert fica.whitening_.dtype == global_dtype
58
+
59
+
60
+ def test_fastica_return_dtypes(global_dtype):
61
+ rng = np.random.RandomState(0)
62
+ X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
63
+ k_, mixing_, s_ = fastica(
64
+ X, max_iter=1000, whiten="unit-variance", random_state=rng
65
+ )
66
+ assert k_.dtype == global_dtype
67
+ assert mixing_.dtype == global_dtype
68
+ assert s_.dtype == global_dtype
69
+
70
+
71
+ @pytest.mark.parametrize("add_noise", [True, False])
72
+ def test_fastica_simple(add_noise, global_random_seed, global_dtype):
73
+ if (
74
+ global_random_seed == 20
75
+ and global_dtype == np.float32
76
+ and not add_noise
77
+ and os.getenv("DISTRIB") == "ubuntu"
78
+ ):
79
+ pytest.xfail(
80
+ "FastICA instability with Ubuntu Atlas build with float32 "
81
+ "global_dtype. For more details, see "
82
+ "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119" # noqa
83
+ )
84
+
85
+ # Test the FastICA algorithm on very simple data.
86
+ rng = np.random.RandomState(global_random_seed)
87
+ n_samples = 1000
88
+ # Generate two sources:
89
+ s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
90
+ s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed)
91
+ s = np.c_[s1, s2].T
92
+ center_and_norm(s)
93
+ s = s.astype(global_dtype)
94
+ s1, s2 = s
95
+
96
+ # Mixing angle
97
+ phi = 0.6
98
+ mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
99
+ mixing = mixing.astype(global_dtype)
100
+ m = np.dot(mixing, s)
101
+
102
+ if add_noise:
103
+ m += 0.1 * rng.randn(2, 1000)
104
+
105
+ center_and_norm(m)
106
+
107
+ # function as fun arg
108
+ def g_test(x):
109
+ return x**3, (3 * x**2).mean(axis=-1)
110
+
111
+ algos = ["parallel", "deflation"]
112
+ nls = ["logcosh", "exp", "cube", g_test]
113
+ whitening = ["arbitrary-variance", "unit-variance", False]
114
+ for algo, nl, whiten in itertools.product(algos, nls, whitening):
115
+ if whiten:
116
+ k_, mixing_, s_ = fastica(
117
+ m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng
118
+ )
119
+ with pytest.raises(ValueError):
120
+ fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo)
121
+ else:
122
+ pca = PCA(n_components=2, whiten=True, random_state=rng)
123
+ X = pca.fit_transform(m.T)
124
+ k_, mixing_, s_ = fastica(
125
+ X, fun=nl, algorithm=algo, whiten=False, random_state=rng
126
+ )
127
+ with pytest.raises(ValueError):
128
+ fastica(X, fun=np.tanh, algorithm=algo)
129
+ s_ = s_.T
130
+ # Check that the mixing model described in the docstring holds:
131
+ if whiten:
132
+ # XXX: exact reconstruction to standard relative tolerance is not
133
+ # possible. This is probably expected when add_noise is True but we
134
+ # also need a non-trivial atol in float32 when add_noise is False.
135
+ #
136
+ # Note that the 2 sources are non-Gaussian in this test.
137
+ atol = 1e-5 if global_dtype == np.float32 else 0
138
+ assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol)
139
+
140
+ center_and_norm(s_)
141
+ s1_, s2_ = s_
142
+ # Check to see if the sources have been estimated
143
+ # in the wrong order
144
+ if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
145
+ s2_, s1_ = s_
146
+ s1_ *= np.sign(np.dot(s1_, s1))
147
+ s2_ *= np.sign(np.dot(s2_, s2))
148
+
149
+ # Check that we have estimated the original sources
150
+ if not add_noise:
151
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2)
152
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2)
153
+ else:
154
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1)
155
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1)
156
+
157
+ # Test FastICA class
158
+ _, _, sources_fun = fastica(
159
+ m.T, fun=nl, algorithm=algo, random_state=global_random_seed
160
+ )
161
+ ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed)
162
+ sources = ica.fit_transform(m.T)
163
+ assert ica.components_.shape == (2, 2)
164
+ assert sources.shape == (1000, 2)
165
+
166
+ assert_allclose(sources_fun, sources)
167
+ # Set atol to account for the different magnitudes of the elements in sources
168
+ # (from 1e-4 to 1e1).
169
+ atol = np.max(np.abs(sources)) * (1e-5 if global_dtype == np.float32 else 1e-7)
170
+ assert_allclose(sources, ica.transform(m.T), atol=atol)
171
+
172
+ assert ica.mixing_.shape == (2, 2)
173
+
174
+ ica = FastICA(fun=np.tanh, algorithm=algo)
175
+ with pytest.raises(ValueError):
176
+ ica.fit(m.T)
177
+
178
+
179
+ def test_fastica_nowhiten():
180
+ m = [[0, 1], [1, 0]]
181
+
182
+ # test for issue #697
183
+ ica = FastICA(n_components=1, whiten=False, random_state=0)
184
+ warn_msg = "Ignoring n_components with whiten=False."
185
+ with pytest.warns(UserWarning, match=warn_msg):
186
+ ica.fit(m)
187
+ assert hasattr(ica, "mixing_")
188
+
189
+
190
+ def test_fastica_convergence_fail():
191
+ # Test the FastICA algorithm on very simple data
192
+ # (see test_non_square_fastica).
193
+ # Ensure a ConvergenceWarning raised if the tolerance is sufficiently low.
194
+ rng = np.random.RandomState(0)
195
+
196
+ n_samples = 1000
197
+ # Generate two sources:
198
+ t = np.linspace(0, 100, n_samples)
199
+ s1 = np.sin(t)
200
+ s2 = np.ceil(np.sin(np.pi * t))
201
+ s = np.c_[s1, s2].T
202
+ center_and_norm(s)
203
+
204
+ # Mixing matrix
205
+ mixing = rng.randn(6, 2)
206
+ m = np.dot(mixing, s)
207
+
208
+ # Do fastICA with tolerance 0. to ensure failing convergence
209
+ warn_msg = (
210
+ "FastICA did not converge. Consider increasing tolerance "
211
+ "or the maximum number of iterations."
212
+ )
213
+ with pytest.warns(ConvergenceWarning, match=warn_msg):
214
+ ica = FastICA(
215
+ algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.0
216
+ )
217
+ ica.fit(m.T)
218
+
219
+
220
+ @pytest.mark.parametrize("add_noise", [True, False])
221
+ def test_non_square_fastica(add_noise):
222
+ # Test the FastICA algorithm on very simple data.
223
+ rng = np.random.RandomState(0)
224
+
225
+ n_samples = 1000
226
+ # Generate two sources:
227
+ t = np.linspace(0, 100, n_samples)
228
+ s1 = np.sin(t)
229
+ s2 = np.ceil(np.sin(np.pi * t))
230
+ s = np.c_[s1, s2].T
231
+ center_and_norm(s)
232
+ s1, s2 = s
233
+
234
+ # Mixing matrix
235
+ mixing = rng.randn(6, 2)
236
+ m = np.dot(mixing, s)
237
+
238
+ if add_noise:
239
+ m += 0.1 * rng.randn(6, n_samples)
240
+
241
+ center_and_norm(m)
242
+
243
+ k_, mixing_, s_ = fastica(
244
+ m.T, n_components=2, whiten="unit-variance", random_state=rng
245
+ )
246
+ s_ = s_.T
247
+
248
+ # Check that the mixing model described in the docstring holds:
249
+ assert_allclose(s_, np.dot(np.dot(mixing_, k_), m))
250
+
251
+ center_and_norm(s_)
252
+ s1_, s2_ = s_
253
+ # Check to see if the sources have been estimated
254
+ # in the wrong order
255
+ if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
256
+ s2_, s1_ = s_
257
+ s1_ *= np.sign(np.dot(s1_, s1))
258
+ s2_ *= np.sign(np.dot(s2_, s2))
259
+
260
+ # Check that we have estimated the original sources
261
+ if not add_noise:
262
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3)
263
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3)
264
+
265
+
266
+ def test_fit_transform(global_random_seed, global_dtype):
267
+ """Test unit variance of transformed data using FastICA algorithm.
268
+
269
+ Check that `fit_transform` gives the same result as applying
270
+ `fit` and then `transform`.
271
+
272
+ Bug #13056
273
+ """
274
+ # multivariate uniform data in [0, 1]
275
+ rng = np.random.RandomState(global_random_seed)
276
+ X = rng.random_sample((100, 10)).astype(global_dtype)
277
+ max_iter = 300
278
+ for whiten, n_components in [["unit-variance", 5], [False, None]]:
279
+ n_components_ = n_components if n_components is not None else X.shape[1]
280
+
281
+ ica = FastICA(
282
+ n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
283
+ )
284
+ with warnings.catch_warnings():
285
+ # make sure that numerical errors do not cause sqrt of negative
286
+ # values
287
+ warnings.simplefilter("error", RuntimeWarning)
288
+ # XXX: for some seeds, the model does not converge.
289
+ # However this is not what we test here.
290
+ warnings.simplefilter("ignore", ConvergenceWarning)
291
+ Xt = ica.fit_transform(X)
292
+ assert ica.components_.shape == (n_components_, 10)
293
+ assert Xt.shape == (X.shape[0], n_components_)
294
+
295
+ ica2 = FastICA(
296
+ n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
297
+ )
298
+ with warnings.catch_warnings():
299
+ # make sure that numerical errors do not cause sqrt of negative
300
+ # values
301
+ warnings.simplefilter("error", RuntimeWarning)
302
+ warnings.simplefilter("ignore", ConvergenceWarning)
303
+ ica2.fit(X)
304
+ assert ica2.components_.shape == (n_components_, 10)
305
+ Xt2 = ica2.transform(X)
306
+
307
+ # XXX: we have to set atol for this test to pass for all seeds when
308
+ # fitting with float32 data. Is this revealing a bug?
309
+ if global_dtype:
310
+ atol = np.abs(Xt2).mean() / 1e6
311
+ else:
312
+ atol = 0.0 # the default rtol is enough for float64 data
313
+ assert_allclose(Xt, Xt2, atol=atol)
314
+
315
+
316
+ @pytest.mark.filterwarnings("ignore:Ignoring n_components with whiten=False.")
317
+ @pytest.mark.parametrize(
318
+ "whiten, n_components, expected_mixing_shape",
319
+ [
320
+ ("arbitrary-variance", 5, (10, 5)),
321
+ ("arbitrary-variance", 10, (10, 10)),
322
+ ("unit-variance", 5, (10, 5)),
323
+ ("unit-variance", 10, (10, 10)),
324
+ (False, 5, (10, 10)),
325
+ (False, 10, (10, 10)),
326
+ ],
327
+ )
328
+ def test_inverse_transform(
329
+ whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype
330
+ ):
331
+ # Test FastICA.inverse_transform
332
+ n_samples = 100
333
+ rng = np.random.RandomState(global_random_seed)
334
+ X = rng.random_sample((n_samples, 10)).astype(global_dtype)
335
+
336
+ ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten)
337
+ with warnings.catch_warnings():
338
+ # For some dataset (depending on the value of global_dtype) the model
339
+ # can fail to converge but this should not impact the definition of
340
+ # a valid inverse transform.
341
+ warnings.simplefilter("ignore", ConvergenceWarning)
342
+ Xt = ica.fit_transform(X)
343
+ assert ica.mixing_.shape == expected_mixing_shape
344
+ X2 = ica.inverse_transform(Xt)
345
+ assert X.shape == X2.shape
346
+
347
+ # reversibility test in non-reduction case
348
+ if n_components == X.shape[1]:
349
+ # XXX: we have to set atol for this test to pass for all seeds when
350
+ # fitting with float32 data. Is this revealing a bug?
351
+ if global_dtype:
352
+ # XXX: dividing by a smaller number makes
353
+ # tests fail for some seeds.
354
+ atol = np.abs(X2).mean() / 1e5
355
+ else:
356
+ atol = 0.0 # the default rtol is enough for float64 data
357
+ assert_allclose(X, X2, atol=atol)
358
+
359
+
360
+ def test_fastica_errors():
361
+ n_features = 3
362
+ n_samples = 10
363
+ rng = np.random.RandomState(0)
364
+ X = rng.random_sample((n_samples, n_features))
365
+ w_init = rng.randn(n_features + 1, n_features + 1)
366
+ with pytest.raises(ValueError, match=r"alpha must be in \[1,2\]"):
367
+ fastica(X, fun_args={"alpha": 0})
368
+ with pytest.raises(
369
+ ValueError, match="w_init has invalid shape.+" r"should be \(3L?, 3L?\)"
370
+ ):
371
+ fastica(X, w_init=w_init)
372
+
373
+
374
+ def test_fastica_whiten_unit_variance():
375
+ """Test unit variance of transformed data using FastICA algorithm.
376
+
377
+ Bug #13056
378
+ """
379
+ rng = np.random.RandomState(0)
380
+ X = rng.random_sample((100, 10))
381
+ n_components = X.shape[1]
382
+ ica = FastICA(n_components=n_components, whiten="unit-variance", random_state=0)
383
+ Xt = ica.fit_transform(X)
384
+
385
+ assert np.var(Xt) == pytest.approx(1.0)
386
+
387
+
388
+ @pytest.mark.parametrize("whiten", ["arbitrary-variance", "unit-variance", False])
389
+ @pytest.mark.parametrize("return_X_mean", [True, False])
390
+ @pytest.mark.parametrize("return_n_iter", [True, False])
391
+ def test_fastica_output_shape(whiten, return_X_mean, return_n_iter):
392
+ n_features = 3
393
+ n_samples = 10
394
+ rng = np.random.RandomState(0)
395
+ X = rng.random_sample((n_samples, n_features))
396
+
397
+ expected_len = 3 + return_X_mean + return_n_iter
398
+
399
+ out = fastica(
400
+ X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean
401
+ )
402
+
403
+ assert len(out) == expected_len
404
+ if not whiten:
405
+ assert out[0] is None
406
+
407
+
408
+ @pytest.mark.parametrize("add_noise", [True, False])
409
+ def test_fastica_simple_different_solvers(add_noise, global_random_seed):
410
+ """Test FastICA is consistent between whiten_solvers."""
411
+ rng = np.random.RandomState(global_random_seed)
412
+ n_samples = 1000
413
+ # Generate two sources:
414
+ s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
415
+ s2 = stats.t.rvs(1, size=n_samples, random_state=rng)
416
+ s = np.c_[s1, s2].T
417
+ center_and_norm(s)
418
+ s1, s2 = s
419
+
420
+ # Mixing angle
421
+ phi = rng.rand() * 2 * np.pi
422
+ mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
423
+ m = np.dot(mixing, s)
424
+
425
+ if add_noise:
426
+ m += 0.1 * rng.randn(2, 1000)
427
+
428
+ center_and_norm(m)
429
+
430
+ outs = {}
431
+ for solver in ("svd", "eigh"):
432
+ ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver=solver)
433
+ sources = ica.fit_transform(m.T)
434
+ outs[solver] = sources
435
+ assert ica.components_.shape == (2, 2)
436
+ assert sources.shape == (1000, 2)
437
+
438
+ # compared numbers are not all on the same magnitude. Using a small atol to
439
+ # make the test less brittle
440
+ assert_allclose(outs["eigh"], outs["svd"], atol=1e-12)
441
+
442
+
443
+ def test_fastica_eigh_low_rank_warning(global_random_seed):
444
+ """Test FastICA eigh solver raises warning for low-rank data."""
445
+ rng = np.random.RandomState(global_random_seed)
446
+ A = rng.randn(10, 2)
447
+ X = A @ A.T
448
+ ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh")
449
+ msg = "There are some small singular values"
450
+ with pytest.warns(UserWarning, match=msg):
451
+ ica.fit(X)
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py ADDED
@@ -0,0 +1,452 @@
1
+ """Tests for Incremental PCA."""
2
+ import warnings
3
+
4
+ import numpy as np
5
+ import pytest
6
+ from numpy.testing import assert_array_equal
7
+
8
+ from sklearn import datasets
9
+ from sklearn.decomposition import PCA, IncrementalPCA
10
+ from sklearn.utils._testing import (
11
+ assert_allclose_dense_sparse,
12
+ assert_almost_equal,
13
+ assert_array_almost_equal,
14
+ )
15
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
16
+
17
+ iris = datasets.load_iris()
18
+
19
+
20
+ def test_incremental_pca():
21
+ # Incremental PCA on dense arrays.
22
+ X = iris.data
23
+ batch_size = X.shape[0] // 3
24
+ ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
25
+ pca = PCA(n_components=2)
26
+ pca.fit_transform(X)
27
+
28
+ X_transformed = ipca.fit_transform(X)
29
+
30
+ assert X_transformed.shape == (X.shape[0], 2)
31
+ np.testing.assert_allclose(
32
+ ipca.explained_variance_ratio_.sum(),
33
+ pca.explained_variance_ratio_.sum(),
34
+ rtol=1e-3,
35
+ )
36
+
37
+ for n_components in [1, 2, X.shape[1]]:
38
+ ipca = IncrementalPCA(n_components, batch_size=batch_size)
39
+ ipca.fit(X)
40
+ cov = ipca.get_covariance()
41
+ precision = ipca.get_precision()
42
+ np.testing.assert_allclose(
43
+ np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13
44
+ )
45
+
46
+
47
+ @pytest.mark.parametrize(
48
+ "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS
49
+ )
50
+ def test_incremental_pca_sparse(sparse_container):
51
+ # Incremental PCA on sparse arrays.
52
+ X = iris.data
53
+ pca = PCA(n_components=2)
54
+ pca.fit_transform(X)
55
+ X_sparse = sparse_container(X)
56
+ batch_size = X_sparse.shape[0] // 3
57
+ ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
58
+
59
+ X_transformed = ipca.fit_transform(X_sparse)
60
+
61
+ assert X_transformed.shape == (X_sparse.shape[0], 2)
62
+ np.testing.assert_allclose(
63
+ ipca.explained_variance_ratio_.sum(),
64
+ pca.explained_variance_ratio_.sum(),
65
+ rtol=1e-3,
66
+ )
67
+
68
+ for n_components in [1, 2, X.shape[1]]:
69
+ ipca = IncrementalPCA(n_components, batch_size=batch_size)
70
+ ipca.fit(X_sparse)
71
+ cov = ipca.get_covariance()
72
+ precision = ipca.get_precision()
73
+ np.testing.assert_allclose(
74
+ np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13
75
+ )
76
+
77
+ with pytest.raises(
78
+ TypeError,
79
+ match=(
80
+ "IncrementalPCA.partial_fit does not support "
81
+ "sparse input. Either convert data to dense "
82
+ "or use IncrementalPCA.fit to do so in batches."
83
+ ),
84
+ ):
85
+ ipca.partial_fit(X_sparse)
86
+
87
+
88
+ def test_incremental_pca_check_projection():
89
+ # Test that the projection of data is correct.
90
+ rng = np.random.RandomState(1999)
91
+ n, p = 100, 3
92
+ X = rng.randn(n, p) * 0.1
93
+ X[:10] += np.array([3, 4, 5])
94
+ Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
95
+
96
+ # Get the reconstruction of the generated data X
97
+ # Note that Xt has the same "components" as X, just separated
98
+ # This is what we want to ensure is recreated correctly
99
+ Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
100
+
101
+ # Normalize
102
+ Yt /= np.sqrt((Yt**2).sum())
103
+
104
+ # Make sure that the first element of Yt is ~1, this means
105
+ # the reconstruction worked as expected
106
+ assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1)
107
+
108
+
109
+ def test_incremental_pca_inverse():
110
+ # Test that the projection of data can be inverted.
111
+ rng = np.random.RandomState(1999)
112
+ n, p = 50, 3
113
+ X = rng.randn(n, p) # spherical data
114
+ X[:, 1] *= 0.00001 # make middle component relatively small
115
+ X += [5, 4, 3] # make a large mean
116
+
117
+ # same check that we can find the original data from the transformed
118
+ # signal (since the data is almost of rank n_components)
119
+ ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
120
+ Y = ipca.transform(X)
121
+ Y_inverse = ipca.inverse_transform(Y)
122
+ assert_almost_equal(X, Y_inverse, decimal=3)
123
+
124
+
125
+ def test_incremental_pca_validation():
126
+ # Test that n_components is <= n_features.
127
+ X = np.array([[0, 1, 0], [1, 0, 0]])
128
+ n_samples, n_features = X.shape
129
+ n_components = 4
130
+ with pytest.raises(
131
+ ValueError,
132
+ match=(
133
+ "n_components={} invalid"
134
+ " for n_features={}, need more rows than"
135
+ " columns for IncrementalPCA"
136
+ " processing".format(n_components, n_features)
137
+ ),
138
+ ):
139
+ IncrementalPCA(n_components, batch_size=10).fit(X)
140
+
141
+ # Tests that n_components is also <= n_samples.
142
+ n_components = 3
143
+ with pytest.raises(
144
+ ValueError,
145
+ match=(
146
+ "n_components={} must be"
147
+ " less or equal to the batch number of"
148
+ " samples {}".format(n_components, n_samples)
149
+ ),
150
+ ):
151
+ IncrementalPCA(n_components=n_components).partial_fit(X)
152
+
153
+
154
+ def test_n_samples_equal_n_components():
155
+ # Ensures no warning is raised when n_samples==n_components
156
+ # Non-regression test for gh-19050
157
+ ipca = IncrementalPCA(n_components=5)
158
+ with warnings.catch_warnings():
159
+ warnings.simplefilter("error", RuntimeWarning)
160
+ ipca.partial_fit(np.random.randn(5, 7))
161
+ with warnings.catch_warnings():
162
+ warnings.simplefilter("error", RuntimeWarning)
163
+ ipca.fit(np.random.randn(5, 7))
164
+
165
+
166
+ def test_n_components_none():
167
+ # Ensures that n_components == None is handled correctly
168
+ rng = np.random.RandomState(1999)
169
+ for n_samples, n_features in [(50, 10), (10, 50)]:
170
+ X = rng.rand(n_samples, n_features)
171
+ ipca = IncrementalPCA(n_components=None)
172
+
173
+ # First partial_fit call, ipca.n_components_ is inferred from
174
+ # min(X.shape)
175
+ ipca.partial_fit(X)
176
+ assert ipca.n_components_ == min(X.shape)
177
+
178
+ # Second partial_fit call, ipca.n_components_ is inferred from
179
+ # ipca.components_ computed from the first partial_fit call
180
+ ipca.partial_fit(X)
181
+ assert ipca.n_components_ == ipca.components_.shape[0]
182
+
183
+
184
+ def test_incremental_pca_set_params():
185
+ # Test that changing n_components via set_params between (partial_)fit calls raises an error.
186
+ rng = np.random.RandomState(1999)
187
+ n_samples = 100
188
+ n_features = 20
189
+ X = rng.randn(n_samples, n_features)
190
+ X2 = rng.randn(n_samples, n_features)
191
+ X3 = rng.randn(n_samples, n_features)
192
+ ipca = IncrementalPCA(n_components=20)
193
+ ipca.fit(X)
194
+ # Decreasing number of components
195
+ ipca.set_params(n_components=10)
196
+ with pytest.raises(ValueError):
197
+ ipca.partial_fit(X2)
198
+ # Increasing number of components
199
+ ipca.set_params(n_components=15)
200
+ with pytest.raises(ValueError):
201
+ ipca.partial_fit(X3)
202
+ # Returning to original setting
203
+ ipca.set_params(n_components=20)
204
+ ipca.partial_fit(X)
205
+
206
+
207
+ def test_incremental_pca_num_features_change():
208
+ # Test that changing n_components will raise an error.
209
+ rng = np.random.RandomState(1999)
210
+ n_samples = 100
211
+ X = rng.randn(n_samples, 20)
212
+ X2 = rng.randn(n_samples, 50)
213
+ ipca = IncrementalPCA(n_components=None)
214
+ ipca.fit(X)
215
+ with pytest.raises(ValueError):
216
+ ipca.partial_fit(X2)
217
+
218
+
219
+ def test_incremental_pca_batch_signs():
220
+ # Test that components_ sign is stable over batch sizes.
221
+ rng = np.random.RandomState(1999)
222
+ n_samples = 100
223
+ n_features = 3
224
+ X = rng.randn(n_samples, n_features)
225
+ all_components = []
226
+ batch_sizes = np.arange(10, 20)
227
+ for batch_size in batch_sizes:
228
+ ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
229
+ all_components.append(ipca.components_)
230
+
231
+ for i, j in zip(all_components[:-1], all_components[1:]):
232
+ assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
233
+
234
+
235
+ def test_incremental_pca_batch_values():
236
+ # Test that components_ values are stable over batch sizes.
237
+ rng = np.random.RandomState(1999)
238
+ n_samples = 100
239
+ n_features = 3
240
+ X = rng.randn(n_samples, n_features)
241
+ all_components = []
242
+ batch_sizes = np.arange(20, 40, 3)
243
+ for batch_size in batch_sizes:
244
+ ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
245
+ all_components.append(ipca.components_)
246
+
247
+ for i, j in zip(all_components[:-1], all_components[1:]):
248
+ assert_almost_equal(i, j, decimal=1)
249
+
250
+
251
+ def test_incremental_pca_batch_rank():
252
+ # Test sample size in each batch is always larger or equal to n_components
253
+ rng = np.random.RandomState(1999)
254
+ n_samples = 100
255
+ n_features = 20
256
+ X = rng.randn(n_samples, n_features)
257
+ all_components = []
258
+ batch_sizes = np.arange(20, 90, 3)
259
+ for batch_size in batch_sizes:
260
+ ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X)
261
+ all_components.append(ipca.components_)
262
+
263
+ for components_i, components_j in zip(all_components[:-1], all_components[1:]):
264
+ assert_allclose_dense_sparse(components_i, components_j)
265
+
266
+
267
+ def test_incremental_pca_partial_fit():
268
+ # Test that fit and partial_fit get equivalent results.
269
+ rng = np.random.RandomState(1999)
270
+ n, p = 50, 3
271
+ X = rng.randn(n, p) # spherical data
272
+ X[:, 1] *= 0.00001 # make middle component relatively small
273
+ X += [5, 4, 3] # make a large mean
274
+
275
+ # same check that we can find the original data from the transformed
276
+ # signal (since the data is almost of rank n_components)
277
+ batch_size = 10
278
+ ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
279
+ pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
280
+ # Add one to make sure endpoint is included
281
+ batch_itr = np.arange(0, n + 1, batch_size)
282
+ for i, j in zip(batch_itr[:-1], batch_itr[1:]):
283
+ pipca.partial_fit(X[i:j, :])
284
+ assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
285
+
286
+
287
+ def test_incremental_pca_against_pca_iris():
288
+ # Test that IncrementalPCA and PCA are approximate (to a sign flip).
289
+ X = iris.data
290
+
291
+ Y_pca = PCA(n_components=2).fit_transform(X)
292
+ Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
293
+
294
+ assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
295
+
296
+
297
+ def test_incremental_pca_against_pca_random_data():
298
+ # Test that IncrementalPCA and PCA are approximate (to a sign flip).
299
+ rng = np.random.RandomState(1999)
300
+ n_samples = 100
301
+ n_features = 3
302
+ X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
303
+
304
+ Y_pca = PCA(n_components=3).fit_transform(X)
305
+ Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
306
+
307
+ assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
308
+
309
+
310
+ def test_explained_variances():
311
+ # Test that PCA and IncrementalPCA calculations match
312
+ X = datasets.make_low_rank_matrix(
313
+ 1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999
314
+ )
315
+ prec = 3
316
+ n_samples, n_features = X.shape
317
+ for nc in [None, 99]:
318
+ pca = PCA(n_components=nc).fit(X)
319
+ ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
320
+ assert_almost_equal(
321
+ pca.explained_variance_, ipca.explained_variance_, decimal=prec
322
+ )
323
+ assert_almost_equal(
324
+ pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec
325
+ )
326
+ assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec)
327
+
328
+
329
+ def test_singular_values():
330
+ # Check that the IncrementalPCA output has the correct singular values
331
+
332
+ rng = np.random.RandomState(0)
333
+ n_samples = 1000
334
+ n_features = 100
335
+
336
+ X = datasets.make_low_rank_matrix(
337
+ n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng
338
+ )
339
+
340
+ pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X)
341
+ ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
342
+ assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
343
+
344
+ # Compare to the Frobenius norm
345
+ X_pca = pca.transform(X)
346
+ X_ipca = ipca.transform(X)
347
+ assert_array_almost_equal(
348
+ np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12
349
+ )
350
+ assert_array_almost_equal(
351
+ np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2
352
+ )
353
+
354
+ # Compare to the 2-norms of the score vectors
355
+ assert_array_almost_equal(
356
+ pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12
357
+ )
358
+ assert_array_almost_equal(
359
+ ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2
360
+ )
361
+
362
+ # Set the singular values and see what we get back
363
+ rng = np.random.RandomState(0)
364
+ n_samples = 100
365
+ n_features = 110
366
+
367
+ X = datasets.make_low_rank_matrix(
368
+ n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng
369
+ )
370
+
371
+ pca = PCA(n_components=3, svd_solver="full", random_state=rng)
372
+ ipca = IncrementalPCA(n_components=3, batch_size=100)
373
+
374
+ X_pca = pca.fit_transform(X)
375
+ X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
376
+ X_pca[:, 0] *= 3.142
377
+ X_pca[:, 1] *= 2.718
378
+
379
+ X_hat = np.dot(X_pca, pca.components_)
380
+ pca.fit(X_hat)
381
+ ipca.fit(X_hat)
382
+ assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
383
+ assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
384
+
385
+
386
+ def test_whitening():
387
+ # Test that PCA and IncrementalPCA transforms match to sign flip.
388
+ X = datasets.make_low_rank_matrix(
389
+ 1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999
390
+ )
391
+ prec = 3
392
+ n_samples, n_features = X.shape
393
+ for nc in [None, 9]:
394
+ pca = PCA(whiten=True, n_components=nc).fit(X)
395
+ ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X)
396
+
397
+ Xt_pca = pca.transform(X)
398
+ Xt_ipca = ipca.transform(X)
399
+ assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
400
+ Xinv_ipca = ipca.inverse_transform(Xt_ipca)
401
+ Xinv_pca = pca.inverse_transform(Xt_pca)
402
+ assert_almost_equal(X, Xinv_ipca, decimal=prec)
403
+ assert_almost_equal(X, Xinv_pca, decimal=prec)
404
+ assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
405
+
406
+
407
+ def test_incremental_pca_partial_fit_float_division():
408
+ # Test to ensure float division is used in all versions of Python
409
+ # (non-regression test for issue #9489)
410
+
411
+ rng = np.random.RandomState(0)
412
+ A = rng.randn(5, 3) + 2
413
+ B = rng.randn(7, 3) + 5
414
+
415
+ pca = IncrementalPCA(n_components=2)
416
+ pca.partial_fit(A)
417
+ # Set n_samples_seen_ to be a floating point number instead of an int
418
+ pca.n_samples_seen_ = float(pca.n_samples_seen_)
419
+ pca.partial_fit(B)
420
+ singular_vals_float_samples_seen = pca.singular_values_
421
+
422
+ pca2 = IncrementalPCA(n_components=2)
423
+ pca2.partial_fit(A)
424
+ pca2.partial_fit(B)
425
+ singular_vals_int_samples_seen = pca2.singular_values_
426
+
427
+ np.testing.assert_allclose(
428
+ singular_vals_float_samples_seen, singular_vals_int_samples_seen
429
+ )
430
+
431
+
432
+ def test_incremental_pca_fit_overflow_error():
433
+ # Test for overflow error on Windows OS
434
+ # (non-regression test for issue #17693)
435
+ rng = np.random.RandomState(0)
436
+ A = rng.rand(500000, 2)
437
+
438
+ ipca = IncrementalPCA(n_components=2, batch_size=10000)
439
+ ipca.fit(A)
440
+
441
+ pca = PCA(n_components=2)
442
+ pca.fit(A)
443
+
444
+ np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_)
445
+
446
+
447
+ def test_incremental_pca_feature_names_out():
448
+ """Check feature names out for IncrementalPCA."""
449
+ ipca = IncrementalPCA(n_components=2).fit(iris.data)
450
+
451
+ names = ipca.get_feature_names_out()
452
+ assert_array_equal([f"incrementalpca{i}" for i in range(2)], names)
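As an illustrative aside (not part of the diff; the array shape and batch count below are arbitrary assumptions), this is the batched partial_fit pattern that the partial_fit tests above exercise:

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(1000, 20)  # arbitrary example data

ipca = IncrementalPCA(n_components=5)
for batch in np.array_split(X, 10):
    # each call updates the running mean, components_ and n_samples_seen_
    ipca.partial_fit(batch)

X_reduced = ipca.transform(X)  # shape (1000, 5)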
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py ADDED
@@ -0,0 +1,566 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import sklearn
7
+ from sklearn.datasets import load_iris, make_blobs, make_circles
8
+ from sklearn.decomposition import PCA, KernelPCA
9
+ from sklearn.exceptions import NotFittedError
10
+ from sklearn.linear_model import Perceptron
11
+ from sklearn.metrics.pairwise import rbf_kernel
12
+ from sklearn.model_selection import GridSearchCV
13
+ from sklearn.pipeline import Pipeline
14
+ from sklearn.preprocessing import StandardScaler
15
+ from sklearn.utils._testing import (
16
+ assert_allclose,
17
+ assert_array_almost_equal,
18
+ assert_array_equal,
19
+ )
20
+ from sklearn.utils.fixes import CSR_CONTAINERS
21
+ from sklearn.utils.validation import _check_psd_eigenvalues
22
+
23
+
24
+ def test_kernel_pca():
25
+ """Nominal test for all solvers and all known kernels + a custom one
26
+
27
+ It tests
28
+ - that fit_transform is equivalent to fit+transform
29
+ - that the shapes of transforms and inverse transforms are correct
30
+ """
31
+ rng = np.random.RandomState(0)
32
+ X_fit = rng.random_sample((5, 4))
33
+ X_pred = rng.random_sample((2, 4))
34
+
35
+ def histogram(x, y, **kwargs):
36
+ # Histogram kernel implemented as a callable.
37
+ assert kwargs == {} # no kernel_params that we didn't ask for
38
+ return np.minimum(x, y).sum()
39
+
40
+ for eigen_solver in ("auto", "dense", "arpack", "randomized"):
41
+ for kernel in ("linear", "rbf", "poly", histogram):
42
+ # histogram kernel produces singular matrix inside linalg.solve
43
+ # XXX use a least-squares approximation?
44
+ inv = not callable(kernel)
45
+
46
+ # transform fit data
47
+ kpca = KernelPCA(
48
+ 4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv
49
+ )
50
+ X_fit_transformed = kpca.fit_transform(X_fit)
51
+ X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
52
+ assert_array_almost_equal(
53
+ np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
54
+ )
55
+
56
+ # non-regression test: previously, gamma would be 0 by default,
57
+ # forcing all eigenvalues to 0 under the poly kernel
58
+ assert X_fit_transformed.size != 0
59
+
60
+ # transform new data
61
+ X_pred_transformed = kpca.transform(X_pred)
62
+ assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]
63
+
64
+ # inverse transform
65
+ if inv:
66
+ X_pred2 = kpca.inverse_transform(X_pred_transformed)
67
+ assert X_pred2.shape == X_pred.shape
68
+
69
+
70
+ def test_kernel_pca_invalid_parameters():
71
+ """Check that kPCA raises an error if the parameters are invalid
72
+
73
+ Tests fitting inverse transform with a precomputed kernel raises a
74
+ ValueError.
75
+ """
76
+ estimator = KernelPCA(
77
+ n_components=10, fit_inverse_transform=True, kernel="precomputed"
78
+ )
79
+ err_ms = "Cannot fit_inverse_transform with a precomputed kernel"
80
+ with pytest.raises(ValueError, match=err_ms):
81
+ estimator.fit(np.random.randn(10, 10))
82
+
83
+
84
+ def test_kernel_pca_consistent_transform():
85
+ """Check robustness to mutations in the original training array
86
+
87
+ Test that after fitting a kPCA model, it stays independent of any
88
+ mutation of the values of the original data object by relying on an
89
+ internal copy.
90
+ """
91
+ # X_fit_ needs to retain the old, unmodified copy of X
92
+ state = np.random.RandomState(0)
93
+ X = state.rand(10, 10)
94
+ kpca = KernelPCA(random_state=state).fit(X)
95
+ transformed1 = kpca.transform(X)
96
+
97
+ X_copy = X.copy()
98
+ X[:, 0] = 666
99
+ transformed2 = kpca.transform(X_copy)
100
+ assert_array_almost_equal(transformed1, transformed2)
101
+
102
+
103
+ def test_kernel_pca_deterministic_output():
104
+ """Test that Kernel PCA produces deterministic output
105
+
106
+ Tests that the same inputs and random state produce the same output.
107
+ """
108
+ rng = np.random.RandomState(0)
109
+ X = rng.rand(10, 10)
110
+ eigen_solver = ("arpack", "dense")
111
+
112
+ for solver in eigen_solver:
113
+ transformed_X = np.zeros((20, 2))
114
+ for i in range(20):
115
+ kpca = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng)
116
+ transformed_X[i, :] = kpca.fit_transform(X)[0]
117
+ assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
118
+
119
+
120
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
121
+ def test_kernel_pca_sparse(csr_container):
122
+ """Test that kPCA works on a sparse data input.
123
+
124
+ Same test as ``test_kernel_pca``, except for ``inverse_transform`` since it's not
125
+ implemented for sparse matrices.
126
+ """
127
+ rng = np.random.RandomState(0)
128
+ X_fit = csr_container(rng.random_sample((5, 4)))
129
+ X_pred = csr_container(rng.random_sample((2, 4)))
130
+
131
+ for eigen_solver in ("auto", "arpack", "randomized"):
132
+ for kernel in ("linear", "rbf", "poly"):
133
+ # transform fit data
134
+ kpca = KernelPCA(
135
+ 4,
136
+ kernel=kernel,
137
+ eigen_solver=eigen_solver,
138
+ fit_inverse_transform=False,
139
+ random_state=0,
140
+ )
141
+ X_fit_transformed = kpca.fit_transform(X_fit)
142
+ X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
143
+ assert_array_almost_equal(
144
+ np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
145
+ )
146
+
147
+ # transform new data
148
+ X_pred_transformed = kpca.transform(X_pred)
149
+ assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]
150
+
151
+ # inverse transform: not available for sparse matrices
152
+ # XXX: should we raise another exception type here? For instance:
153
+ # NotImplementedError.
154
+ with pytest.raises(NotFittedError):
155
+ kpca.inverse_transform(X_pred_transformed)
156
+
157
+
158
+ @pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
159
+ @pytest.mark.parametrize("n_features", [4, 10])
160
+ def test_kernel_pca_linear_kernel(solver, n_features):
161
+ """Test that kPCA with linear kernel is equivalent to PCA for all solvers.
162
+
163
+ KernelPCA with linear kernel should produce the same output as PCA.
164
+ """
165
+ rng = np.random.RandomState(0)
166
+ X_fit = rng.random_sample((5, n_features))
167
+ X_pred = rng.random_sample((2, n_features))
168
+
169
+ # for a linear kernel, kernel PCA should find the same projection as PCA
170
+ # modulo the sign (direction)
171
+ # fit only the first four components: fifth is near zero eigenvalue, so
172
+ # can be trimmed due to roundoff error
173
+ n_comps = 3 if solver == "arpack" else 4
174
+ assert_array_almost_equal(
175
+ np.abs(KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)),
176
+ np.abs(
177
+ PCA(n_comps, svd_solver=solver if solver != "dense" else "full")
178
+ .fit(X_fit)
179
+ .transform(X_pred)
180
+ ),
181
+ )
182
+
183
+
184
+ def test_kernel_pca_n_components():
185
+ """Test that `n_components` is correctly taken into account for projections
186
+
187
+ For all solvers this tests that the output has the correct shape depending
188
+ on the selected number of components.
189
+ """
190
+ rng = np.random.RandomState(0)
191
+ X_fit = rng.random_sample((5, 4))
192
+ X_pred = rng.random_sample((2, 4))
193
+
194
+ for eigen_solver in ("dense", "arpack", "randomized"):
195
+ for c in [1, 2, 4]:
196
+ kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
197
+ shape = kpca.fit(X_fit).transform(X_pred).shape
198
+
199
+ assert shape == (2, c)
200
+
201
+
202
+ def test_remove_zero_eig():
203
+ """Check that the ``remove_zero_eig`` parameter works correctly.
204
+
205
+ Tests that the null-space (Zero) eigenvalues are removed when
206
+ remove_zero_eig=True, whereas they are not by default.
207
+ """
208
+ X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
209
+
210
+ # n_components=None (default) => remove_zero_eig is True
211
+ kpca = KernelPCA()
212
+ Xt = kpca.fit_transform(X)
213
+ assert Xt.shape == (3, 0)
214
+
215
+ kpca = KernelPCA(n_components=2)
216
+ Xt = kpca.fit_transform(X)
217
+ assert Xt.shape == (3, 2)
218
+
219
+ kpca = KernelPCA(n_components=2, remove_zero_eig=True)
220
+ Xt = kpca.fit_transform(X)
221
+ assert Xt.shape == (3, 0)
222
+
223
+
224
+ def test_leave_zero_eig():
225
+ """Non-regression test for issue #12141 (PR #12143)
226
+
227
+ This test checks that fit().transform() returns the same result as
228
+ fit_transform() in case of non-removed zero eigenvalue.
229
+ """
230
+ X_fit = np.array([[1, 1], [0, 0]])
231
+
232
+ # Assert that even with all np warnings on, there is no div by zero warning
233
+ with warnings.catch_warnings():
234
+ # There might be warnings about the kernel being badly conditioned,
235
+ # but there should not be warnings about division by zero.
236
+ # (Numpy division by zero warning can have many message variants, but
237
+ # at least we know that it is a RuntimeWarning, so let's check only for that)
238
+ warnings.simplefilter("error", RuntimeWarning)
239
+ with np.errstate(all="warn"):
240
+ k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense")
241
+ # Fit, then transform
242
+ A = k.fit(X_fit).transform(X_fit)
243
+ # Do both at once
244
+ B = k.fit_transform(X_fit)
245
+ # Compare
246
+ assert_array_almost_equal(np.abs(A), np.abs(B))
247
+
248
+
249
+ def test_kernel_pca_precomputed():
250
+ """Test that kPCA works with a precomputed kernel, for all solvers"""
251
+ rng = np.random.RandomState(0)
252
+ X_fit = rng.random_sample((5, 4))
253
+ X_pred = rng.random_sample((2, 4))
254
+
255
+ for eigen_solver in ("dense", "arpack", "randomized"):
256
+ X_kpca = (
257
+ KernelPCA(4, eigen_solver=eigen_solver, random_state=0)
258
+ .fit(X_fit)
259
+ .transform(X_pred)
260
+ )
261
+
262
+ X_kpca2 = (
263
+ KernelPCA(
264
+ 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
265
+ )
266
+ .fit(np.dot(X_fit, X_fit.T))
267
+ .transform(np.dot(X_pred, X_fit.T))
268
+ )
269
+
270
+ X_kpca_train = KernelPCA(
271
+ 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
272
+ ).fit_transform(np.dot(X_fit, X_fit.T))
273
+
274
+ X_kpca_train2 = (
275
+ KernelPCA(
276
+ 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
277
+ )
278
+ .fit(np.dot(X_fit, X_fit.T))
279
+ .transform(np.dot(X_fit, X_fit.T))
280
+ )
281
+
282
+ assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2))
283
+
284
+ assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2))
285
+
286
+
287
+ @pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
288
+ def test_kernel_pca_precomputed_non_symmetric(solver):
289
+ """Check that the kernel centerer works.
290
+
291
+ Tests that a non-symmetric precomputed kernel is actually accepted
292
+ because the kernel centerer does its job correctly.
293
+ """
294
+
295
+ # a non-symmetric Gram matrix
296
+ K = [[1, 2], [3, 40]]
297
+ kpca = KernelPCA(
298
+ kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0
299
+ )
300
+ kpca.fit(K) # no error
301
+
302
+ # same test with centered kernel
303
+ Kc = [[9, -9], [-9, 9]]
304
+ kpca_c = KernelPCA(
305
+ kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0
306
+ )
307
+ kpca_c.fit(Kc)
308
+
309
+ # comparison between the non-centered and centered versions
310
+ assert_array_equal(kpca.eigenvectors_, kpca_c.eigenvectors_)
311
+ assert_array_equal(kpca.eigenvalues_, kpca_c.eigenvalues_)
312
+
313
+
314
+ def test_gridsearch_pipeline():
315
+ """Check that kPCA works as expected in a grid search pipeline
316
+
317
+ Test if we can do a grid-search to find parameters to separate
318
+ circles with a perceptron model.
319
+ """
320
+ X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
321
+ kpca = KernelPCA(kernel="rbf", n_components=2)
322
+ pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))])
323
+ param_grid = dict(kernel_pca__gamma=2.0 ** np.arange(-2, 2))
324
+ grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
325
+ grid_search.fit(X, y)
326
+ assert grid_search.best_score_ == 1
327
+
328
+
329
+ def test_gridsearch_pipeline_precomputed():
330
+ """Check that kPCA works as expected in a grid search pipeline (2)
331
+
332
+ Test if we can do a grid-search to find parameters to separate
333
+ circles with a perceptron model. This test uses a precomputed kernel.
334
+ """
335
+ X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
336
+ kpca = KernelPCA(kernel="precomputed", n_components=2)
337
+ pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))])
338
+ param_grid = dict(Perceptron__max_iter=np.arange(1, 5))
339
+ grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
340
+ X_kernel = rbf_kernel(X, gamma=2.0)
341
+ grid_search.fit(X_kernel, y)
342
+ assert grid_search.best_score_ == 1
343
+
344
+
345
+ def test_nested_circles():
346
+ """Check that kPCA projects in a space where nested circles are separable
347
+
348
+ Tests that 2D nested circles become separable with a perceptron when
349
+ projected onto the first 2 kernel PCA components using an RBF kernel, while raw samples
350
+ are not directly separable in the original space.
351
+ """
352
+ X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
353
+
354
+ # 2D nested circles are not linearly separable
355
+ train_score = Perceptron(max_iter=5).fit(X, y).score(X, y)
356
+ assert train_score < 0.8
357
+
358
+ # Project the circles data into the first 2 components of a RBF Kernel
359
+ # PCA model.
360
+ # Note that the gamma value is data dependent. If this test breaks
361
+ # and the gamma value has to be updated, the Kernel PCA example will
362
+ # have to be updated too.
363
+ kpca = KernelPCA(
364
+ kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0
365
+ )
366
+ X_kpca = kpca.fit_transform(X)
367
+
368
+ # The data is perfectly linearly separable in that space
369
+ train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y)
370
+ assert train_score == 1.0
371
+
372
+
373
+ def test_kernel_conditioning():
374
+ """Check that ``_check_psd_eigenvalues`` is correctly called in kPCA
375
+
376
+ Non-regression test for issue #12140 (PR #12145).
377
+ """
378
+
379
+ # create a pathological X leading to small non-zero eigenvalue
380
+ X = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]]
381
+ kpca = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True)
382
+ kpca.fit(X)
383
+
384
+ # check that the small non-zero eigenvalue was correctly set to zero
385
+ assert kpca.eigenvalues_.min() == 0
386
+ assert np.all(kpca.eigenvalues_ == _check_psd_eigenvalues(kpca.eigenvalues_))
387
+
388
+
389
+ @pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
390
+ def test_precomputed_kernel_not_psd(solver):
391
+ """Check how KernelPCA works with non-PSD kernels depending on n_components
392
+
393
+ Tests, for all solvers, what happens with a non-PSD Gram matrix (this
394
+ can happen in an isomap scenario, or with custom kernel functions, or
395
+ maybe with ill-posed datasets).
396
+
397
+ When ``n_components`` is large enough to capture a negative eigenvalue, an
398
+ error should be raised. Otherwise, KernelPCA should run without error
399
+ since the negative eigenvalues are not selected.
400
+ """
401
+
402
+ # a non PSD kernel with large eigenvalues, already centered
403
+ # it was captured from an isomap call and multiplied by 100 for compactness
404
+ K = [
405
+ [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78],
406
+ [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49],
407
+ [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23],
408
+ [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9],
409
+ [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9],
410
+ [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9],
411
+ [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75],
412
+ [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46],
413
+ ]
414
+ # this gram matrix has 5 positive eigenvalues and 3 negative ones
415
+ # [ 52.72, 7.65, 7.65, 5.02, 0. , -0. , -6.13, -15.11]
416
+
417
+ # 1. ask for enough components to get a significant negative one
418
+ kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7)
419
+ # make sure that the appropriate error is raised
420
+ with pytest.raises(ValueError, match="There are significant negative eigenvalues"):
421
+ kpca.fit(K)
422
+
423
+ # 2. ask for a small enough n_components to get only positive ones
424
+ kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2)
425
+ if solver == "randomized":
426
+ # the randomized method is still inconsistent with the others on this
427
+ # since it selects the eigenvalues based on the largest 2 moduli, not
428
+ # on the largest 2 values.
429
+ #
430
+ # At least we can ensure that we return an error instead of returning
431
+ # the wrong eigenvalues
432
+ with pytest.raises(
433
+ ValueError, match="There are significant negative eigenvalues"
434
+ ):
435
+ kpca.fit(K)
436
+ else:
437
+ # general case: make sure that it works
438
+ kpca.fit(K)
439
+
440
+
441
+ @pytest.mark.parametrize("n_components", [4, 10, 20])
442
+ def test_kernel_pca_solvers_equivalence(n_components):
443
+ """Check that 'dense' 'arpack' & 'randomized' solvers give similar results"""
444
+
445
+ # Generate random data
446
+ n_train, n_test = 1_000, 100
447
+ X, _ = make_circles(
448
+ n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0
449
+ )
450
+ X_fit, X_pred = X[:n_train, :], X[n_train:, :]
451
+
452
+ # reference (full)
453
+ ref_pred = (
454
+ KernelPCA(n_components, eigen_solver="dense", random_state=0)
455
+ .fit(X_fit)
456
+ .transform(X_pred)
457
+ )
458
+
459
+ # arpack
460
+ a_pred = (
461
+ KernelPCA(n_components, eigen_solver="arpack", random_state=0)
462
+ .fit(X_fit)
463
+ .transform(X_pred)
464
+ )
465
+ # check that the result is still correct despite the approx
466
+ assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred))
467
+
468
+ # randomized
469
+ r_pred = (
470
+ KernelPCA(n_components, eigen_solver="randomized", random_state=0)
471
+ .fit(X_fit)
472
+ .transform(X_pred)
473
+ )
474
+ # check that the result is still correct despite the approximation
475
+ assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred))
476
+
477
+
478
+ def test_kernel_pca_inverse_transform_reconstruction():
479
+ """Test if the reconstruction is a good approximation.
480
+
481
+ Note that in general it is not possible to get an arbitrarily good
482
+ reconstruction because of kernel centering that does not
483
+ preserve all the information of the original data.
484
+ """
485
+ X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
486
+
487
+ kpca = KernelPCA(
488
+ n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3
489
+ )
490
+ X_trans = kpca.fit_transform(X)
491
+ X_reconst = kpca.inverse_transform(X_trans)
492
+ assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1
493
+
494
+
495
+ def test_kernel_pca_raise_not_fitted_error():
496
+ X = np.random.randn(15).reshape(5, 3)
497
+ kpca = KernelPCA()
498
+ kpca.fit(X)
499
+ with pytest.raises(NotFittedError):
500
+ kpca.inverse_transform(X)
501
+
502
+
503
+ def test_32_64_decomposition_shape():
504
+ """Test that the decomposition is similar for 32 and 64 bits data
505
+
506
+ Non-regression test for
507
+ https://github.com/scikit-learn/scikit-learn/issues/18146
508
+ """
509
+ X, y = make_blobs(
510
+ n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1
511
+ )
512
+ X = StandardScaler().fit_transform(X)
513
+ X -= X.min()
514
+
515
+ # Compare the shapes (corresponds to the number of non-zero eigenvalues)
516
+ kpca = KernelPCA()
517
+ assert kpca.fit_transform(X).shape == kpca.fit_transform(X.astype(np.float32)).shape
518
+
519
+
520
+ def test_kernel_pca_feature_names_out():
521
+ """Check feature names out for KernelPCA."""
522
+ X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
523
+ kpca = KernelPCA(n_components=2).fit(X)
524
+
525
+ names = kpca.get_feature_names_out()
526
+ assert_array_equal([f"kernelpca{i}" for i in range(2)], names)
527
+
528
+
529
+ def test_kernel_pca_inverse_correct_gamma():
530
+ """Check that gamma is set correctly when not provided.
531
+
532
+ Non-regression test for #26280
533
+ """
534
+ rng = np.random.RandomState(0)
535
+ X = rng.random_sample((5, 4))
536
+
537
+ kwargs = {
538
+ "n_components": 2,
539
+ "random_state": rng,
540
+ "fit_inverse_transform": True,
541
+ "kernel": "rbf",
542
+ }
543
+
544
+ expected_gamma = 1 / X.shape[1]
545
+ kpca1 = KernelPCA(gamma=None, **kwargs).fit(X)
546
+ kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X)
547
+
548
+ assert kpca1.gamma_ == expected_gamma
549
+ assert kpca2.gamma_ == expected_gamma
550
+
551
+ X1_recon = kpca1.inverse_transform(kpca1.transform(X))
552
+ X2_recon = kpca2.inverse_transform(kpca1.transform(X))
553
+
554
+ assert_allclose(X1_recon, X2_recon)
555
+
556
+
557
+ def test_kernel_pca_pandas_output():
558
+ """Check that KernelPCA works with pandas output when the solver is arpack.
559
+
560
+ Non-regression test for:
561
+ https://github.com/scikit-learn/scikit-learn/issues/27579
562
+ """
563
+ pytest.importorskip("pandas")
564
+ X, _ = load_iris(as_frame=True, return_X_y=True)
565
+ with sklearn.config_context(transform_output="pandas"):
566
+ KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X)
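As an illustrative aside (not part of the diff; shapes are arbitrary assumptions), the precomputed-kernel pattern used in test_kernel_pca_precomputed above boils down to passing the training Gram matrix to fit and the test-vs-train kernel to transform:

import numpy as np
from sklearn.decomposition import KernelPCA

rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))   # training data (arbitrary shape)
X_pred = rng.random_sample((2, 4))  # new data (arbitrary shape)

kpca = KernelPCA(n_components=2, kernel="precomputed")
kpca.fit(X_fit @ X_fit.T)                    # (n_fit, n_fit) kernel between training samples
X_pred_t = kpca.transform(X_pred @ X_fit.T)  # (n_pred, n_fit) kernel against training samples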
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py ADDED
@@ -0,0 +1,1062 @@
1
+ import re
2
+ import sys
3
+ import warnings
4
+ from io import StringIO
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from scipy import linalg
9
+
10
+ from sklearn.base import clone
11
+ from sklearn.decomposition import NMF, MiniBatchNMF, non_negative_factorization
12
+ from sklearn.decomposition import _nmf as nmf # For testing internals
13
+ from sklearn.exceptions import ConvergenceWarning
14
+ from sklearn.utils._testing import (
15
+ assert_allclose,
16
+ assert_almost_equal,
17
+ assert_array_almost_equal,
18
+ assert_array_equal,
19
+ ignore_warnings,
20
+ )
21
+ from sklearn.utils.extmath import squared_norm
22
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
23
+
24
+
25
+ @pytest.mark.parametrize(
26
+ ["Estimator", "solver"],
27
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
28
+ )
29
+ def test_convergence_warning(Estimator, solver):
30
+ convergence_warning = (
31
+ "Maximum number of iterations 1 reached. Increase it to improve convergence."
32
+ )
33
+ A = np.ones((2, 2))
34
+ with pytest.warns(ConvergenceWarning, match=convergence_warning):
35
+ Estimator(max_iter=1, n_components="auto", **solver).fit(A)
36
+
37
+
38
+ def test_initialize_nn_output():
39
+ # Test that initialization does not return negative values
40
+ rng = np.random.mtrand.RandomState(42)
41
+ data = np.abs(rng.randn(10, 10))
42
+ for init in ("random", "nndsvd", "nndsvda", "nndsvdar"):
43
+ W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
44
+ assert not ((W < 0).any() or (H < 0).any())
45
+
46
+
47
+ # TODO(1.6): remove the warning filter for `n_components`
48
+ @pytest.mark.filterwarnings(
49
+ r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in"
50
+ r" the initialization",
51
+ "ignore:The default value of `n_components` will change",
52
+ )
53
+ def test_parameter_checking():
54
+ # Here we only check for invalid parameter values that are not already
55
+ # automatically tested in the common tests.
56
+
57
+ A = np.ones((2, 2))
58
+
59
+ msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0"
60
+ with pytest.raises(ValueError, match=msg):
61
+ NMF(solver="cd", beta_loss=1.0).fit(A)
62
+ msg = "Negative values in data passed to"
63
+ with pytest.raises(ValueError, match=msg):
64
+ NMF().fit(-A)
65
+ clf = NMF(2, tol=0.1).fit(A)
66
+ with pytest.raises(ValueError, match=msg):
67
+ clf.transform(-A)
68
+ with pytest.raises(ValueError, match=msg):
69
+ nmf._initialize_nmf(-A, 2, "nndsvd")
70
+
71
+ for init in ["nndsvd", "nndsvda", "nndsvdar"]:
72
+ msg = re.escape(
73
+ "init = '{}' can only be used when "
74
+ "n_components <= min(n_samples, n_features)".format(init)
75
+ )
76
+ with pytest.raises(ValueError, match=msg):
77
+ NMF(3, init=init).fit(A)
78
+ with pytest.raises(ValueError, match=msg):
79
+ MiniBatchNMF(3, init=init).fit(A)
80
+ with pytest.raises(ValueError, match=msg):
81
+ nmf._initialize_nmf(A, 3, init)
82
+
83
+
84
+ def test_initialize_close():
85
+ # Test NNDSVD error
86
+ # Test that _initialize_nmf error is less than the standard deviation of
87
+ # the entries in the matrix.
88
+ rng = np.random.mtrand.RandomState(42)
89
+ A = np.abs(rng.randn(10, 10))
90
+ W, H = nmf._initialize_nmf(A, 10, init="nndsvd")
91
+ error = linalg.norm(np.dot(W, H) - A)
92
+ sdev = linalg.norm(A - A.mean())
93
+ assert error <= sdev
94
+
95
+
96
+ def test_initialize_variants():
97
+ # Test NNDSVD variants correctness
98
+ # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
99
+ # 'nndsvd' only where the basic version has zeros.
100
+ rng = np.random.mtrand.RandomState(42)
101
+ data = np.abs(rng.randn(10, 10))
102
+ W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd")
103
+ Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda")
104
+ War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0)
105
+
106
+ for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
107
+ assert_almost_equal(evl[ref != 0], ref[ref != 0])
108
+
109
+
110
+ # ignore UserWarning raised when both solver='mu' and init='nndsvd'
111
+ @ignore_warnings(category=UserWarning)
112
+ @pytest.mark.parametrize(
113
+ ["Estimator", "solver"],
114
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
115
+ )
116
+ @pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random"))
117
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
118
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
119
+ def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H):
120
+ # Test that the decomposition does not contain negative values
121
+ A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)]
122
+ model = Estimator(
123
+ n_components=2,
124
+ init=init,
125
+ alpha_W=alpha_W,
126
+ alpha_H=alpha_H,
127
+ random_state=0,
128
+ **solver,
129
+ )
130
+ transf = model.fit_transform(A)
131
+ assert not ((model.components_ < 0).any() or (transf < 0).any())
132
+
133
+
134
+ @pytest.mark.parametrize(
135
+ ["Estimator", "solver"],
136
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
137
+ )
138
+ def test_nmf_fit_close(Estimator, solver):
139
+ rng = np.random.mtrand.RandomState(42)
140
+ # Test that the fit is not too far away
141
+ pnmf = Estimator(
142
+ 5,
143
+ init="nndsvdar",
144
+ random_state=0,
145
+ max_iter=600,
146
+ **solver,
147
+ )
148
+ X = np.abs(rng.randn(6, 5))
149
+ assert pnmf.fit(X).reconstruction_err_ < 0.1
150
+
151
+
152
+ def test_nmf_true_reconstruction():
153
+ # Test that the fit is not too far away from an exact solution
154
+ # (by construction)
155
+ n_samples = 15
156
+ n_features = 10
157
+ n_components = 5
158
+ beta_loss = 1
159
+ batch_size = 3
160
+ max_iter = 1000
161
+
162
+ rng = np.random.mtrand.RandomState(42)
163
+ W_true = np.zeros([n_samples, n_components])
164
+ W_array = np.abs(rng.randn(n_samples))
165
+ for j in range(n_components):
166
+ W_true[j % n_samples, j] = W_array[j % n_samples]
167
+ H_true = np.zeros([n_components, n_features])
168
+ H_array = np.abs(rng.randn(n_components))
169
+ for j in range(n_features):
170
+ H_true[j % n_components, j] = H_array[j % n_components]
171
+ X = np.dot(W_true, H_true)
172
+
173
+ model = NMF(
174
+ n_components=n_components,
175
+ solver="mu",
176
+ beta_loss=beta_loss,
177
+ max_iter=max_iter,
178
+ random_state=0,
179
+ )
180
+ transf = model.fit_transform(X)
181
+ X_calc = np.dot(transf, model.components_)
182
+
183
+ assert model.reconstruction_err_ < 0.1
184
+ assert_allclose(X, X_calc)
185
+
186
+ mbmodel = MiniBatchNMF(
187
+ n_components=n_components,
188
+ beta_loss=beta_loss,
189
+ batch_size=batch_size,
190
+ random_state=0,
191
+ max_iter=max_iter,
192
+ )
193
+ transf = mbmodel.fit_transform(X)
194
+ X_calc = np.dot(transf, mbmodel.components_)
195
+
196
+ assert mbmodel.reconstruction_err_ < 0.1
197
+ assert_allclose(X, X_calc, atol=1)
198
+
199
+
200
+ @pytest.mark.parametrize("solver", ["cd", "mu"])
201
+ def test_nmf_transform(solver):
202
+ # Test that fit_transform is equivalent to fit.transform for NMF
203
+ # Test that NMF.transform returns close values
204
+ rng = np.random.mtrand.RandomState(42)
205
+ A = np.abs(rng.randn(6, 5))
206
+ m = NMF(
207
+ solver=solver,
208
+ n_components=3,
209
+ init="random",
210
+ random_state=0,
211
+ tol=1e-6,
212
+ )
213
+ ft = m.fit_transform(A)
214
+ t = m.transform(A)
215
+ assert_allclose(ft, t, atol=1e-1)
216
+
217
+
218
+ def test_minibatch_nmf_transform():
219
+ # Test that fit_transform is equivalent to fit.transform for MiniBatchNMF
220
+ # Only guaranteed with fresh restarts
221
+ rng = np.random.mtrand.RandomState(42)
222
+ A = np.abs(rng.randn(6, 5))
223
+ m = MiniBatchNMF(
224
+ n_components=3,
225
+ random_state=0,
226
+ tol=1e-3,
227
+ fresh_restarts=True,
228
+ )
229
+ ft = m.fit_transform(A)
230
+ t = m.transform(A)
231
+ assert_allclose(ft, t)
232
+
233
+
234
+ @pytest.mark.parametrize(
235
+ ["Estimator", "solver"],
236
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
237
+ )
238
+ def test_nmf_transform_custom_init(Estimator, solver):
239
+ # Smoke test that checks if NMF.transform works with custom initialization
240
+ random_state = np.random.RandomState(0)
241
+ A = np.abs(random_state.randn(6, 5))
242
+ n_components = 4
243
+ avg = np.sqrt(A.mean() / n_components)
244
+ H_init = np.abs(avg * random_state.randn(n_components, 5))
245
+ W_init = np.abs(avg * random_state.randn(6, n_components))
246
+
247
+ m = Estimator(
248
+ n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver
249
+ )
250
+ m.fit_transform(A, W=W_init, H=H_init)
251
+ m.transform(A)
252
+
253
+
254
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
255
+ def test_nmf_inverse_transform(solver):
256
+ # Test that NMF.inverse_transform returns close values
257
+ random_state = np.random.RandomState(0)
258
+ A = np.abs(random_state.randn(6, 4))
259
+ m = NMF(
260
+ solver=solver,
261
+ n_components=4,
262
+ init="random",
263
+ random_state=0,
264
+ max_iter=1000,
265
+ )
266
+ ft = m.fit_transform(A)
267
+ A_new = m.inverse_transform(ft)
268
+ assert_array_almost_equal(A, A_new, decimal=2)
269
+
270
+
271
+ # TODO(1.6): remove the warning filter
272
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
273
+ def test_mbnmf_inverse_transform():
274
+ # Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform
275
+ # is close to the identity
276
+ rng = np.random.RandomState(0)
277
+ A = np.abs(rng.randn(6, 4))
278
+ nmf = MiniBatchNMF(
279
+ random_state=rng,
280
+ max_iter=500,
281
+ init="nndsvdar",
282
+ fresh_restarts=True,
283
+ )
284
+ ft = nmf.fit_transform(A)
285
+ A_new = nmf.inverse_transform(ft)
286
+ assert_allclose(A, A_new, rtol=1e-3, atol=1e-2)
287
+
288
+
289
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
290
+ def test_n_components_greater_n_features(Estimator):
291
+ # Smoke test for the case of more components than features.
292
+ rng = np.random.mtrand.RandomState(42)
293
+ A = np.abs(rng.randn(30, 10))
294
+ Estimator(n_components=15, random_state=0, tol=1e-2).fit(A)
295
+
296
+
297
+ @pytest.mark.parametrize(
298
+ ["Estimator", "solver"],
299
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
300
+ )
301
+ @pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
302
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
303
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
304
+ def test_nmf_sparse_input(Estimator, solver, sparse_container, alpha_W, alpha_H):
305
+ # Test that sparse matrices are accepted as input
306
+ rng = np.random.mtrand.RandomState(42)
307
+ A = np.abs(rng.randn(10, 10))
308
+ A[:, 2 * np.arange(5)] = 0
309
+ A_sparse = sparse_container(A)
310
+
311
+ est1 = Estimator(
312
+ n_components=5,
313
+ init="random",
314
+ alpha_W=alpha_W,
315
+ alpha_H=alpha_H,
316
+ random_state=0,
317
+ tol=0,
318
+ max_iter=100,
319
+ **solver,
320
+ )
321
+ est2 = clone(est1)
322
+
323
+ W1 = est1.fit_transform(A)
324
+ W2 = est2.fit_transform(A_sparse)
325
+ H1 = est1.components_
326
+ H2 = est2.components_
327
+
328
+ assert_allclose(W1, W2)
329
+ assert_allclose(H1, H2)
330
+
331
+
332
+ @pytest.mark.parametrize(
333
+ ["Estimator", "solver"],
334
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
335
+ )
336
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
337
+ def test_nmf_sparse_transform(Estimator, solver, csc_container):
338
+ # Test that transform works on sparse data. Issue #2124
339
+ rng = np.random.mtrand.RandomState(42)
340
+ A = np.abs(rng.randn(3, 2))
341
+ A[1, 1] = 0
342
+ A = csc_container(A)
343
+
344
+ model = Estimator(random_state=0, n_components=2, max_iter=400, **solver)
345
+ A_fit_tr = model.fit_transform(A)
346
+ A_tr = model.transform(A)
347
+ assert_allclose(A_fit_tr, A_tr, atol=1e-1)
348
+
349
+
350
+ # TODO(1.6): remove the warning filter
351
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
352
+ @pytest.mark.parametrize("init", ["random", "nndsvd"])
353
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
354
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
355
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
356
+ def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H):
357
+ # Test that the function is called in the same way, either directly
358
+ # or through the NMF class
359
+ max_iter = 500
360
+ rng = np.random.mtrand.RandomState(42)
361
+ A = np.abs(rng.randn(10, 10))
362
+ A[:, 2 * np.arange(5)] = 0
363
+
364
+ W_nmf, H, _ = non_negative_factorization(
365
+ A,
366
+ init=init,
367
+ solver=solver,
368
+ max_iter=max_iter,
369
+ alpha_W=alpha_W,
370
+ alpha_H=alpha_H,
371
+ random_state=1,
372
+ tol=1e-2,
373
+ )
374
+ W_nmf_2, H, _ = non_negative_factorization(
375
+ A,
376
+ H=H,
377
+ update_H=False,
378
+ init=init,
379
+ solver=solver,
380
+ max_iter=max_iter,
381
+ alpha_W=alpha_W,
382
+ alpha_H=alpha_H,
383
+ random_state=1,
384
+ tol=1e-2,
385
+ )
386
+
387
+ model_class = NMF(
388
+ init=init,
389
+ solver=solver,
390
+ max_iter=max_iter,
391
+ alpha_W=alpha_W,
392
+ alpha_H=alpha_H,
393
+ random_state=1,
394
+ tol=1e-2,
395
+ )
396
+ W_cls = model_class.fit_transform(A)
397
+ W_cls_2 = model_class.transform(A)
398
+
399
+ assert_allclose(W_nmf, W_cls)
400
+ assert_allclose(W_nmf_2, W_cls_2)
401
+
402
+
403
+ def test_non_negative_factorization_checking():
404
+ # Note that the validity of parameter types and range of possible values
405
+ # for scalar numerical or str parameters is already checked in the common
406
+ # tests. Here we only check for problems that cannot be captured by simple
407
+ # declarative constraints on the valid parameter values.
408
+
409
+ A = np.ones((2, 2))
410
+ # Test parameters checking in public function
411
+ nnmf = non_negative_factorization
412
+ msg = re.escape("Negative values in data passed to NMF (input H)")
413
+ with pytest.raises(ValueError, match=msg):
414
+ nnmf(A, A, -A, 2, init="custom")
415
+ msg = re.escape("Negative values in data passed to NMF (input W)")
416
+ with pytest.raises(ValueError, match=msg):
417
+ nnmf(A, -A, A, 2, init="custom")
418
+ msg = re.escape("Array passed to NMF (input H) is full of zeros")
419
+ with pytest.raises(ValueError, match=msg):
420
+ nnmf(A, A, 0 * A, 2, init="custom")
421
+
422
+
423
+ def _beta_divergence_dense(X, W, H, beta):
424
+ """Compute the beta-divergence of X and W.H for dense array only.
425
+
426
+ Used as a reference for testing nmf._beta_divergence.
427
+ """
428
+ WH = np.dot(W, H)
429
+
430
+ if beta == 2:
431
+ return squared_norm(X - WH) / 2
432
+
433
+ WH_Xnonzero = WH[X != 0]
434
+ X_nonzero = X[X != 0]
435
+ np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
436
+
437
+ if beta == 1:
438
+ res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
439
+ res += WH.sum() - X.sum()
440
+
441
+ elif beta == 0:
442
+ div = X_nonzero / WH_Xnonzero
443
+ res = np.sum(div) - X.size - np.sum(np.log(div))
444
+ else:
445
+ res = (X_nonzero**beta).sum()
446
+ res += (beta - 1) * (WH**beta).sum()
447
+ res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
448
+ res /= beta * (beta - 1)
449
+
450
+ return res
451
+
452
+
453
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
454
+ def test_beta_divergence(csr_container):
455
+ # Compare _beta_divergence with the reference _beta_divergence_dense
456
+ n_samples = 20
457
+ n_features = 10
458
+ n_components = 5
459
+ beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0]
460
+
461
+ # initialization
462
+ rng = np.random.mtrand.RandomState(42)
463
+ X = rng.randn(n_samples, n_features)
464
+ np.clip(X, 0, None, out=X)
465
+ X_csr = csr_container(X)
466
+ W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
467
+
468
+ for beta in beta_losses:
469
+ ref = _beta_divergence_dense(X, W, H, beta)
470
+ loss = nmf._beta_divergence(X, W, H, beta)
471
+ loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
472
+
473
+ assert_almost_equal(ref, loss, decimal=7)
474
+ assert_almost_equal(ref, loss_csr, decimal=7)
475
+
476
+
477
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
478
+ def test_special_sparse_dot(csr_container):
479
+ # Test the function that computes np.dot(W, H), only where X is non zero.
480
+ n_samples = 10
481
+ n_features = 5
482
+ n_components = 3
483
+ rng = np.random.mtrand.RandomState(42)
484
+ X = rng.randn(n_samples, n_features)
485
+ np.clip(X, 0, None, out=X)
486
+ X_csr = csr_container(X)
487
+
488
+ W = np.abs(rng.randn(n_samples, n_components))
489
+ H = np.abs(rng.randn(n_components, n_features))
490
+
491
+ WH_safe = nmf._special_sparse_dot(W, H, X_csr)
492
+ WH = nmf._special_sparse_dot(W, H, X)
493
+
494
+ # test that both results have same values, in X_csr nonzero elements
495
+ ii, jj = X_csr.nonzero()
496
+ WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
497
+ assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
498
+
499
+ # test that WH_safe and X_csr have the same sparse structure
500
+ assert_array_equal(WH_safe.indices, X_csr.indices)
501
+ assert_array_equal(WH_safe.indptr, X_csr.indptr)
502
+ assert_array_equal(WH_safe.shape, X_csr.shape)
503
+
504
+
505
+ @ignore_warnings(category=ConvergenceWarning)
506
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
507
+ def test_nmf_multiplicative_update_sparse(csr_container):
508
+ # Compare sparse and dense input in multiplicative update NMF
509
+ # Also test continuity of the results with respect to beta_loss parameter
510
+ n_samples = 20
511
+ n_features = 10
512
+ n_components = 5
513
+ alpha = 0.1
514
+ l1_ratio = 0.5
515
+ n_iter = 20
516
+
517
+ # initialization
518
+ rng = np.random.mtrand.RandomState(1337)
519
+ X = rng.randn(n_samples, n_features)
520
+ X = np.abs(X)
521
+ X_csr = csr_container(X)
522
+ W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
523
+
524
+ for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
525
+ # Reference with dense array X
526
+ W, H = W0.copy(), H0.copy()
527
+ W1, H1, _ = non_negative_factorization(
528
+ X,
529
+ W,
530
+ H,
531
+ n_components,
532
+ init="custom",
533
+ update_H=True,
534
+ solver="mu",
535
+ beta_loss=beta_loss,
536
+ max_iter=n_iter,
537
+ alpha_W=alpha,
538
+ l1_ratio=l1_ratio,
539
+ random_state=42,
540
+ )
541
+
542
+ # Compare with sparse X
543
+ W, H = W0.copy(), H0.copy()
544
+ W2, H2, _ = non_negative_factorization(
545
+ X_csr,
546
+ W,
547
+ H,
548
+ n_components,
549
+ init="custom",
550
+ update_H=True,
551
+ solver="mu",
552
+ beta_loss=beta_loss,
553
+ max_iter=n_iter,
554
+ alpha_W=alpha,
555
+ l1_ratio=l1_ratio,
556
+ random_state=42,
557
+ )
558
+
559
+ assert_allclose(W1, W2, atol=1e-7)
560
+ assert_allclose(H1, H2, atol=1e-7)
561
+
562
+ # Compare with almost same beta_loss, since some values have a specific
563
+ # behavior, but the results should be continuous w.r.t beta_loss
564
+ beta_loss -= 1.0e-5
565
+ W, H = W0.copy(), H0.copy()
566
+ W3, H3, _ = non_negative_factorization(
567
+ X_csr,
568
+ W,
569
+ H,
570
+ n_components,
571
+ init="custom",
572
+ update_H=True,
573
+ solver="mu",
574
+ beta_loss=beta_loss,
575
+ max_iter=n_iter,
576
+ alpha_W=alpha,
577
+ l1_ratio=l1_ratio,
578
+ random_state=42,
579
+ )
580
+
581
+ assert_allclose(W1, W3, atol=1e-4)
582
+ assert_allclose(H1, H3, atol=1e-4)
583
+
584
+
585
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
586
+ def test_nmf_negative_beta_loss(csr_container):
587
+ # Test that an error is raised if beta_loss < 0 and X contains zeros.
588
+ # Test that the output has not NaN values when the input contains zeros.
589
+ n_samples = 6
590
+ n_features = 5
591
+ n_components = 3
592
+
593
+ rng = np.random.mtrand.RandomState(42)
594
+ X = rng.randn(n_samples, n_features)
595
+ np.clip(X, 0, None, out=X)
596
+ X_csr = csr_container(X)
597
+
598
+ def _assert_nmf_no_nan(X, beta_loss):
599
+ W, H, _ = non_negative_factorization(
600
+ X,
601
+ init="random",
602
+ n_components=n_components,
603
+ solver="mu",
604
+ beta_loss=beta_loss,
605
+ random_state=0,
606
+ max_iter=1000,
607
+ )
608
+ assert not np.any(np.isnan(W))
609
+ assert not np.any(np.isnan(H))
610
+
611
+ msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
612
+ for beta_loss in (-0.6, 0.0):
613
+ with pytest.raises(ValueError, match=msg):
614
+ _assert_nmf_no_nan(X, beta_loss)
615
+ _assert_nmf_no_nan(X + 1e-9, beta_loss)
616
+
617
+ for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5):
618
+ _assert_nmf_no_nan(X, beta_loss)
619
+ _assert_nmf_no_nan(X_csr, beta_loss)
620
+
621
+
622
+ # TODO(1.6): remove the warning filter
623
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
624
+ @pytest.mark.parametrize("beta_loss", [-0.5, 0.0])
625
+ def test_minibatch_nmf_negative_beta_loss(beta_loss):
626
+ """Check that an error is raised if beta_loss < 0 and X contains zeros."""
627
+ rng = np.random.RandomState(0)
628
+ X = rng.normal(size=(6, 5))
629
+ X[X < 0] = 0
630
+
631
+ nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0)
632
+
633
+ msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
634
+ with pytest.raises(ValueError, match=msg):
635
+ nmf.fit(X)
636
+
637
+
638
+ @pytest.mark.parametrize(
639
+ ["Estimator", "solver"],
640
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
641
+ )
642
+ def test_nmf_regularization(Estimator, solver):
643
+ # Test the effect of L1 and L2 regularizations
644
+ n_samples = 6
645
+ n_features = 5
646
+ n_components = 3
647
+ rng = np.random.mtrand.RandomState(42)
648
+ X = np.abs(rng.randn(n_samples, n_features))
649
+
650
+ # L1 regularization should increase the number of zeros
651
+ l1_ratio = 1.0
652
+ regul = Estimator(
653
+ n_components=n_components,
654
+ alpha_W=0.5,
655
+ l1_ratio=l1_ratio,
656
+ random_state=42,
657
+ **solver,
658
+ )
659
+ model = Estimator(
660
+ n_components=n_components,
661
+ alpha_W=0.0,
662
+ l1_ratio=l1_ratio,
663
+ random_state=42,
664
+ **solver,
665
+ )
666
+
667
+ W_regul = regul.fit_transform(X)
668
+ W_model = model.fit_transform(X)
669
+
670
+ H_regul = regul.components_
671
+ H_model = model.components_
672
+
673
+ eps = np.finfo(np.float64).eps
674
+ W_regul_n_zeros = W_regul[W_regul <= eps].size
675
+ W_model_n_zeros = W_model[W_model <= eps].size
676
+ H_regul_n_zeros = H_regul[H_regul <= eps].size
677
+ H_model_n_zeros = H_model[H_model <= eps].size
678
+
679
+ assert W_regul_n_zeros > W_model_n_zeros
680
+ assert H_regul_n_zeros > H_model_n_zeros
681
+
682
+ # L2 regularization should decrease the sum of the squared norm
683
+ # of the matrices W and H
684
+ l1_ratio = 0.0
685
+ regul = Estimator(
686
+ n_components=n_components,
687
+ alpha_W=0.5,
688
+ l1_ratio=l1_ratio,
689
+ random_state=42,
690
+ **solver,
691
+ )
692
+ model = Estimator(
693
+ n_components=n_components,
694
+ alpha_W=0.0,
695
+ l1_ratio=l1_ratio,
696
+ random_state=42,
697
+ **solver,
698
+ )
699
+
700
+ W_regul = regul.fit_transform(X)
701
+ W_model = model.fit_transform(X)
702
+
703
+ H_regul = regul.components_
704
+ H_model = model.components_
705
+
706
+ assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > (
707
+ linalg.norm(W_regul)
708
+ ) ** 2.0 + (linalg.norm(H_regul)) ** 2.0
709
+
710
+
711
+ @ignore_warnings(category=ConvergenceWarning)
712
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
713
+ def test_nmf_decreasing(solver):
714
+ # test that the objective function is decreasing at each iteration
715
+ n_samples = 20
716
+ n_features = 15
717
+ n_components = 10
718
+ alpha = 0.1
719
+ l1_ratio = 0.5
720
+ tol = 0.0
721
+
722
+ # initialization
723
+ rng = np.random.mtrand.RandomState(42)
724
+ X = rng.randn(n_samples, n_features)
725
+ np.abs(X, X)
726
+ W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
727
+
728
+ for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
729
+ if solver != "mu" and beta_loss != 2:
730
+ # not implemented
731
+ continue
732
+ W, H = W0.copy(), H0.copy()
733
+ previous_loss = None
734
+ for _ in range(30):
735
+ # one more iteration starting from the previous results
736
+ W, H, _ = non_negative_factorization(
737
+ X,
738
+ W,
739
+ H,
740
+ beta_loss=beta_loss,
741
+ init="custom",
742
+ n_components=n_components,
743
+ max_iter=1,
744
+ alpha_W=alpha,
745
+ solver=solver,
746
+ tol=tol,
747
+ l1_ratio=l1_ratio,
748
+ verbose=0,
749
+ random_state=0,
750
+ update_H=True,
751
+ )
752
+
753
+ loss = (
754
+ nmf._beta_divergence(X, W, H, beta_loss)
755
+ + alpha * l1_ratio * n_features * W.sum()
756
+ + alpha * l1_ratio * n_samples * H.sum()
757
+ + alpha * (1 - l1_ratio) * n_features * (W**2).sum()
758
+ + alpha * (1 - l1_ratio) * n_samples * (H**2).sum()
759
+ )
760
+ if previous_loss is not None:
761
+ assert previous_loss > loss
762
+ previous_loss = loss
763
+
764
+
765
+ def test_nmf_underflow():
766
+ # Regression test for an underflow issue in _beta_divergence
767
+ rng = np.random.RandomState(0)
768
+ n_samples, n_features, n_components = 10, 2, 2
769
+ X = np.abs(rng.randn(n_samples, n_features)) * 10
770
+ W = np.abs(rng.randn(n_samples, n_components)) * 10
771
+ H = np.abs(rng.randn(n_components, n_features))
772
+
773
+ X[0, 0] = 0
774
+ ref = nmf._beta_divergence(X, W, H, beta=1.0)
775
+ X[0, 0] = 1e-323
776
+ res = nmf._beta_divergence(X, W, H, beta=1.0)
777
+ assert_almost_equal(res, ref)
778
+
779
+
780
+ # TODO(1.6): remove the warning filter
781
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
782
+ @pytest.mark.parametrize(
783
+ "dtype_in, dtype_out",
784
+ [
785
+ (np.float32, np.float32),
786
+ (np.float64, np.float64),
787
+ (np.int32, np.float64),
788
+ (np.int64, np.float64),
789
+ ],
790
+ )
791
+ @pytest.mark.parametrize(
792
+ ["Estimator", "solver"],
793
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
794
+ )
795
+ def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out):
796
+ # Check that NMF preserves dtype (float32 and float64)
797
+ X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False)
798
+ np.abs(X, out=X)
799
+
800
+ nmf = Estimator(
801
+ alpha_W=1.0,
802
+ alpha_H=1.0,
803
+ tol=1e-2,
804
+ random_state=0,
805
+ **solver,
806
+ )
807
+
808
+ assert nmf.fit(X).transform(X).dtype == dtype_out
809
+ assert nmf.fit_transform(X).dtype == dtype_out
810
+ assert nmf.components_.dtype == dtype_out
811
+
812
+
813
+ # TODO(1.6): remove the warning filter
814
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
815
+ @pytest.mark.parametrize(
816
+ ["Estimator", "solver"],
817
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
818
+ )
819
+ def test_nmf_float32_float64_consistency(Estimator, solver):
820
+ # Check that the result of NMF is the same between float32 and float64
821
+ X = np.random.RandomState(0).randn(50, 7)
822
+ np.abs(X, out=X)
823
+ nmf32 = Estimator(random_state=0, tol=1e-3, **solver)
824
+ W32 = nmf32.fit_transform(X.astype(np.float32))
825
+ nmf64 = Estimator(random_state=0, tol=1e-3, **solver)
826
+ W64 = nmf64.fit_transform(X)
827
+
828
+ assert_allclose(W32, W64, atol=1e-5)
829
+
830
+
831
+ # TODO(1.6): remove the warning filter
832
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
833
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
834
+ def test_nmf_custom_init_dtype_error(Estimator):
835
+ # Check that an error is raise if custom H and/or W don't have the same
836
+ # dtype as X.
837
+ rng = np.random.RandomState(0)
838
+ X = rng.random_sample((20, 15))
839
+ H = rng.random_sample((15, 15)).astype(np.float32)
840
+ W = rng.random_sample((20, 15))
841
+
842
+ with pytest.raises(TypeError, match="should have the same dtype as X"):
843
+ Estimator(init="custom").fit(X, H=H, W=W)
844
+
845
+ with pytest.raises(TypeError, match="should have the same dtype as X"):
846
+ non_negative_factorization(X, H=H, update_H=False)
847
+
848
+
849
+ @pytest.mark.parametrize("beta_loss", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5])
850
+ def test_nmf_minibatchnmf_equivalence(beta_loss):
851
+ # Test that MiniBatchNMF is equivalent to NMF when batch_size = n_samples and
852
+ # forget_factor 0.0 (stopping criterion put aside)
853
+ rng = np.random.mtrand.RandomState(42)
854
+ X = np.abs(rng.randn(48, 5))
855
+
856
+ nmf = NMF(
857
+ n_components=5,
858
+ beta_loss=beta_loss,
859
+ solver="mu",
860
+ random_state=0,
861
+ tol=0,
862
+ )
863
+ mbnmf = MiniBatchNMF(
864
+ n_components=5,
865
+ beta_loss=beta_loss,
866
+ random_state=0,
867
+ tol=0,
868
+ max_no_improvement=None,
869
+ batch_size=X.shape[0],
870
+ forget_factor=0.0,
871
+ )
872
+ W = nmf.fit_transform(X)
873
+ mbW = mbnmf.fit_transform(X)
874
+ assert_allclose(W, mbW)
875
+
876
+
877
+ def test_minibatch_nmf_partial_fit():
878
+ # Check fit / partial_fit equivalence. Applicable only with fresh restarts.
879
+ rng = np.random.mtrand.RandomState(42)
880
+ X = np.abs(rng.randn(100, 5))
881
+
882
+ n_components = 5
883
+ batch_size = 10
884
+ max_iter = 2
885
+
886
+ mbnmf1 = MiniBatchNMF(
887
+ n_components=n_components,
888
+ init="custom",
889
+ random_state=0,
890
+ max_iter=max_iter,
891
+ batch_size=batch_size,
892
+ tol=0,
893
+ max_no_improvement=None,
894
+ fresh_restarts=False,
895
+ )
896
+ mbnmf2 = MiniBatchNMF(n_components=n_components, init="custom", random_state=0)
897
+
898
+ # Force the same init of H (W is recomputed anyway) to be able to compare results.
899
+ W, H = nmf._initialize_nmf(
900
+ X, n_components=n_components, init="random", random_state=0
901
+ )
902
+
903
+ mbnmf1.fit(X, W=W, H=H)
904
+ for i in range(max_iter):
905
+ for j in range(batch_size):
906
+ mbnmf2.partial_fit(X[j : j + batch_size], W=W[:batch_size], H=H)
907
+
908
+ assert mbnmf1.n_steps_ == mbnmf2.n_steps_
909
+ assert_allclose(mbnmf1.components_, mbnmf2.components_)
910
+
911
+
912
+ def test_feature_names_out():
913
+ """Check feature names out for NMF."""
914
+ random_state = np.random.RandomState(0)
915
+ X = np.abs(random_state.randn(10, 4))
916
+ nmf = NMF(n_components=3).fit(X)
917
+
918
+ names = nmf.get_feature_names_out()
919
+ assert_array_equal([f"nmf{i}" for i in range(3)], names)
920
+
921
+
922
+ # TODO(1.6): remove the warning filter
923
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
924
+ def test_minibatch_nmf_verbose():
925
+ # Check verbose mode of MiniBatchNMF for better coverage.
926
+ A = np.random.RandomState(0).random_sample((100, 10))
927
+ nmf = MiniBatchNMF(tol=1e-2, random_state=0, verbose=1)
928
+ old_stdout = sys.stdout
929
+ sys.stdout = StringIO()
930
+ try:
931
+ nmf.fit(A)
932
+ finally:
933
+ sys.stdout = old_stdout
934
+
935
+
936
+ # TODO(1.5): remove this test
937
+ def test_NMF_inverse_transform_W_deprecation():
938
+ rng = np.random.mtrand.RandomState(42)
939
+ A = np.abs(rng.randn(6, 5))
940
+ est = NMF(
941
+ n_components=3,
942
+ init="random",
943
+ random_state=0,
944
+ tol=1e-6,
945
+ )
946
+ Xt = est.fit_transform(A)
947
+
948
+ with pytest.raises(TypeError, match="Missing required positional argument"):
949
+ est.inverse_transform()
950
+
951
+ with pytest.raises(ValueError, match="Please provide only"):
952
+ est.inverse_transform(Xt=Xt, W=Xt)
953
+
954
+ with warnings.catch_warnings(record=True):
955
+ warnings.simplefilter("error")
956
+ est.inverse_transform(Xt)
957
+
958
+ with pytest.warns(FutureWarning, match="Input argument `W` was renamed to `Xt`"):
959
+ est.inverse_transform(W=Xt)
960
+
961
+
962
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
963
+ def test_nmf_n_components_auto(Estimator):
964
+ # Check that n_components is correctly inferred
965
+ # from the provided custom initialization.
966
+ rng = np.random.RandomState(0)
967
+ X = rng.random_sample((6, 5))
968
+ W = rng.random_sample((6, 2))
969
+ H = rng.random_sample((2, 5))
970
+ est = Estimator(
971
+ n_components="auto",
972
+ init="custom",
973
+ random_state=0,
974
+ tol=1e-6,
975
+ )
976
+ est.fit_transform(X, W=W, H=H)
977
+ assert est._n_components == H.shape[0]
978
+
979
+
980
+ def test_nmf_non_negative_factorization_n_components_auto():
981
+ # Check that n_components is correctly inferred from the provided
982
+ # custom initialization.
983
+ rng = np.random.RandomState(0)
984
+ X = rng.random_sample((6, 5))
985
+ W_init = rng.random_sample((6, 2))
986
+ H_init = rng.random_sample((2, 5))
987
+ W, H, _ = non_negative_factorization(
988
+ X, W=W_init, H=H_init, init="custom", n_components="auto"
989
+ )
990
+ assert H.shape == H_init.shape
991
+ assert W.shape == W_init.shape
992
+
993
+
994
+ # TODO(1.6): remove
995
+ def test_nmf_n_components_default_value_warning():
996
+ rng = np.random.RandomState(0)
997
+ X = rng.random_sample((6, 5))
998
+ H = rng.random_sample((2, 5))
999
+ with pytest.warns(
1000
+ FutureWarning, match="The default value of `n_components` will change from"
1001
+ ):
1002
+ non_negative_factorization(X, H=H)
1003
+
1004
+
1005
+ def test_nmf_n_components_auto_no_h_update():
1006
+ # Tests that non_negative_factorization does not fail when setting
1007
+ # n_components="auto", and that the inferred n_components
1008
+ # value is the right one.
1009
+ rng = np.random.RandomState(0)
1010
+ X = rng.random_sample((6, 5))
1011
+ H_true = rng.random_sample((2, 5))
1012
+ W, H, _ = non_negative_factorization(
1013
+ X, H=H_true, n_components="auto", update_H=False
1014
+ ) # should not fail
1015
+ assert_allclose(H, H_true)
1016
+ assert W.shape == (X.shape[0], H_true.shape[0])
1017
+
1018
+
1019
+ def test_nmf_w_h_not_used_warning():
1020
+ # Check that warnings are raised if user-provided W and H are not used
1021
+ # and initialization overrides value of W or H
1022
+ rng = np.random.RandomState(0)
1023
+ X = rng.random_sample((6, 5))
1024
+ W_init = rng.random_sample((6, 2))
1025
+ H_init = rng.random_sample((2, 5))
1026
+ with pytest.warns(
1027
+ RuntimeWarning,
1028
+ match="When init!='custom', provided W or H are ignored",
1029
+ ):
1030
+ non_negative_factorization(X, H=H_init, update_H=True, n_components="auto")
1031
+
1032
+ with pytest.warns(
1033
+ RuntimeWarning,
1034
+ match="When init!='custom', provided W or H are ignored",
1035
+ ):
1036
+ non_negative_factorization(
1037
+ X, W=W_init, H=H_init, update_H=True, n_components="auto"
1038
+ )
1039
+
1040
+ with pytest.warns(
1041
+ RuntimeWarning, match="When update_H=False, the provided initial W is not used."
1042
+ ):
1043
+ # When update_H is False, W is ignored regardless of init
1044
+ # TODO: use the provided W when init="custom".
1045
+ non_negative_factorization(
1046
+ X, W=W_init, H=H_init, update_H=False, n_components="auto"
1047
+ )
1048
+
1049
+
1050
+ def test_nmf_custom_init_shape_error():
1051
+ # Check that an informative error is raised when custom initialization does not
1052
+ # have the right shape
1053
+ rng = np.random.RandomState(0)
1054
+ X = rng.random_sample((6, 5))
1055
+ H = rng.random_sample((2, 5))
1056
+ nmf = NMF(n_components=2, init="custom", random_state=0)
1057
+
1058
+ with pytest.raises(ValueError, match="Array with wrong first dimension passed"):
1059
+ nmf.fit(X, H=H, W=rng.random_sample((5, 2)))
1060
+
1061
+ with pytest.raises(ValueError, match="Array with wrong second dimension passed"):
1062
+ nmf.fit(X, H=H, W=rng.random_sample((6, 3)))
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py ADDED
@@ -0,0 +1,477 @@
1
+ import sys
2
+ from io import StringIO
3
+
4
+ import numpy as np
5
+ import pytest
6
+ from numpy.testing import assert_array_equal
7
+ from scipy.linalg import block_diag
8
+ from scipy.special import psi
9
+
10
+ from sklearn.decomposition import LatentDirichletAllocation
11
+ from sklearn.decomposition._online_lda_fast import (
12
+ _dirichlet_expectation_1d,
13
+ _dirichlet_expectation_2d,
14
+ )
15
+ from sklearn.exceptions import NotFittedError
16
+ from sklearn.utils._testing import (
17
+ assert_allclose,
18
+ assert_almost_equal,
19
+ assert_array_almost_equal,
20
+ if_safe_multiprocessing_with_blas,
21
+ )
22
+ from sklearn.utils.fixes import CSR_CONTAINERS
23
+
24
+
25
+ def _build_sparse_array(csr_container):
26
+ # Create 3 topics and each topic has 3 distinct words.
27
+ # (Each word only belongs to a single topic.)
28
+ n_components = 3
29
+ block = np.full((3, 3), n_components, dtype=int)
30
+ blocks = [block] * n_components
31
+ X = block_diag(*blocks)
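+ # The resulting 9x9 block-diagonal matrix contains 3 groups of documents that
+ # each use a disjoint set of 3 words, which the tests expect LDA to recover.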
32
+ X = csr_container(X)
33
+ return (n_components, X)
34
+
35
+
36
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
37
+ def test_lda_default_prior_params(csr_container):
38
+ # default prior parameter should be `1 / topics`
39
+ # and verbose params should not affect result
40
+ n_components, X = _build_sparse_array(csr_container)
41
+ prior = 1.0 / n_components
42
+ lda_1 = LatentDirichletAllocation(
43
+ n_components=n_components,
44
+ doc_topic_prior=prior,
45
+ topic_word_prior=prior,
46
+ random_state=0,
47
+ )
48
+ lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0)
49
+ topic_distr_1 = lda_1.fit_transform(X)
50
+ topic_distr_2 = lda_2.fit_transform(X)
51
+ assert_almost_equal(topic_distr_1, topic_distr_2)
52
+
53
+
54
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
55
+ def test_lda_fit_batch(csr_container):
56
+ # Test LDA batch learning (`fit` method with learning_method='batch')
57
+ rng = np.random.RandomState(0)
58
+ n_components, X = _build_sparse_array(csr_container)
59
+ lda = LatentDirichletAllocation(
60
+ n_components=n_components,
61
+ evaluate_every=1,
62
+ learning_method="batch",
63
+ random_state=rng,
64
+ )
65
+ lda.fit(X)
66
+
67
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
68
+ for component in lda.components_:
69
+ # Find top 3 words in each LDA component
70
+ top_idx = set(component.argsort()[-3:][::-1])
71
+ assert tuple(sorted(top_idx)) in correct_idx_grps
72
+
73
+
74
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
75
+ def test_lda_fit_online(csr_container):
76
+ # Test LDA online learning (`fit` method with 'online' learning)
77
+ rng = np.random.RandomState(0)
78
+ n_components, X = _build_sparse_array(csr_container)
79
+ lda = LatentDirichletAllocation(
80
+ n_components=n_components,
81
+ learning_offset=10.0,
82
+ evaluate_every=1,
83
+ learning_method="online",
84
+ random_state=rng,
85
+ )
86
+ lda.fit(X)
87
+
88
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
89
+ for component in lda.components_:
90
+ # Find top 3 words in each LDA component
91
+ top_idx = set(component.argsort()[-3:][::-1])
92
+ assert tuple(sorted(top_idx)) in correct_idx_grps
93
+
94
+
95
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
96
+ def test_lda_partial_fit(csr_container):
97
+ # Test LDA online learning (`partial_fit` method)
98
+ # (same as test_lda_batch)
99
+ rng = np.random.RandomState(0)
100
+ n_components, X = _build_sparse_array(csr_container)
101
+ lda = LatentDirichletAllocation(
102
+ n_components=n_components,
103
+ learning_offset=10.0,
104
+ total_samples=100,
105
+ random_state=rng,
106
+ )
107
+ for i in range(3):
108
+ lda.partial_fit(X)
109
+
110
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
111
+ for c in lda.components_:
112
+ top_idx = set(c.argsort()[-3:][::-1])
113
+ assert tuple(sorted(top_idx)) in correct_idx_grps
114
+
115
+
116
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
117
+ def test_lda_dense_input(csr_container):
118
+ # Test LDA with dense input.
119
+ rng = np.random.RandomState(0)
120
+ n_components, X = _build_sparse_array(csr_container)
121
+ lda = LatentDirichletAllocation(
122
+ n_components=n_components, learning_method="batch", random_state=rng
123
+ )
124
+ lda.fit(X.toarray())
125
+
126
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
127
+ for component in lda.components_:
128
+ # Find top 3 words in each LDA component
129
+ top_idx = set(component.argsort()[-3:][::-1])
130
+ assert tuple(sorted(top_idx)) in correct_idx_grps
131
+
132
+
133
+ def test_lda_transform():
134
+ # Test LDA transform.
135
+ # Transform result cannot be negative and should be normalized
136
+ rng = np.random.RandomState(0)
137
+ X = rng.randint(5, size=(20, 10))
138
+ n_components = 3
139
+ lda = LatentDirichletAllocation(n_components=n_components, random_state=rng)
140
+ X_trans = lda.fit_transform(X)
141
+ assert (X_trans > 0.0).any()
142
+ assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
143
+
144
+
145
+ @pytest.mark.parametrize("method", ("online", "batch"))
146
+ def test_lda_fit_transform(method):
147
+ # Test LDA fit_transform & transform
148
+ # fit_transform and transform result should be the same
149
+ rng = np.random.RandomState(0)
150
+ X = rng.randint(10, size=(50, 20))
151
+ lda = LatentDirichletAllocation(
152
+ n_components=5, learning_method=method, random_state=rng
153
+ )
154
+ X_fit = lda.fit_transform(X)
155
+ X_trans = lda.transform(X)
156
+ assert_array_almost_equal(X_fit, X_trans, 4)
157
+
158
+
159
+ def test_lda_negative_input():
160
+ # Test that an error is raised when a dense matrix with negative values is passed.
161
+ X = np.full((5, 10), -1.0)
162
+ lda = LatentDirichletAllocation()
163
+ regex = r"^Negative values in data passed"
164
+ with pytest.raises(ValueError, match=regex):
165
+ lda.fit(X)
166
+
167
+
168
+ def test_lda_no_component_error():
169
+ # test `perplexity` before `fit`
170
+ rng = np.random.RandomState(0)
171
+ X = rng.randint(4, size=(20, 10))
172
+ lda = LatentDirichletAllocation()
173
+ regex = (
174
+ "This LatentDirichletAllocation instance is not fitted yet. "
175
+ "Call 'fit' with appropriate arguments before using this "
176
+ "estimator."
177
+ )
178
+ with pytest.raises(NotFittedError, match=regex):
179
+ lda.perplexity(X)
180
+
181
+
182
+ @if_safe_multiprocessing_with_blas
183
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
184
+ @pytest.mark.parametrize("method", ("online", "batch"))
185
+ def test_lda_multi_jobs(method, csr_container):
186
+ n_components, X = _build_sparse_array(csr_container)
187
+ # Test LDA batch training with multi CPU
188
+ rng = np.random.RandomState(0)
189
+ lda = LatentDirichletAllocation(
190
+ n_components=n_components,
191
+ n_jobs=2,
192
+ learning_method=method,
193
+ evaluate_every=1,
194
+ random_state=rng,
195
+ )
196
+ lda.fit(X)
197
+
198
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
199
+ for c in lda.components_:
200
+ top_idx = set(c.argsort()[-3:][::-1])
201
+ assert tuple(sorted(top_idx)) in correct_idx_grps
202
+
203
+
204
+ @if_safe_multiprocessing_with_blas
205
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
206
+ def test_lda_partial_fit_multi_jobs(csr_container):
207
+ # Test LDA online training with multi CPU
208
+ rng = np.random.RandomState(0)
209
+ n_components, X = _build_sparse_array(csr_container)
210
+ lda = LatentDirichletAllocation(
211
+ n_components=n_components,
212
+ n_jobs=2,
213
+ learning_offset=5.0,
214
+ total_samples=30,
215
+ random_state=rng,
216
+ )
217
+ for i in range(2):
218
+ lda.partial_fit(X)
219
+
220
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
221
+ for c in lda.components_:
222
+ top_idx = set(c.argsort()[-3:][::-1])
223
+ assert tuple(sorted(top_idx)) in correct_idx_grps
224
+
225
+
226
+ def test_lda_preplexity_mismatch():
227
+ # test dimension mismatch in `perplexity` method
228
+ rng = np.random.RandomState(0)
229
+ n_components = rng.randint(3, 6)
230
+ n_samples = rng.randint(6, 10)
231
+ X = np.random.randint(4, size=(n_samples, 10))
232
+ lda = LatentDirichletAllocation(
233
+ n_components=n_components,
234
+ learning_offset=5.0,
235
+ total_samples=20,
236
+ random_state=rng,
237
+ )
238
+ lda.fit(X)
239
+ # invalid samples
240
+ invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
241
+ with pytest.raises(ValueError, match=r"Number of samples"):
242
+ lda._perplexity_precomp_distr(X, invalid_n_samples)
243
+ # invalid topic number
244
+ invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
245
+ with pytest.raises(ValueError, match=r"Number of topics"):
246
+ lda._perplexity_precomp_distr(X, invalid_n_components)
247
+
248
+
249
+ @pytest.mark.parametrize("method", ("online", "batch"))
250
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
251
+ def test_lda_perplexity(method, csr_container):
252
+ # Test LDA perplexity for batch training
253
+ # perplexity should be lower after each iteration
254
+ n_components, X = _build_sparse_array(csr_container)
255
+ lda_1 = LatentDirichletAllocation(
256
+ n_components=n_components,
257
+ max_iter=1,
258
+ learning_method=method,
259
+ total_samples=100,
260
+ random_state=0,
261
+ )
262
+ lda_2 = LatentDirichletAllocation(
263
+ n_components=n_components,
264
+ max_iter=10,
265
+ learning_method=method,
266
+ total_samples=100,
267
+ random_state=0,
268
+ )
269
+ lda_1.fit(X)
270
+ perp_1 = lda_1.perplexity(X, sub_sampling=False)
271
+
272
+ lda_2.fit(X)
273
+ perp_2 = lda_2.perplexity(X, sub_sampling=False)
274
+ assert perp_1 >= perp_2
275
+
276
+ perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
277
+ perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
278
+ assert perp_1_subsampling >= perp_2_subsampling
279
+
280
+
281
+ @pytest.mark.parametrize("method", ("online", "batch"))
282
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
283
+ def test_lda_score(method, csr_container):
284
+ # Test LDA score for batch training
285
+ # score should be higher after each iteration
286
+ n_components, X = _build_sparse_array(csr_container)
287
+ lda_1 = LatentDirichletAllocation(
288
+ n_components=n_components,
289
+ max_iter=1,
290
+ learning_method=method,
291
+ total_samples=100,
292
+ random_state=0,
293
+ )
294
+ lda_2 = LatentDirichletAllocation(
295
+ n_components=n_components,
296
+ max_iter=10,
297
+ learning_method=method,
298
+ total_samples=100,
299
+ random_state=0,
300
+ )
301
+ lda_1.fit_transform(X)
302
+ score_1 = lda_1.score(X)
303
+
304
+ lda_2.fit_transform(X)
305
+ score_2 = lda_2.score(X)
306
+ assert score_2 >= score_1
307
+
308
+
309
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
310
+ def test_perplexity_input_format(csr_container):
311
+ # Test LDA perplexity for sparse and dense input
312
+ # perplexity should be the same for both dense and sparse input
313
+ n_components, X = _build_sparse_array(csr_container)
314
+ lda = LatentDirichletAllocation(
315
+ n_components=n_components,
316
+ max_iter=1,
317
+ learning_method="batch",
318
+ total_samples=100,
319
+ random_state=0,
320
+ )
321
+ lda.fit(X)
322
+ perp_1 = lda.perplexity(X)
323
+ perp_2 = lda.perplexity(X.toarray())
324
+ assert_almost_equal(perp_1, perp_2)
325
+
326
+
327
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
328
+ def test_lda_score_perplexity(csr_container):
329
+ # Test the relationship between LDA score and perplexity
330
+ n_components, X = _build_sparse_array(csr_container)
331
+ lda = LatentDirichletAllocation(
332
+ n_components=n_components, max_iter=10, random_state=0
333
+ )
334
+ lda.fit(X)
335
+ perplexity_1 = lda.perplexity(X, sub_sampling=False)
336
+
337
+ score = lda.score(X)
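+ # perplexity is exp(-log-likelihood per word), and score returns the total
+ # log-likelihood, so dividing by the word count should recover the same value.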
338
+ perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data)))
339
+ assert_almost_equal(perplexity_1, perplexity_2)
340
+
341
+
342
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
343
+ def test_lda_fit_perplexity(csr_container):
344
+ # Test that the perplexity computed during fit is consistent with what is
345
+ # returned by the perplexity method
346
+ n_components, X = _build_sparse_array(csr_container)
347
+ lda = LatentDirichletAllocation(
348
+ n_components=n_components,
349
+ max_iter=1,
350
+ learning_method="batch",
351
+ random_state=0,
352
+ evaluate_every=1,
353
+ )
354
+ lda.fit(X)
355
+
356
+ # Perplexity computed at end of fit method
357
+ perplexity1 = lda.bound_
358
+
359
+ # Result of perplexity method on the train set
360
+ perplexity2 = lda.perplexity(X)
361
+
362
+ assert_almost_equal(perplexity1, perplexity2)
363
+
364
+
365
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
366
+ def test_lda_empty_docs(csr_container):
367
+ """Test LDA on empty document (all-zero rows)."""
368
+ Z = np.zeros((5, 4))
369
+ for X in [Z, csr_container(Z)]:
370
+ lda = LatentDirichletAllocation(max_iter=750).fit(X)
371
+ assert_almost_equal(
372
+ lda.components_.sum(axis=0), np.ones(lda.components_.shape[1])
373
+ )
374
+
375
+
376
+ def test_dirichlet_expectation():
377
+ """Test Cython version of Dirichlet expectation calculation."""
378
+ x = np.logspace(-100, 10, 10000)
379
+ expectation = np.empty_like(x)
380
+ _dirichlet_expectation_1d(x, 0, expectation)
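+ # For Dirichlet parameters x, E[log theta_k] = psi(x_k) - psi(sum(x)); the 1d
+ # helper also exponentiates the result, hence the np.exp in the reference value.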
381
+ assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19)
382
+
383
+ x = x.reshape(100, 100)
384
+ assert_allclose(
385
+ _dirichlet_expectation_2d(x),
386
+ psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
387
+ rtol=1e-11,
388
+ atol=3e-9,
389
+ )
390
+
391
+
392
+ def check_verbosity(
393
+ verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
394
+ ):
395
+ n_components, X = _build_sparse_array(csr_container)
396
+ lda = LatentDirichletAllocation(
397
+ n_components=n_components,
398
+ max_iter=3,
399
+ learning_method="batch",
400
+ verbose=verbose,
401
+ evaluate_every=evaluate_every,
402
+ random_state=0,
403
+ )
404
+ out = StringIO()
405
+ old_out, sys.stdout = sys.stdout, out
406
+ try:
407
+ lda.fit(X)
408
+ finally:
409
+ sys.stdout = old_out
410
+
411
+ n_lines = out.getvalue().count("\n")
412
+ n_perplexity = out.getvalue().count("perplexity")
413
+ assert expected_lines == n_lines
414
+ assert expected_perplexities == n_perplexity
415
+
416
+
417
+ @pytest.mark.parametrize(
418
+ "verbose,evaluate_every,expected_lines,expected_perplexities",
419
+ [
420
+ (False, 1, 0, 0),
421
+ (False, 0, 0, 0),
422
+ (True, 0, 3, 0),
423
+ (True, 1, 3, 3),
424
+ (True, 2, 3, 1),
425
+ ],
426
+ )
427
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
428
+ def test_verbosity(
429
+ verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
430
+ ):
431
+ check_verbosity(
432
+ verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
433
+ )
434
+
435
+
436
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
437
+ def test_lda_feature_names_out(csr_container):
438
+ """Check feature names out for LatentDirichletAllocation."""
439
+ n_components, X = _build_sparse_array(csr_container)
440
+ lda = LatentDirichletAllocation(n_components=n_components).fit(X)
441
+
442
+ names = lda.get_feature_names_out()
443
+ assert_array_equal(
444
+ [f"latentdirichletallocation{i}" for i in range(n_components)], names
445
+ )
446
+
447
+
448
+ @pytest.mark.parametrize("learning_method", ("batch", "online"))
449
+ def test_lda_dtype_match(learning_method, global_dtype):
450
+ """Check data type preservation of fitted attributes."""
451
+ rng = np.random.RandomState(0)
452
+ X = rng.uniform(size=(20, 10)).astype(global_dtype, copy=False)
453
+
454
+ lda = LatentDirichletAllocation(
455
+ n_components=5, random_state=0, learning_method=learning_method
456
+ )
457
+ lda.fit(X)
458
+ assert lda.components_.dtype == global_dtype
459
+ assert lda.exp_dirichlet_component_.dtype == global_dtype
460
+
461
+
462
+ @pytest.mark.parametrize("learning_method", ("batch", "online"))
463
+ def test_lda_numerical_consistency(learning_method, global_random_seed):
464
+ """Check numerical consistency between np.float32 and np.float64."""
465
+ rng = np.random.RandomState(global_random_seed)
466
+ X64 = rng.uniform(size=(20, 10))
467
+ X32 = X64.astype(np.float32)
468
+
469
+ lda_64 = LatentDirichletAllocation(
470
+ n_components=5, random_state=global_random_seed, learning_method=learning_method
471
+ ).fit(X64)
472
+ lda_32 = LatentDirichletAllocation(
473
+ n_components=5, random_state=global_random_seed, learning_method=learning_method
474
+ ).fit(X32)
475
+
476
+ assert_allclose(lda_32.components_, lda_64.components_)
477
+ assert_allclose(lda_32.transform(X32), lda_64.transform(X64))
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py ADDED
@@ -0,0 +1,987 @@
1
+ import re
2
+ import warnings
3
+
4
+ import numpy as np
5
+ import pytest
6
+ import scipy as sp
7
+ from numpy.testing import assert_array_equal
8
+
9
+ from sklearn import config_context, datasets
10
+ from sklearn.base import clone
11
+ from sklearn.datasets import load_iris, make_classification
12
+ from sklearn.decomposition import PCA
13
+ from sklearn.decomposition._pca import _assess_dimension, _infer_dimension
14
+ from sklearn.utils._array_api import (
15
+ _atol_for_type,
16
+ _convert_to_numpy,
17
+ yield_namespace_device_dtype_combinations,
18
+ )
19
+ from sklearn.utils._array_api import device as array_device
20
+ from sklearn.utils._testing import _array_api_for_tests, assert_allclose
21
+ from sklearn.utils.estimator_checks import (
22
+ _get_check_estimator_ids,
23
+ check_array_api_input_and_values,
24
+ )
25
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
26
+
27
+ iris = datasets.load_iris()
28
+ PCA_SOLVERS = ["full", "arpack", "randomized", "auto"]
29
+
30
+ # `SPARSE_M` and `SPARSE_N` could be larger, but be aware:
31
+ # * SciPy's generation of random sparse matrix can be costly
32
+ # * A (SPARSE_M, SPARSE_N) dense array is allocated to compare against
33
+ SPARSE_M, SPARSE_N = 1000, 300 # arbitrary
34
+ SPARSE_MAX_COMPONENTS = min(SPARSE_M, SPARSE_N)
35
+
36
+
37
+ def _check_fitted_pca_close(pca1, pca2, rtol):
38
+ assert_allclose(pca1.components_, pca2.components_, rtol=rtol)
39
+ assert_allclose(pca1.explained_variance_, pca2.explained_variance_, rtol=rtol)
40
+ assert_allclose(pca1.singular_values_, pca2.singular_values_, rtol=rtol)
41
+ assert_allclose(pca1.mean_, pca2.mean_, rtol=rtol)
42
+ assert_allclose(pca1.n_components_, pca2.n_components_, rtol=rtol)
43
+ assert_allclose(pca1.n_samples_, pca2.n_samples_, rtol=rtol)
44
+ assert_allclose(pca1.noise_variance_, pca2.noise_variance_, rtol=rtol)
45
+ assert_allclose(pca1.n_features_in_, pca2.n_features_in_, rtol=rtol)
46
+
47
+
48
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
49
+ @pytest.mark.parametrize("n_components", range(1, iris.data.shape[1]))
50
+ def test_pca(svd_solver, n_components):
51
+ X = iris.data
52
+ pca = PCA(n_components=n_components, svd_solver=svd_solver)
53
+
54
+ # check the shape of fit.transform
55
+ X_r = pca.fit(X).transform(X)
56
+ assert X_r.shape[1] == n_components
57
+
58
+ # check the equivalence of fit.transform and fit_transform
59
+ X_r2 = pca.fit_transform(X)
60
+ assert_allclose(X_r, X_r2)
61
+ X_r = pca.transform(X)
62
+ assert_allclose(X_r, X_r2)
63
+
64
+ # Test get_covariance and get_precision
65
+ cov = pca.get_covariance()
66
+ precision = pca.get_precision()
67
+ assert_allclose(np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-12)
68
+
69
+
70
+ @pytest.mark.parametrize("density", [0.01, 0.1, 0.30])
71
+ @pytest.mark.parametrize("n_components", [1, 2, 10])
72
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
73
+ @pytest.mark.parametrize("svd_solver", ["arpack"])
74
+ @pytest.mark.parametrize("scale", [1, 10, 100])
75
+ def test_pca_sparse(
76
+ global_random_seed, svd_solver, sparse_container, n_components, density, scale
77
+ ):
78
+ # Make sure any tolerance changes pass with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all"
79
+ rtol = 5e-07
80
+ transform_rtol = 3e-05
81
+
82
+ random_state = np.random.default_rng(global_random_seed)
83
+ X = sparse_container(
84
+ sp.sparse.random(
85
+ SPARSE_M,
86
+ SPARSE_N,
87
+ random_state=random_state,
88
+ density=density,
89
+ )
90
+ )
91
+ # Scale the data + vary the column means
92
+ scale_vector = random_state.random(X.shape[1]) * scale
93
+ X = X.multiply(scale_vector)
94
+
95
+ pca = PCA(
96
+ n_components=n_components,
97
+ svd_solver=svd_solver,
98
+ random_state=global_random_seed,
99
+ )
100
+ pca.fit(X)
101
+
102
+ Xd = X.toarray()
103
+ pcad = PCA(
104
+ n_components=n_components,
105
+ svd_solver=svd_solver,
106
+ random_state=global_random_seed,
107
+ )
108
+ pcad.fit(Xd)
109
+
110
+ # Fitted attributes equality
111
+ _check_fitted_pca_close(pca, pcad, rtol=rtol)
112
+
113
+ # Test transform
114
+ X2 = sparse_container(
115
+ sp.sparse.random(
116
+ SPARSE_M,
117
+ SPARSE_N,
118
+ random_state=random_state,
119
+ density=density,
120
+ )
121
+ )
122
+ X2d = X2.toarray()
123
+
124
+ assert_allclose(pca.transform(X2), pca.transform(X2d), rtol=transform_rtol)
125
+ assert_allclose(pca.transform(X2), pcad.transform(X2d), rtol=transform_rtol)
126
+
127
+
128
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
129
+ def test_pca_sparse_fit_transform(global_random_seed, sparse_container):
130
+ random_state = np.random.default_rng(global_random_seed)
131
+ X = sparse_container(
132
+ sp.sparse.random(
133
+ SPARSE_M,
134
+ SPARSE_N,
135
+ random_state=random_state,
136
+ density=0.01,
137
+ )
138
+ )
139
+ X2 = sparse_container(
140
+ sp.sparse.random(
141
+ SPARSE_M,
142
+ SPARSE_N,
143
+ random_state=random_state,
144
+ density=0.01,
145
+ )
146
+ )
147
+
148
+ pca_fit = PCA(n_components=10, svd_solver="arpack", random_state=global_random_seed)
149
+ pca_fit_transform = PCA(
150
+ n_components=10, svd_solver="arpack", random_state=global_random_seed
151
+ )
152
+
153
+ pca_fit.fit(X)
154
+ transformed_X = pca_fit_transform.fit_transform(X)
155
+
156
+ _check_fitted_pca_close(pca_fit, pca_fit_transform, rtol=1e-10)
157
+ assert_allclose(transformed_X, pca_fit_transform.transform(X), rtol=2e-9)
158
+ assert_allclose(transformed_X, pca_fit.transform(X), rtol=2e-9)
159
+ assert_allclose(pca_fit.transform(X2), pca_fit_transform.transform(X2), rtol=2e-9)
160
+
161
+
162
+ @pytest.mark.parametrize("svd_solver", ["randomized", "full", "auto"])
163
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
164
+ def test_sparse_pca_solver_error(global_random_seed, svd_solver, sparse_container):
165
+ random_state = np.random.RandomState(global_random_seed)
166
+ X = sparse_container(
167
+ sp.sparse.random(
168
+ SPARSE_M,
169
+ SPARSE_N,
170
+ random_state=random_state,
171
+ )
172
+ )
173
+ pca = PCA(n_components=30, svd_solver=svd_solver)
174
+ error_msg_pattern = (
175
+ f'PCA only support sparse inputs with the "arpack" solver, while "{svd_solver}"'
176
+ " was passed"
177
+ )
178
+ with pytest.raises(TypeError, match=error_msg_pattern):
179
+ pca.fit(X)
180
+
181
+
182
+ def test_no_empty_slice_warning():
183
+ # test if we avoid numpy warnings for computing over empty arrays
184
+ n_components = 10
185
+ n_features = n_components + 2 # anything > n_comps triggered it in 0.16
186
+ X = np.random.uniform(-1, 1, size=(n_components, n_features))
187
+ pca = PCA(n_components=n_components)
188
+ with warnings.catch_warnings():
189
+ warnings.simplefilter("error", RuntimeWarning)
190
+ pca.fit(X)
191
+
192
+
193
+ @pytest.mark.parametrize("copy", [True, False])
194
+ @pytest.mark.parametrize("solver", PCA_SOLVERS)
195
+ def test_whitening(solver, copy):
196
+ # Check that PCA output has unit-variance
197
+ rng = np.random.RandomState(0)
198
+ n_samples = 100
199
+ n_features = 80
200
+ n_components = 30
201
+ rank = 50
202
+
203
+ # some low rank data with correlated features
204
+ X = np.dot(
205
+ rng.randn(n_samples, rank),
206
+ np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features)),
207
+ )
208
+ # the component-wise variance of the first 50 features is 3 times the
209
+ # mean component-wise variance of the remaining 30 features
210
+ X[:, :50] *= 3
211
+
212
+ assert X.shape == (n_samples, n_features)
213
+
214
+ # the component-wise variance is thus highly varying:
215
+ assert X.std(axis=0).std() > 43.8
216
+
217
+ # whiten the data while projecting to the lower dim subspace
218
+ X_ = X.copy() # make sure we keep an original across iterations.
219
+ pca = PCA(
220
+ n_components=n_components,
221
+ whiten=True,
222
+ copy=copy,
223
+ svd_solver=solver,
224
+ random_state=0,
225
+ iterated_power=7,
226
+ )
227
+ # test fit_transform
228
+ X_whitened = pca.fit_transform(X_.copy())
229
+ assert X_whitened.shape == (n_samples, n_components)
230
+ X_whitened2 = pca.transform(X_)
231
+ assert_allclose(X_whitened, X_whitened2, rtol=5e-4)
232
+
233
+ assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(n_components))
234
+ assert_allclose(X_whitened.mean(axis=0), np.zeros(n_components), atol=1e-12)
235
+
236
+ X_ = X.copy()
237
+ pca = PCA(
238
+ n_components=n_components, whiten=False, copy=copy, svd_solver=solver
239
+ ).fit(X_.copy())
240
+ X_unwhitened = pca.transform(X_)
241
+ assert X_unwhitened.shape == (n_samples, n_components)
242
+
243
+ # in that case the output components still have varying variances
244
+ assert X_unwhitened.std(axis=0).std() == pytest.approx(74.1, rel=1e-1)
245
+ # we always center, so no test for non-centering.
246
+
247
+
248
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
249
+ def test_pca_explained_variance_equivalence_solver(svd_solver):
250
+ rng = np.random.RandomState(0)
251
+ n_samples, n_features = 100, 80
252
+ X = rng.randn(n_samples, n_features)
253
+
254
+ pca_full = PCA(n_components=2, svd_solver="full")
255
+ pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=0)
256
+
257
+ pca_full.fit(X)
258
+ pca_other.fit(X)
259
+
260
+ assert_allclose(
261
+ pca_full.explained_variance_, pca_other.explained_variance_, rtol=5e-2
262
+ )
263
+ assert_allclose(
264
+ pca_full.explained_variance_ratio_,
265
+ pca_other.explained_variance_ratio_,
266
+ rtol=5e-2,
267
+ )
268
+
269
+
270
+ @pytest.mark.parametrize(
271
+ "X",
272
+ [
273
+ np.random.RandomState(0).randn(100, 80),
274
+ datasets.make_classification(100, 80, n_informative=78, random_state=0)[0],
275
+ ],
276
+ ids=["random-data", "correlated-data"],
277
+ )
278
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
279
+ def test_pca_explained_variance_empirical(X, svd_solver):
280
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=0)
281
+ X_pca = pca.fit_transform(X)
282
+ assert_allclose(pca.explained_variance_, np.var(X_pca, ddof=1, axis=0))
283
+
284
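+ # The explained variances should equal the leading eigenvalues of the sample
+ # covariance matrix.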
+ expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
285
+ expected_result = sorted(expected_result, reverse=True)[:2]
286
+ assert_allclose(pca.explained_variance_, expected_result, rtol=5e-3)
287
+
288
+
289
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
290
+ def test_pca_singular_values_consistency(svd_solver):
291
+ rng = np.random.RandomState(0)
292
+ n_samples, n_features = 100, 80
293
+ X = rng.randn(n_samples, n_features)
294
+
295
+ pca_full = PCA(n_components=2, svd_solver="full", random_state=rng)
296
+ pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
297
+
298
+ pca_full.fit(X)
299
+ pca_other.fit(X)
300
+
301
+ assert_allclose(pca_full.singular_values_, pca_other.singular_values_, rtol=5e-3)
302
+
303
+
304
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
305
+ def test_pca_singular_values(svd_solver):
306
+ rng = np.random.RandomState(0)
307
+ n_samples, n_features = 100, 80
308
+ X = rng.randn(n_samples, n_features)
309
+
310
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
311
+ X_trans = pca.fit_transform(X)
312
+
313
+ # compare to the Frobenius norm
314
+ assert_allclose(
315
+ np.sum(pca.singular_values_**2), np.linalg.norm(X_trans, "fro") ** 2
316
+ )
317
+ # Compare to the 2-norms of the score vectors
318
+ assert_allclose(pca.singular_values_, np.sqrt(np.sum(X_trans**2, axis=0)))
319
+
320
+ # set the singular values and see what we get back
321
+ n_samples, n_features = 100, 110
322
+ X = rng.randn(n_samples, n_features)
323
+
324
+ pca = PCA(n_components=3, svd_solver=svd_solver, random_state=rng)
325
+ X_trans = pca.fit_transform(X)
326
+ X_trans /= np.sqrt(np.sum(X_trans**2, axis=0))
327
+ X_trans[:, 0] *= 3.142
328
+ X_trans[:, 1] *= 2.718
329
+ X_hat = np.dot(X_trans, pca.components_)
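+ # X_hat combines orthonormal score directions and components scaled by the
+ # chosen values, so refitting should recover singular values [3.142, 2.718, 1.0].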
330
+ pca.fit(X_hat)
331
+ assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0])
332
+
333
+
334
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
335
+ def test_pca_check_projection(svd_solver):
336
+ # Test that the projection of data is correct
337
+ rng = np.random.RandomState(0)
338
+ n, p = 100, 3
339
+ X = rng.randn(n, p) * 0.1
340
+ X[:10] += np.array([3, 4, 5])
341
+ Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
342
+
343
+ Yt = PCA(n_components=2, svd_solver=svd_solver).fit(X).transform(Xt)
344
+ Yt /= np.sqrt((Yt**2).sum())
345
+
346
+ assert_allclose(np.abs(Yt[0][0]), 1.0, rtol=5e-3)
347
+
348
+
349
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
350
+ def test_pca_check_projection_list(svd_solver):
351
+ # Test that the projection of data is correct
352
+ X = [[1.0, 0.0], [0.0, 1.0]]
353
+ pca = PCA(n_components=1, svd_solver=svd_solver, random_state=0)
354
+ X_trans = pca.fit_transform(X)
355
+ assert X_trans.shape == (2, 1)
356
+ assert_allclose(X_trans.mean(), 0.00, atol=1e-12)
357
+ assert_allclose(X_trans.std(), 0.71, rtol=5e-3)
358
+
359
+
360
+ @pytest.mark.parametrize("svd_solver", ["full", "arpack", "randomized"])
361
+ @pytest.mark.parametrize("whiten", [False, True])
362
+ def test_pca_inverse(svd_solver, whiten):
363
+ # Test that the projection of data can be inverted
364
+ rng = np.random.RandomState(0)
365
+ n, p = 50, 3
366
+ X = rng.randn(n, p) # spherical data
367
+ X[:, 1] *= 0.00001 # make middle component relatively small
368
+ X += [5, 4, 3] # make a large mean
369
+
370
+ # same check that we can find the original data from the transformed
371
+ # signal (since the data is almost of rank n_components)
372
+ pca = PCA(n_components=2, svd_solver=svd_solver, whiten=whiten).fit(X)
373
+ Y = pca.transform(X)
374
+ Y_inverse = pca.inverse_transform(Y)
375
+ assert_allclose(X, Y_inverse, rtol=5e-6)
376
+
377
+
378
+ @pytest.mark.parametrize(
379
+ "data", [np.array([[0, 1, 0], [1, 0, 0]]), np.array([[0, 1, 0], [1, 0, 0]]).T]
380
+ )
381
+ @pytest.mark.parametrize(
382
+ "svd_solver, n_components, err_msg",
383
+ [
384
+ ("arpack", 0, r"must be between 1 and min\(n_samples, n_features\)"),
385
+ ("randomized", 0, r"must be between 1 and min\(n_samples, n_features\)"),
386
+ ("arpack", 2, r"must be strictly less than min"),
387
+ (
388
+ "auto",
389
+ 3,
390
+ (
391
+ r"n_components=3 must be between 0 and min\(n_samples, "
392
+ r"n_features\)=2 with svd_solver='full'"
393
+ ),
394
+ ),
395
+ ],
396
+ )
397
+ def test_pca_validation(svd_solver, data, n_components, err_msg):
398
+ # Ensures that solver-specific extreme inputs for the n_components
399
+ # parameter raise errors
400
+ smallest_d = 2 # The smallest dimension
401
+ pca_fitted = PCA(n_components, svd_solver=svd_solver)
402
+
403
+ with pytest.raises(ValueError, match=err_msg):
404
+ pca_fitted.fit(data)
405
+
406
+ # Additional case for arpack
407
+ if svd_solver == "arpack":
408
+ n_components = smallest_d
409
+
410
+ err_msg = (
411
+ "n_components={}L? must be strictly less than "
412
+ r"min\(n_samples, n_features\)={}L? with "
413
+ "svd_solver='arpack'".format(n_components, smallest_d)
414
+ )
415
+ with pytest.raises(ValueError, match=err_msg):
416
+ PCA(n_components, svd_solver=svd_solver).fit(data)
417
+
418
+
419
+ @pytest.mark.parametrize(
420
+ "solver, n_components_",
421
+ [
422
+ ("full", min(iris.data.shape)),
423
+ ("arpack", min(iris.data.shape) - 1),
424
+ ("randomized", min(iris.data.shape)),
425
+ ],
426
+ )
427
+ @pytest.mark.parametrize("data", [iris.data, iris.data.T])
428
+ def test_n_components_none(data, solver, n_components_):
429
+ pca = PCA(svd_solver=solver)
430
+ pca.fit(data)
431
+ assert pca.n_components_ == n_components_
432
+
433
+
434
+ @pytest.mark.parametrize("svd_solver", ["auto", "full"])
435
+ def test_n_components_mle(svd_solver):
436
+ # Ensure that n_components == 'mle' doesn't raise error for auto/full
437
+ rng = np.random.RandomState(0)
438
+ n_samples, n_features = 600, 10
439
+ X = rng.randn(n_samples, n_features)
440
+ pca = PCA(n_components="mle", svd_solver=svd_solver)
441
+ pca.fit(X)
442
+ assert pca.n_components_ == 1
443
+
444
+
445
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
446
+ def test_n_components_mle_error(svd_solver):
447
+ # Ensure that n_components == 'mle' will raise an error for unsupported
448
+ # solvers
449
+ rng = np.random.RandomState(0)
450
+ n_samples, n_features = 600, 10
451
+ X = rng.randn(n_samples, n_features)
452
+ pca = PCA(n_components="mle", svd_solver=svd_solver)
453
+ err_msg = "n_components='mle' cannot be a string with svd_solver='{}'".format(
454
+ svd_solver
455
+ )
456
+ with pytest.raises(ValueError, match=err_msg):
457
+ pca.fit(X)
458
+
459
+
460
+ def test_pca_dim():
461
+ # Check automated dimensionality setting
462
+ rng = np.random.RandomState(0)
463
+ n, p = 100, 5
464
+ X = rng.randn(n, p) * 0.1
465
+ X[:10] += np.array([3, 4, 5, 1, 2])
466
+ pca = PCA(n_components="mle", svd_solver="full").fit(X)
467
+ assert pca.n_components == "mle"
468
+ assert pca.n_components_ == 1
469
+
470
+
471
+ def test_infer_dim_1():
472
+ # TODO: explain what this is testing
473
+ # Or at least use explicit variable names...
474
+ n, p = 1000, 5
475
+ rng = np.random.RandomState(0)
476
+ X = (
477
+ rng.randn(n, p) * 0.1
478
+ + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
479
+ + np.array([1, 0, 7, 4, 6])
480
+ )
481
+ pca = PCA(n_components=p, svd_solver="full")
482
+ pca.fit(X)
483
+ spect = pca.explained_variance_
484
+ ll = np.array([_assess_dimension(spect, k, n) for k in range(1, p)])
485
+ assert ll[1] > ll.max() - 0.01 * n
486
+
487
+
488
+ def test_infer_dim_2():
489
+ # TODO: explain what this is testing
490
+ # Or at least use explicit variable names...
491
+ n, p = 1000, 5
492
+ rng = np.random.RandomState(0)
493
+ X = rng.randn(n, p) * 0.1
494
+ X[:10] += np.array([3, 4, 5, 1, 2])
495
+ X[10:20] += np.array([6, 0, 7, 2, -1])
496
+ pca = PCA(n_components=p, svd_solver="full")
497
+ pca.fit(X)
498
+ spect = pca.explained_variance_
499
+ assert _infer_dimension(spect, n) > 1
500
+
501
+
502
+ def test_infer_dim_3():
503
+ n, p = 100, 5
504
+ rng = np.random.RandomState(0)
505
+ X = rng.randn(n, p) * 0.1
506
+ X[:10] += np.array([3, 4, 5, 1, 2])
507
+ X[10:20] += np.array([6, 0, 7, 2, -1])
508
+ X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
509
+ pca = PCA(n_components=p, svd_solver="full")
510
+ pca.fit(X)
511
+ spect = pca.explained_variance_
512
+ assert _infer_dimension(spect, n) > 2
513
+
514
+
515
+ @pytest.mark.parametrize(
516
+ "X, n_components, n_components_validated",
517
+ [
518
+ (iris.data, 0.95, 2), # row > col
519
+ (iris.data, 0.01, 1), # row > col
520
+ (np.random.RandomState(0).rand(5, 20), 0.5, 2),
521
+ ], # row < col
522
+ )
523
+ def test_infer_dim_by_explained_variance(X, n_components, n_components_validated):
524
+ pca = PCA(n_components=n_components, svd_solver="full")
525
+ pca.fit(X)
526
+ assert pca.n_components == pytest.approx(n_components)
527
+ assert pca.n_components_ == n_components_validated
528
+
529
+
530
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
531
+ def test_pca_score(svd_solver):
532
+ # Test that probabilistic PCA scoring yields a reasonable score
533
+ n, p = 1000, 3
534
+ rng = np.random.RandomState(0)
535
+ X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
536
+ pca = PCA(n_components=2, svd_solver=svd_solver)
537
+ pca.fit(X)
538
+
539
+ ll1 = pca.score(X)
540
+ h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1**2) * p
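+ # h is the negative differential entropy of an isotropic Gaussian with
+ # sigma=0.1 in p dimensions, i.e. the expected per-sample log-likelihood.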
541
+ assert_allclose(ll1 / h, 1, rtol=5e-2)
542
+
543
+ ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5]))
544
+ assert ll1 > ll2
545
+
546
+ pca = PCA(n_components=2, whiten=True, svd_solver=svd_solver)
547
+ pca.fit(X)
548
+ ll2 = pca.score(X)
549
+ assert ll1 > ll2
550
+
551
+
552
+ def test_pca_score3():
553
+ # Check that probabilistic PCA selects the right model
554
+ n, p = 200, 3
555
+ rng = np.random.RandomState(0)
556
+ Xl = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
557
+ Xt = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
558
+ ll = np.zeros(p)
559
+ for k in range(p):
560
+ pca = PCA(n_components=k, svd_solver="full")
561
+ pca.fit(Xl)
562
+ ll[k] = pca.score(Xt)
563
+
564
+ assert ll.argmax() == 1
565
+
566
+
567
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
568
+ def test_pca_sanity_noise_variance(svd_solver):
569
+ # Sanity check for the noise_variance_. For more details see
570
+ # https://github.com/scikit-learn/scikit-learn/issues/7568
571
+ # https://github.com/scikit-learn/scikit-learn/issues/8541
572
+ # https://github.com/scikit-learn/scikit-learn/issues/8544
573
+ X, _ = datasets.load_digits(return_X_y=True)
574
+ pca = PCA(n_components=30, svd_solver=svd_solver, random_state=0)
575
+ pca.fit(X)
576
+ assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0)
577
+
578
+
579
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
580
+ def test_pca_score_consistency_solvers(svd_solver):
581
+ # Check the consistency of score between solvers
582
+ X, _ = datasets.load_digits(return_X_y=True)
583
+ pca_full = PCA(n_components=30, svd_solver="full", random_state=0)
584
+ pca_other = PCA(n_components=30, svd_solver=svd_solver, random_state=0)
585
+ pca_full.fit(X)
586
+ pca_other.fit(X)
587
+ assert_allclose(pca_full.score(X), pca_other.score(X), rtol=5e-6)
588
+
589
+
590
+ # arpack raises ValueError for n_components == min(n_samples, n_features)
591
+ @pytest.mark.parametrize("svd_solver", ["full", "randomized"])
592
+ def test_pca_zero_noise_variance_edge_cases(svd_solver):
593
+ # ensure that noise_variance_ is 0 in edge cases
594
+ # when n_components == min(n_samples, n_features)
595
+ n, p = 100, 3
596
+ rng = np.random.RandomState(0)
597
+ X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
598
+
599
+ pca = PCA(n_components=p, svd_solver=svd_solver)
600
+ pca.fit(X)
601
+ assert pca.noise_variance_ == 0
602
+ # Non-regression test for gh-12489
603
+ # ensure no divide-by-zero error for n_components == n_features < n_samples
604
+ pca.score(X)
605
+
606
+ pca.fit(X.T)
607
+ assert pca.noise_variance_ == 0
608
+ # Non-regression test for gh-12489
609
+ # ensure no divide-by-zero error for n_components == n_samples < n_features
610
+ pca.score(X.T)
611
+
612
+
613
+ @pytest.mark.parametrize(
614
+ "data, n_components, expected_solver",
615
+ [ # case: n_components in (0,1) => 'full'
616
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 0.5, "full"),
617
+ # case: max(X.shape) <= 500 => 'full'
618
+ (np.random.RandomState(0).uniform(size=(10, 50)), 5, "full"),
619
+ # case: n_components >= .8 * min(X.shape) => 'full'
620
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 50, "full"),
621
+ # n_components >= 1 and n_components < .8*min(X.shape) => 'randomized'
622
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 10, "randomized"),
623
+ ],
624
+ )
625
+ def test_pca_svd_solver_auto(data, n_components, expected_solver):
626
+ pca_auto = PCA(n_components=n_components, random_state=0)
627
+ pca_test = PCA(
628
+ n_components=n_components, svd_solver=expected_solver, random_state=0
629
+ )
630
+ pca_auto.fit(data)
631
+ pca_test.fit(data)
632
+ assert_allclose(pca_auto.components_, pca_test.components_)
633
+
634
+
635
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
636
+ def test_pca_deterministic_output(svd_solver):
637
+ rng = np.random.RandomState(0)
638
+ X = rng.rand(10, 10)
639
+
640
+ transformed_X = np.zeros((20, 2))
641
+ for i in range(20):
642
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
643
+ transformed_X[i, :] = pca.fit_transform(X)[0]
644
+ assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
645
+
646
+
647
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
648
+ def test_pca_dtype_preservation(svd_solver):
649
+ check_pca_float_dtype_preservation(svd_solver)
650
+ check_pca_int_dtype_upcast_to_double(svd_solver)
651
+
652
+
653
+ def check_pca_float_dtype_preservation(svd_solver):
654
+ # Ensure that PCA does not upscale the dtype when input is float32
655
+ X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False)
656
+ X_32 = X_64.astype(np.float32)
657
+
658
+ pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64)
659
+ pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_32)
660
+
661
+ assert pca_64.components_.dtype == np.float64
662
+ assert pca_32.components_.dtype == np.float32
663
+ assert pca_64.transform(X_64).dtype == np.float64
664
+ assert pca_32.transform(X_32).dtype == np.float32
665
+
666
+ # the rtol is set such that the test passes on all platforms tested on
667
+ # conda-forge: PR#15775
668
+ # see: https://github.com/conda-forge/scikit-learn-feedstock/pull/113
669
+ assert_allclose(pca_64.components_, pca_32.components_, rtol=2e-4)
670
+
671
+
672
+ def check_pca_int_dtype_upcast_to_double(svd_solver):
673
+ # Ensure that all int types will be upcast to float64
674
+ X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
675
+ X_i64 = X_i64.astype(np.int64, copy=False)
676
+ X_i32 = X_i64.astype(np.int32, copy=False)
677
+
678
+ pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64)
679
+ pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32)
680
+
681
+ assert pca_64.components_.dtype == np.float64
682
+ assert pca_32.components_.dtype == np.float64
683
+ assert pca_64.transform(X_i64).dtype == np.float64
684
+ assert pca_32.transform(X_i32).dtype == np.float64
685
+
686
+ assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-4)
687
+
688
+
689
+ def test_pca_n_components_mostly_explained_variance_ratio():
690
+ # when n_components is the second highest cumulative sum of the
691
+ # explained_variance_ratio_, then n_components_ should equal the
692
+ # number of features in the dataset #15669
693
+ X, y = load_iris(return_X_y=True)
694
+ pca1 = PCA().fit(X, y)
695
+
696
+ n_components = pca1.explained_variance_ratio_.cumsum()[-2]
697
+ pca2 = PCA(n_components=n_components).fit(X, y)
698
+ assert pca2.n_components_ == X.shape[1]
699
+
700
+
701
+ def test_assess_dimension_bad_rank():
702
+ # Test error when tested rank not in [1, n_features - 1]
703
+ spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
704
+ n_samples = 10
705
+ for rank in (0, 5):
706
+ with pytest.raises(ValueError, match=r"should be in \[1, n_features - 1\]"):
707
+ _assess_dimension(spectrum, rank, n_samples)
708
+
709
+
710
+ def test_small_eigenvalues_mle():
711
+ # Test that ranks associated with tiny eigenvalues are given a log-likelihood
712
+ # of -inf. The inferred rank will be 1.
713
+ spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
714
+
715
+ assert _assess_dimension(spectrum, rank=1, n_samples=10) > -np.inf
716
+
717
+ for rank in (2, 3):
718
+ assert _assess_dimension(spectrum, rank, 10) == -np.inf
719
+
720
+ assert _infer_dimension(spectrum, 10) == 1
721
+
722
+
723
+ def test_mle_redundant_data():
724
+ # Test 'mle' with pathological X: only one relevant feature should give a
725
+ # rank of 1
726
+ X, _ = datasets.make_classification(
727
+ n_features=20,
728
+ n_informative=1,
729
+ n_repeated=18,
730
+ n_redundant=1,
731
+ n_clusters_per_class=1,
732
+ random_state=42,
733
+ )
734
+ pca = PCA(n_components="mle").fit(X)
735
+ assert pca.n_components_ == 1
736
+
737
+
738
+ def test_fit_mle_too_few_samples():
739
+ # Tests that an error is raised when the number of samples is smaller
740
+ # than the number of features during an mle fit
741
+ X, _ = datasets.make_classification(n_samples=20, n_features=21, random_state=42)
742
+
743
+ pca = PCA(n_components="mle", svd_solver="full")
744
+ with pytest.raises(
745
+ ValueError,
746
+ match="n_components='mle' is only supported if n_samples >= n_features",
747
+ ):
748
+ pca.fit(X)
749
+
750
+
751
+ def test_mle_simple_case():
752
+ # non-regression test for issue
753
+ # https://github.com/scikit-learn/scikit-learn/issues/16730
754
+ n_samples, n_dim = 1000, 10
755
+ X = np.random.RandomState(0).randn(n_samples, n_dim)
756
+ X[:, -1] = np.mean(X[:, :-1], axis=-1) # true X dim is ndim - 1
757
+ pca_skl = PCA("mle", svd_solver="full")
758
+ pca_skl.fit(X)
759
+ assert pca_skl.n_components_ == n_dim - 1
760
+
761
+
762
+ def test_assess_dimesion_rank_one():
763
+ # Make sure assess_dimension works properly on a matrix of rank 1
764
+ n_samples, n_features = 9, 6
765
+ X = np.ones((n_samples, n_features)) # rank 1 matrix
766
+ _, s, _ = np.linalg.svd(X, full_matrices=True)
767
+ # except for the first, all singular values are 0 or close to 0 (floating point)
768
+ assert_allclose(s[1:], np.zeros(n_features - 1), atol=1e-12)
769
+
770
+ assert np.isfinite(_assess_dimension(s, rank=1, n_samples=n_samples))
771
+ for rank in range(2, n_features):
772
+ assert _assess_dimension(s, rank, n_samples) == -np.inf
773
+
774
+
775
+ def test_pca_randomized_svd_n_oversamples():
776
+ """Check that exposing and setting `n_oversamples` will provide accurate results
777
+ even when `X` has a large number of features.
778
+
779
+ Non-regression test for:
780
+ https://github.com/scikit-learn/scikit-learn/issues/20589
781
+ """
782
+ rng = np.random.RandomState(0)
783
+ n_features = 100
784
+ X = rng.randn(1_000, n_features)
785
+
786
+ # The default value of `n_oversamples` will lead to inaccurate results
787
+ # We force it to the number of features.
788
+ pca_randomized = PCA(
789
+ n_components=1,
790
+ svd_solver="randomized",
791
+ n_oversamples=n_features,
792
+ random_state=0,
793
+ ).fit(X)
794
+ pca_full = PCA(n_components=1, svd_solver="full").fit(X)
795
+ pca_arpack = PCA(n_components=1, svd_solver="arpack", random_state=0).fit(X)
796
+
797
+ assert_allclose(np.abs(pca_full.components_), np.abs(pca_arpack.components_))
798
+ assert_allclose(np.abs(pca_randomized.components_), np.abs(pca_arpack.components_))
799
+
800
+
801
+ def test_feature_names_out():
802
+ """Check feature names out for PCA."""
803
+ pca = PCA(n_components=2).fit(iris.data)
804
+
805
+ names = pca.get_feature_names_out()
806
+ assert_array_equal([f"pca{i}" for i in range(2)], names)
807
+
808
+
809
+ @pytest.mark.parametrize("copy", [True, False])
810
+ def test_variance_correctness(copy):
811
+ """Check the accuracy of PCA's internal variance calculation"""
812
+ rng = np.random.RandomState(0)
813
+ X = rng.randn(1000, 200)
814
+ pca = PCA().fit(X)
815
+ pca_var = pca.explained_variance_ / pca.explained_variance_ratio_
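+ # explained_variance_ratio_ is explained_variance_ divided by the total
+ # variance of X, so this ratio should recover the total variance for each component.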
816
+ true_var = np.var(X, ddof=1, axis=0).sum()
817
+ np.testing.assert_allclose(pca_var, true_var)
818
+
819
+
820
+ def check_array_api_get_precision(name, estimator, array_namespace, device, dtype_name):
821
+ xp = _array_api_for_tests(array_namespace, device)
822
+ iris_np = iris.data.astype(dtype_name)
823
+ iris_xp = xp.asarray(iris_np, device=device)
824
+
825
+ estimator.fit(iris_np)
826
+ precision_np = estimator.get_precision()
827
+ covariance_np = estimator.get_covariance()
828
+
829
+ with config_context(array_api_dispatch=True):
830
+ estimator_xp = clone(estimator).fit(iris_xp)
831
+ precision_xp = estimator_xp.get_precision()
832
+ assert precision_xp.shape == (4, 4)
833
+ assert precision_xp.dtype == iris_xp.dtype
834
+
835
+ assert_allclose(
836
+ _convert_to_numpy(precision_xp, xp=xp),
837
+ precision_np,
838
+ atol=_atol_for_type(dtype_name),
839
+ )
840
+ covariance_xp = estimator_xp.get_covariance()
841
+ assert covariance_xp.shape == (4, 4)
842
+ assert covariance_xp.dtype == iris_xp.dtype
843
+
844
+ assert_allclose(
845
+ _convert_to_numpy(covariance_xp, xp=xp),
846
+ covariance_np,
847
+ atol=_atol_for_type(dtype_name),
848
+ )
849
+
850
+
851
+ @pytest.mark.parametrize(
852
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
853
+ )
854
+ @pytest.mark.parametrize(
855
+ "check",
856
+ [check_array_api_input_and_values, check_array_api_get_precision],
857
+ ids=_get_check_estimator_ids,
858
+ )
859
+ @pytest.mark.parametrize(
860
+ "estimator",
861
+ [
862
+ PCA(n_components=2, svd_solver="full"),
863
+ PCA(n_components=0.1, svd_solver="full", whiten=True),
864
+ PCA(
865
+ n_components=2,
866
+ svd_solver="randomized",
867
+ power_iteration_normalizer="QR",
868
+ random_state=0, # how to use global_random_seed here?
869
+ ),
870
+ ],
871
+ ids=_get_check_estimator_ids,
872
+ )
873
+ def test_pca_array_api_compliance(
874
+ estimator, check, array_namespace, device, dtype_name
875
+ ):
876
+ name = estimator.__class__.__name__
877
+ check(name, estimator, array_namespace, device=device, dtype_name=dtype_name)
878
+
879
+
880
+ @pytest.mark.parametrize(
881
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
882
+ )
883
+ @pytest.mark.parametrize(
884
+ "check",
885
+ [check_array_api_get_precision],
886
+ ids=_get_check_estimator_ids,
887
+ )
888
+ @pytest.mark.parametrize(
889
+ "estimator",
890
+ [
891
+ # PCA with mle cannot use check_array_api_input_and_values because of
892
+ # rounding errors in the noisy (low variance) components. Even checking
893
+ # the shape of the `components_` is problematic because the number of
894
+ # components depends on trimming threshold of the mle algorithm which
895
+ # can depend on device-specific rounding errors.
896
+ PCA(n_components="mle", svd_solver="full"),
897
+ ],
898
+ ids=_get_check_estimator_ids,
899
+ )
900
+ def test_pca_mle_array_api_compliance(
901
+ estimator, check, array_namespace, device, dtype_name
902
+ ):
903
+ name = estimator.__class__.__name__
904
+ check(name, estimator, array_namespace, device=device, dtype_name=dtype_name)
905
+
906
+ # Simpler variant of the generic check_array_api_input checker tailored for
907
+ # the specific case of PCA with mle-trimmed components.
908
+ xp = _array_api_for_tests(array_namespace, device)
909
+
910
+ X, y = make_classification(random_state=42)
911
+ X = X.astype(dtype_name, copy=False)
912
+ atol = _atol_for_type(X.dtype)
913
+
914
+ est = clone(estimator)
915
+
916
+ X_xp = xp.asarray(X, device=device)
917
+ y_xp = xp.asarray(y, device=device)
918
+
919
+ est.fit(X, y)
920
+
921
+ components_np = est.components_
922
+ explained_variance_np = est.explained_variance_
923
+
924
+ est_xp = clone(est)
925
+ with config_context(array_api_dispatch=True):
926
+ est_xp.fit(X_xp, y_xp)
927
+ components_xp = est_xp.components_
928
+ assert array_device(components_xp) == array_device(X_xp)
929
+ components_xp_np = _convert_to_numpy(components_xp, xp=xp)
930
+
931
+ explained_variance_xp = est_xp.explained_variance_
932
+ assert array_device(explained_variance_xp) == array_device(X_xp)
933
+ explained_variance_xp_np = _convert_to_numpy(explained_variance_xp, xp=xp)
934
+
935
+ assert components_xp_np.dtype == components_np.dtype
936
+ assert components_xp_np.shape[1] == components_np.shape[1]
937
+ assert explained_variance_xp_np.dtype == explained_variance_np.dtype
938
+
939
+ # Check that the explained variance values match for the
940
+ # common components:
941
+ min_components = min(components_xp_np.shape[0], components_np.shape[0])
942
+ assert_allclose(
943
+ explained_variance_xp_np[:min_components],
944
+ explained_variance_np[:min_components],
945
+ atol=atol,
946
+ )
947
+
948
+ # If the number of components differ, check that the explained variance of
949
+ # the trimmed components is very small.
950
+ if components_xp_np.shape[0] != components_np.shape[0]:
951
+ reference_variance = explained_variance_np[-1]
952
+ extra_variance_np = explained_variance_np[min_components:]
953
+ extra_variance_xp_np = explained_variance_xp_np[min_components:]
954
+ assert all(np.abs(extra_variance_np - reference_variance) < atol)
955
+ assert all(np.abs(extra_variance_xp_np - reference_variance) < atol)
956
+
957
+
958
+ def test_array_api_error_and_warnings_on_unsupported_params():
959
+ pytest.importorskip("array_api_compat")
960
+ xp = pytest.importorskip("numpy.array_api")
961
+ iris_xp = xp.asarray(iris.data)
962
+
963
+ pca = PCA(n_components=2, svd_solver="arpack", random_state=0)
964
+ expected_msg = re.escape(
965
+ "PCA with svd_solver='arpack' is not supported for Array API inputs."
966
+ )
967
+ with pytest.raises(ValueError, match=expected_msg):
968
+ with config_context(array_api_dispatch=True):
969
+ pca.fit(iris_xp)
970
+
971
+ pca.set_params(svd_solver="randomized", power_iteration_normalizer="LU")
972
+ expected_msg = re.escape(
973
+ "Array API does not support LU factorization. Set"
974
+ " `power_iteration_normalizer='QR'` instead."
975
+ )
976
+ with pytest.raises(ValueError, match=expected_msg):
977
+ with config_context(array_api_dispatch=True):
978
+ pca.fit(iris_xp)
979
+
980
+ pca.set_params(svd_solver="randomized", power_iteration_normalizer="auto")
981
+ expected_msg = re.escape(
982
+ "Array API does not support LU factorization, falling back to QR instead. Set"
983
+ " `power_iteration_normalizer='QR'` explicitly to silence this warning."
984
+ )
985
+ with pytest.warns(UserWarning, match=expected_msg):
986
+ with config_context(array_api_dispatch=True):
987
+ pca.fit(iris_xp)
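The array API tests above all go through `config_context(array_api_dispatch=True)`; a minimal sketch of that usage pattern, assuming `array_api_compat` is installed and NumPy still ships the experimental `numpy.array_api` namespace used by these tests (any other array-API-compatible namespace would work the same way):

import numpy as np
import numpy.array_api as xp

from sklearn import config_context
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

X_xp = xp.asarray(load_iris().data.astype(np.float64))

with config_context(array_api_dispatch=True):
    pca = PCA(n_components=2, svd_solver="full").fit(X_xp)
    print(type(pca.components_))    # fitted attributes stay in the input's array namespace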
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py ADDED
@@ -0,0 +1,212 @@
1
+ """Test truncated SVD transformer."""
2
+
3
+ import numpy as np
4
+ import pytest
5
+ import scipy.sparse as sp
6
+
7
+ from sklearn.decomposition import PCA, TruncatedSVD
8
+ from sklearn.utils import check_random_state
9
+ from sklearn.utils._testing import assert_allclose, assert_array_less
10
+
11
+ SVD_SOLVERS = ["arpack", "randomized"]
12
+
13
+
14
+ @pytest.fixture(scope="module")
15
+ def X_sparse():
16
+ # Make an X that looks somewhat like a small tf-idf matrix.
17
+ rng = check_random_state(42)
18
+ X = sp.random(60, 55, density=0.2, format="csr", random_state=rng)
19
+ X.data[:] = 1 + np.log(X.data)
20
+ return X
21
+
22
+
23
+ @pytest.mark.parametrize("solver", ["randomized"])
24
+ @pytest.mark.parametrize("kind", ("dense", "sparse"))
25
+ def test_solvers(X_sparse, solver, kind):
26
+ X = X_sparse if kind == "sparse" else X_sparse.toarray()
27
+ svd_a = TruncatedSVD(30, algorithm="arpack")
28
+ svd = TruncatedSVD(30, algorithm=solver, random_state=42, n_oversamples=100)
29
+
30
+ Xa = svd_a.fit_transform(X)[:, :6]
31
+ Xr = svd.fit_transform(X)[:, :6]
32
+ assert_allclose(Xa, Xr, rtol=2e-3)
33
+
34
+ comp_a = np.abs(svd_a.components_)
35
+ comp = np.abs(svd.components_)
36
+ # All elements are equal, but some elements are more equal than others.
37
+ assert_allclose(comp_a[:9], comp[:9], rtol=1e-3)
38
+ assert_allclose(comp_a[9:], comp[9:], atol=1e-2)
39
+
40
+
41
+ @pytest.mark.parametrize("n_components", (10, 25, 41, 55))
42
+ def test_attributes(n_components, X_sparse):
43
+ n_features = X_sparse.shape[1]
44
+ tsvd = TruncatedSVD(n_components).fit(X_sparse)
45
+ assert tsvd.n_components == n_components
46
+ assert tsvd.components_.shape == (n_components, n_features)
47
+
48
+
49
+ @pytest.mark.parametrize(
50
+ "algorithm, n_components",
51
+ [
52
+ ("arpack", 55),
53
+ ("arpack", 56),
54
+ ("randomized", 56),
55
+ ],
56
+ )
57
+ def test_too_many_components(X_sparse, algorithm, n_components):
58
+ tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
59
+ with pytest.raises(ValueError):
60
+ tsvd.fit(X_sparse)
61
+
62
+
63
+ @pytest.mark.parametrize("fmt", ("array", "csr", "csc", "coo", "lil"))
64
+ def test_sparse_formats(fmt, X_sparse):
65
+ n_samples = X_sparse.shape[0]
66
+ Xfmt = X_sparse.toarray() if fmt == "array" else getattr(X_sparse, "to" + fmt)()
67
+ tsvd = TruncatedSVD(n_components=11)
68
+ Xtrans = tsvd.fit_transform(Xfmt)
69
+ assert Xtrans.shape == (n_samples, 11)
70
+ Xtrans = tsvd.transform(Xfmt)
71
+ assert Xtrans.shape == (n_samples, 11)
72
+
73
+
74
+ @pytest.mark.parametrize("algo", SVD_SOLVERS)
75
+ def test_inverse_transform(algo, X_sparse):
76
+ # We need a lot of components for the reconstruction to be "almost
77
+ # equal" in all positions. XXX Test means or sums instead?
78
+ tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
79
+ Xt = tsvd.fit_transform(X_sparse)
80
+ Xinv = tsvd.inverse_transform(Xt)
81
+ assert_allclose(Xinv, X_sparse.toarray(), rtol=1e-1, atol=2e-1)
82
+
83
+
84
+ def test_integers(X_sparse):
85
+ n_samples = X_sparse.shape[0]
86
+ Xint = X_sparse.astype(np.int64)
87
+ tsvd = TruncatedSVD(n_components=6)
88
+ Xtrans = tsvd.fit_transform(Xint)
89
+ assert Xtrans.shape == (n_samples, tsvd.n_components)
90
+
91
+
92
+ @pytest.mark.parametrize("kind", ("dense", "sparse"))
93
+ @pytest.mark.parametrize("n_components", [10, 20])
94
+ @pytest.mark.parametrize("solver", SVD_SOLVERS)
95
+ def test_explained_variance(X_sparse, kind, n_components, solver):
96
+ X = X_sparse if kind == "sparse" else X_sparse.toarray()
97
+ svd = TruncatedSVD(n_components, algorithm=solver)
98
+ X_tr = svd.fit_transform(X)
99
+ # Assert that all the values are greater than 0
100
+ assert_array_less(0.0, svd.explained_variance_ratio_)
101
+
102
+ # Assert that total explained variance is less than 1
103
+ assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
104
+
105
+ # Test that explained_variance is correct
106
+ total_variance = np.var(X_sparse.toarray(), axis=0).sum()
107
+ variances = np.var(X_tr, axis=0)
108
+ true_explained_variance_ratio = variances / total_variance
109
+
110
+ assert_allclose(
111
+ svd.explained_variance_ratio_,
112
+ true_explained_variance_ratio,
113
+ )
114
+
115
+
116
+ @pytest.mark.parametrize("kind", ("dense", "sparse"))
117
+ @pytest.mark.parametrize("solver", SVD_SOLVERS)
118
+ def test_explained_variance_components_10_20(X_sparse, kind, solver):
119
+ X = X_sparse if kind == "sparse" else X_sparse.toarray()
120
+ svd_10 = TruncatedSVD(10, algorithm=solver, n_iter=10).fit(X)
121
+ svd_20 = TruncatedSVD(20, algorithm=solver, n_iter=10).fit(X)
122
+
123
+ # Assert that the first 10 explained variance ratios agree
124
+ assert_allclose(
125
+ svd_10.explained_variance_ratio_,
126
+ svd_20.explained_variance_ratio_[:10],
127
+ rtol=5e-3,
128
+ )
129
+
130
+ # Assert that 20 components has higher explained variance than 10
131
+ assert (
132
+ svd_20.explained_variance_ratio_.sum() > svd_10.explained_variance_ratio_.sum()
133
+ )
134
+
135
+
136
+ @pytest.mark.parametrize("solver", SVD_SOLVERS)
137
+ def test_singular_values_consistency(solver):
138
+ # Check that the TruncatedSVD output has the correct singular values
139
+ rng = np.random.RandomState(0)
140
+ n_samples, n_features = 100, 80
141
+ X = rng.randn(n_samples, n_features)
142
+
143
+ pca = TruncatedSVD(n_components=2, algorithm=solver, random_state=rng).fit(X)
144
+
145
+ # Compare to the Frobenius norm
146
+ X_pca = pca.transform(X)
147
+ assert_allclose(
148
+ np.sum(pca.singular_values_**2.0),
149
+ np.linalg.norm(X_pca, "fro") ** 2.0,
150
+ rtol=1e-2,
151
+ )
152
+
153
+ # Compare to the 2-norms of the score vectors
154
+ assert_allclose(
155
+ pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), rtol=1e-2
156
+ )
157
+
158
+
159
+ @pytest.mark.parametrize("solver", SVD_SOLVERS)
160
+ def test_singular_values_expected(solver):
161
+ # Set the singular values and see what we get back
162
+ rng = np.random.RandomState(0)
163
+ n_samples = 100
164
+ n_features = 110
165
+
166
+ X = rng.randn(n_samples, n_features)
167
+
168
+ pca = TruncatedSVD(n_components=3, algorithm=solver, random_state=rng)
169
+ X_pca = pca.fit_transform(X)
170
+
171
+ X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
172
+ X_pca[:, 0] *= 3.142
173
+ X_pca[:, 1] *= 2.718
174
+
175
+ X_hat_pca = np.dot(X_pca, pca.components_)
176
+ pca.fit(X_hat_pca)
177
+ assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0], rtol=1e-14)
178
+
179
+
180
+ def test_truncated_svd_eq_pca(X_sparse):
181
+ # TruncatedSVD should be equal to PCA on centered data
182
+
183
+ X_dense = X_sparse.toarray()
184
+
185
+ X_c = X_dense - X_dense.mean(axis=0)
186
+
187
+ params = dict(n_components=10, random_state=42)
188
+
189
+ svd = TruncatedSVD(algorithm="arpack", **params)
190
+ pca = PCA(svd_solver="arpack", **params)
191
+
192
+ Xt_svd = svd.fit_transform(X_c)
193
+ Xt_pca = pca.fit_transform(X_c)
194
+
195
+ assert_allclose(Xt_svd, Xt_pca, rtol=1e-9)
196
+ assert_allclose(pca.mean_, 0, atol=1e-9)
197
+ assert_allclose(svd.components_, pca.components_)
198
+
199
+
200
+ @pytest.mark.parametrize(
201
+ "algorithm, tol", [("randomized", 0.0), ("arpack", 1e-6), ("arpack", 0.0)]
202
+ )
203
+ @pytest.mark.parametrize("kind", ("dense", "sparse"))
204
+ def test_fit_transform(X_sparse, algorithm, tol, kind):
205
+ # fit_transform(X) should equal fit(X).transform(X)
206
+ X = X_sparse if kind == "sparse" else X_sparse.toarray()
207
+ svd = TruncatedSVD(
208
+ n_components=5, n_iter=7, random_state=42, algorithm=algorithm, tol=tol
209
+ )
210
+ X_transformed_1 = svd.fit_transform(X)
211
+ X_transformed_2 = svd.fit(X).transform(X)
212
+ assert_allclose(X_transformed_1, X_transformed_2)
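The equivalence checked in `test_truncated_svd_eq_pca` above is the practical rule of thumb for choosing between the two estimators: on mean-centered dense data they coincide. A small sketch under that assumption (random data with illustrative sizes; magnitudes are compared to stay agnostic to per-component sign conventions):

import numpy as np
from sklearn.decomposition import PCA, TruncatedSVD

rng = np.random.RandomState(0)
X = rng.randn(60, 20)
X_centered = X - X.mean(axis=0)     # TruncatedSVD does not center, so center by hand

svd = TruncatedSVD(n_components=5, algorithm="arpack", random_state=42)
pca = PCA(n_components=5, svd_solver="arpack", random_state=42)

np.testing.assert_allclose(
    np.abs(svd.fit_transform(X_centered)),
    np.abs(pca.fit_transform(X_centered)),
    atol=1e-8,
)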
venv/lib/python3.10/site-packages/sklearn/ensemble/__init__.py ADDED
@@ -0,0 +1,44 @@
1
+ """
2
+ The :mod:`sklearn.ensemble` module includes ensemble-based methods for
3
+ classification, regression and anomaly detection.
4
+ """
5
+ from ._bagging import BaggingClassifier, BaggingRegressor
6
+ from ._base import BaseEnsemble
7
+ from ._forest import (
8
+ ExtraTreesClassifier,
9
+ ExtraTreesRegressor,
10
+ RandomForestClassifier,
11
+ RandomForestRegressor,
12
+ RandomTreesEmbedding,
13
+ )
14
+ from ._gb import GradientBoostingClassifier, GradientBoostingRegressor
15
+ from ._hist_gradient_boosting.gradient_boosting import (
16
+ HistGradientBoostingClassifier,
17
+ HistGradientBoostingRegressor,
18
+ )
19
+ from ._iforest import IsolationForest
20
+ from ._stacking import StackingClassifier, StackingRegressor
21
+ from ._voting import VotingClassifier, VotingRegressor
22
+ from ._weight_boosting import AdaBoostClassifier, AdaBoostRegressor
23
+
24
+ __all__ = [
25
+ "BaseEnsemble",
26
+ "RandomForestClassifier",
27
+ "RandomForestRegressor",
28
+ "RandomTreesEmbedding",
29
+ "ExtraTreesClassifier",
30
+ "ExtraTreesRegressor",
31
+ "BaggingClassifier",
32
+ "BaggingRegressor",
33
+ "IsolationForest",
34
+ "GradientBoostingClassifier",
35
+ "GradientBoostingRegressor",
36
+ "AdaBoostClassifier",
37
+ "AdaBoostRegressor",
38
+ "VotingClassifier",
39
+ "VotingRegressor",
40
+ "StackingClassifier",
41
+ "StackingRegressor",
42
+ "HistGradientBoostingClassifier",
43
+ "HistGradientBoostingRegressor",
44
+ ]
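Everything listed in `__all__` above is importable directly from `sklearn.ensemble`; a minimal usage sketch with one of the exported estimators (dataset and hyperparameters are illustrative):

from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier

X, y = make_classification(n_samples=200, random_state=0)
clf = HistGradientBoostingClassifier(max_iter=50, random_state=0).fit(X, y)
print(clf.score(X, y))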
venv/lib/python3.10/site-packages/sklearn/ensemble/_bagging.py ADDED
@@ -0,0 +1,1242 @@
1
+ """Bagging meta-estimator."""
2
+
3
+ # Author: Gilles Louppe <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+
7
+ import itertools
8
+ import numbers
9
+ from abc import ABCMeta, abstractmethod
10
+ from functools import partial
11
+ from numbers import Integral
12
+ from warnings import warn
13
+
14
+ import numpy as np
15
+
16
+ from ..base import ClassifierMixin, RegressorMixin, _fit_context
17
+ from ..metrics import accuracy_score, r2_score
18
+ from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
19
+ from ..utils import check_random_state, column_or_1d, indices_to_mask
20
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt
21
+ from ..utils._tags import _safe_tags
22
+ from ..utils.metadata_routing import (
23
+ _raise_for_unsupported_routing,
24
+ _RoutingNotSupportedMixin,
25
+ )
26
+ from ..utils.metaestimators import available_if
27
+ from ..utils.multiclass import check_classification_targets
28
+ from ..utils.parallel import Parallel, delayed
29
+ from ..utils.random import sample_without_replacement
30
+ from ..utils.validation import _check_sample_weight, check_is_fitted, has_fit_parameter
31
+ from ._base import BaseEnsemble, _partition_estimators
32
+
33
+ __all__ = ["BaggingClassifier", "BaggingRegressor"]
34
+
35
+ MAX_INT = np.iinfo(np.int32).max
36
+
37
+
38
+ def _generate_indices(random_state, bootstrap, n_population, n_samples):
39
+ """Draw randomly sampled indices."""
40
+ # Draw sample indices
41
+ if bootstrap:
42
+ indices = random_state.randint(0, n_population, n_samples)
43
+ else:
44
+ indices = sample_without_replacement(
45
+ n_population, n_samples, random_state=random_state
46
+ )
47
+
48
+ return indices
49
+
50
+
51
+ def _generate_bagging_indices(
52
+ random_state,
53
+ bootstrap_features,
54
+ bootstrap_samples,
55
+ n_features,
56
+ n_samples,
57
+ max_features,
58
+ max_samples,
59
+ ):
60
+ """Randomly draw feature and sample indices."""
61
+ # Get valid random state
62
+ random_state = check_random_state(random_state)
63
+
64
+ # Draw indices
65
+ feature_indices = _generate_indices(
66
+ random_state, bootstrap_features, n_features, max_features
67
+ )
68
+ sample_indices = _generate_indices(
69
+ random_state, bootstrap_samples, n_samples, max_samples
70
+ )
71
+
72
+ return feature_indices, sample_indices
73
+
74
+
75
+ def _parallel_build_estimators(
76
+ n_estimators,
77
+ ensemble,
78
+ X,
79
+ y,
80
+ sample_weight,
81
+ seeds,
82
+ total_n_estimators,
83
+ verbose,
84
+ check_input,
85
+ ):
86
+ """Private function used to build a batch of estimators within a job."""
87
+ # Retrieve settings
88
+ n_samples, n_features = X.shape
89
+ max_features = ensemble._max_features
90
+ max_samples = ensemble._max_samples
91
+ bootstrap = ensemble.bootstrap
92
+ bootstrap_features = ensemble.bootstrap_features
93
+ support_sample_weight = has_fit_parameter(ensemble.estimator_, "sample_weight")
94
+ has_check_input = has_fit_parameter(ensemble.estimator_, "check_input")
95
+ requires_feature_indexing = bootstrap_features or max_features != n_features
96
+
97
+ if not support_sample_weight and sample_weight is not None:
98
+ raise ValueError("The base estimator doesn't support sample weight")
99
+
100
+ # Build estimators
101
+ estimators = []
102
+ estimators_features = []
103
+
104
+ for i in range(n_estimators):
105
+ if verbose > 1:
106
+ print(
107
+ "Building estimator %d of %d for this parallel run (total %d)..."
108
+ % (i + 1, n_estimators, total_n_estimators)
109
+ )
110
+
111
+ random_state = seeds[i]
112
+ estimator = ensemble._make_estimator(append=False, random_state=random_state)
113
+
114
+ if has_check_input:
115
+ estimator_fit = partial(estimator.fit, check_input=check_input)
116
+ else:
117
+ estimator_fit = estimator.fit
118
+
119
+ # Draw random feature, sample indices
120
+ features, indices = _generate_bagging_indices(
121
+ random_state,
122
+ bootstrap_features,
123
+ bootstrap,
124
+ n_features,
125
+ n_samples,
126
+ max_features,
127
+ max_samples,
128
+ )
129
+
130
+ # Draw samples, using sample weights, and then fit
131
+ if support_sample_weight:
132
+ if sample_weight is None:
133
+ curr_sample_weight = np.ones((n_samples,))
134
+ else:
135
+ curr_sample_weight = sample_weight.copy()
136
+
137
+ if bootstrap:
138
+ sample_counts = np.bincount(indices, minlength=n_samples)
139
+ curr_sample_weight *= sample_counts
140
+ else:
141
+ not_indices_mask = ~indices_to_mask(indices, n_samples)
142
+ curr_sample_weight[not_indices_mask] = 0
143
+
144
+ X_ = X[:, features] if requires_feature_indexing else X
145
+ estimator_fit(X_, y, sample_weight=curr_sample_weight)
146
+ else:
147
+ X_ = X[indices][:, features] if requires_feature_indexing else X[indices]
148
+ estimator_fit(X_, y[indices])
149
+
150
+ estimators.append(estimator)
151
+ estimators_features.append(features)
152
+
153
+ return estimators, estimators_features
154
+
155
+
156
+ def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
157
+ """Private function used to compute (proba-)predictions within a job."""
158
+ n_samples = X.shape[0]
159
+ proba = np.zeros((n_samples, n_classes))
160
+
161
+ for estimator, features in zip(estimators, estimators_features):
162
+ if hasattr(estimator, "predict_proba"):
163
+ proba_estimator = estimator.predict_proba(X[:, features])
164
+
165
+ if n_classes == len(estimator.classes_):
166
+ proba += proba_estimator
167
+
168
+ else:
169
+ proba[:, estimator.classes_] += proba_estimator[
170
+ :, range(len(estimator.classes_))
171
+ ]
172
+
173
+ else:
174
+ # Resort to voting
175
+ predictions = estimator.predict(X[:, features])
176
+
177
+ for i in range(n_samples):
178
+ proba[i, predictions[i]] += 1
179
+
180
+ return proba
181
+
182
+
183
+ def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
184
+ """Private function used to compute log probabilities within a job."""
185
+ n_samples = X.shape[0]
186
+ log_proba = np.empty((n_samples, n_classes))
187
+ log_proba.fill(-np.inf)
188
+ all_classes = np.arange(n_classes, dtype=int)
189
+
190
+ for estimator, features in zip(estimators, estimators_features):
191
+ log_proba_estimator = estimator.predict_log_proba(X[:, features])
192
+
193
+ if n_classes == len(estimator.classes_):
194
+ log_proba = np.logaddexp(log_proba, log_proba_estimator)
195
+
196
+ else:
197
+ log_proba[:, estimator.classes_] = np.logaddexp(
198
+ log_proba[:, estimator.classes_],
199
+ log_proba_estimator[:, range(len(estimator.classes_))],
200
+ )
201
+
202
+ missing = np.setdiff1d(all_classes, estimator.classes_)
203
+ log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)
204
+
205
+ return log_proba
206
+
207
+
208
+ def _parallel_decision_function(estimators, estimators_features, X):
209
+ """Private function used to compute decisions within a job."""
210
+ return sum(
211
+ estimator.decision_function(X[:, features])
212
+ for estimator, features in zip(estimators, estimators_features)
213
+ )
214
+
215
+
216
+ def _parallel_predict_regression(estimators, estimators_features, X):
217
+ """Private function used to compute predictions within a job."""
218
+ return sum(
219
+ estimator.predict(X[:, features])
220
+ for estimator, features in zip(estimators, estimators_features)
221
+ )
222
+
223
+
224
+ def _estimator_has(attr):
225
+ """Check if we can delegate a method to the underlying estimator.
226
+
227
+ First, we check the first fitted estimator if available, otherwise we
228
+ check the estimator attribute.
229
+ """
230
+
231
+ def check(self):
232
+ if hasattr(self, "estimators_"):
233
+ return hasattr(self.estimators_[0], attr)
234
+ else: # self.estimator is not None
235
+ return hasattr(self.estimator, attr)
236
+
237
+ return check
238
+
239
+
240
+ class BaseBagging(BaseEnsemble, metaclass=ABCMeta):
241
+ """Base class for Bagging meta-estimator.
242
+
243
+ Warning: This class should not be used directly. Use derived classes
244
+ instead.
245
+ """
246
+
247
+ _parameter_constraints: dict = {
248
+ "estimator": [HasMethods(["fit", "predict"]), None],
249
+ "n_estimators": [Interval(Integral, 1, None, closed="left")],
250
+ "max_samples": [
251
+ Interval(Integral, 1, None, closed="left"),
252
+ Interval(RealNotInt, 0, 1, closed="right"),
253
+ ],
254
+ "max_features": [
255
+ Interval(Integral, 1, None, closed="left"),
256
+ Interval(RealNotInt, 0, 1, closed="right"),
257
+ ],
258
+ "bootstrap": ["boolean"],
259
+ "bootstrap_features": ["boolean"],
260
+ "oob_score": ["boolean"],
261
+ "warm_start": ["boolean"],
262
+ "n_jobs": [None, Integral],
263
+ "random_state": ["random_state"],
264
+ "verbose": ["verbose"],
265
+ }
266
+
267
+ @abstractmethod
268
+ def __init__(
269
+ self,
270
+ estimator=None,
271
+ n_estimators=10,
272
+ *,
273
+ max_samples=1.0,
274
+ max_features=1.0,
275
+ bootstrap=True,
276
+ bootstrap_features=False,
277
+ oob_score=False,
278
+ warm_start=False,
279
+ n_jobs=None,
280
+ random_state=None,
281
+ verbose=0,
282
+ ):
283
+ super().__init__(
284
+ estimator=estimator,
285
+ n_estimators=n_estimators,
286
+ )
287
+ self.max_samples = max_samples
288
+ self.max_features = max_features
289
+ self.bootstrap = bootstrap
290
+ self.bootstrap_features = bootstrap_features
291
+ self.oob_score = oob_score
292
+ self.warm_start = warm_start
293
+ self.n_jobs = n_jobs
294
+ self.random_state = random_state
295
+ self.verbose = verbose
296
+
297
+ @_fit_context(
298
+ # BaseBagging.estimator is not validated yet
299
+ prefer_skip_nested_validation=False
300
+ )
301
+ def fit(self, X, y, sample_weight=None):
302
+ """Build a Bagging ensemble of estimators from the training set (X, y).
303
+
304
+ Parameters
305
+ ----------
306
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
307
+ The training input samples. Sparse matrices are accepted only if
308
+ they are supported by the base estimator.
309
+
310
+ y : array-like of shape (n_samples,)
311
+ The target values (class labels in classification, real numbers in
312
+ regression).
313
+
314
+ sample_weight : array-like of shape (n_samples,), default=None
315
+ Sample weights. If None, then samples are equally weighted.
316
+ Note that this is supported only if the base estimator supports
317
+ sample weighting.
318
+
319
+ Returns
320
+ -------
321
+ self : object
322
+ Fitted estimator.
323
+ """
324
+ _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
325
+ # Convert data (X is required to be 2d and indexable)
326
+ X, y = self._validate_data(
327
+ X,
328
+ y,
329
+ accept_sparse=["csr", "csc"],
330
+ dtype=None,
331
+ force_all_finite=False,
332
+ multi_output=True,
333
+ )
334
+ return self._fit(X, y, self.max_samples, sample_weight=sample_weight)
335
+
336
+ def _parallel_args(self):
337
+ return {}
338
+
339
+ def _fit(
340
+ self,
341
+ X,
342
+ y,
343
+ max_samples=None,
344
+ max_depth=None,
345
+ sample_weight=None,
346
+ check_input=True,
347
+ ):
348
+ """Build a Bagging ensemble of estimators from the training
349
+ set (X, y).
350
+
351
+ Parameters
352
+ ----------
353
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
354
+ The training input samples. Sparse matrices are accepted only if
355
+ they are supported by the base estimator.
356
+
357
+ y : array-like of shape (n_samples,)
358
+ The target values (class labels in classification, real numbers in
359
+ regression).
360
+
361
+ max_samples : int or float, default=None
362
+ Argument to use instead of self.max_samples.
363
+
364
+ max_depth : int, default=None
365
+ Override value used when constructing base estimator. Only
366
+ supported if the base estimator has a max_depth parameter.
367
+
368
+ sample_weight : array-like of shape (n_samples,), default=None
369
+ Sample weights. If None, then samples are equally weighted.
370
+ Note that this is supported only if the base estimator supports
371
+ sample weighting.
372
+
373
+ check_input : bool, default=True
374
+ Override value used when fitting base estimator. Only supported
375
+ if the base estimator has a check_input parameter for fit function.
376
+
377
+ Returns
378
+ -------
379
+ self : object
380
+ Fitted estimator.
381
+ """
382
+ random_state = check_random_state(self.random_state)
383
+
384
+ if sample_weight is not None:
385
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
386
+
387
+ # Remap output
388
+ n_samples = X.shape[0]
389
+ self._n_samples = n_samples
390
+ y = self._validate_y(y)
391
+
392
+ # Check parameters
393
+ self._validate_estimator()
394
+
395
+ if max_depth is not None:
396
+ self.estimator_.max_depth = max_depth
397
+
398
+ # Validate max_samples
399
+ if max_samples is None:
400
+ max_samples = self.max_samples
401
+ elif not isinstance(max_samples, numbers.Integral):
402
+ max_samples = int(max_samples * X.shape[0])
403
+
404
+ if max_samples > X.shape[0]:
405
+ raise ValueError("max_samples must be <= n_samples")
406
+
407
+ # Store validated integer row sampling value
408
+ self._max_samples = max_samples
409
+
410
+ # Validate max_features
411
+ if isinstance(self.max_features, numbers.Integral):
412
+ max_features = self.max_features
413
+ elif isinstance(self.max_features, float):
414
+ max_features = int(self.max_features * self.n_features_in_)
415
+
416
+ if max_features > self.n_features_in_:
417
+ raise ValueError("max_features must be <= n_features")
418
+
419
+ max_features = max(1, int(max_features))
420
+
421
+ # Store validated integer feature sampling value
422
+ self._max_features = max_features
423
+
424
+ # Other checks
425
+ if not self.bootstrap and self.oob_score:
426
+ raise ValueError("Out of bag estimation only available if bootstrap=True")
427
+
428
+ if self.warm_start and self.oob_score:
429
+ raise ValueError("Out of bag estimate only available if warm_start=False")
430
+
431
+ if hasattr(self, "oob_score_") and self.warm_start:
432
+ del self.oob_score_
433
+
434
+ if not self.warm_start or not hasattr(self, "estimators_"):
435
+ # Free allocated memory, if any
436
+ self.estimators_ = []
437
+ self.estimators_features_ = []
438
+
439
+ n_more_estimators = self.n_estimators - len(self.estimators_)
440
+
441
+ if n_more_estimators < 0:
442
+ raise ValueError(
443
+ "n_estimators=%d must be larger or equal to "
444
+ "len(estimators_)=%d when warm_start==True"
445
+ % (self.n_estimators, len(self.estimators_))
446
+ )
447
+
448
+ elif n_more_estimators == 0:
449
+ warn(
450
+ "Warm-start fitting without increasing n_estimators does not "
451
+ "fit new trees."
452
+ )
453
+ return self
454
+
455
+ # Parallel loop
456
+ n_jobs, n_estimators, starts = _partition_estimators(
457
+ n_more_estimators, self.n_jobs
458
+ )
459
+ total_n_estimators = sum(n_estimators)
460
+
461
+ # Advance random state to state after training
462
+ # the first n_estimators
463
+ if self.warm_start and len(self.estimators_) > 0:
464
+ random_state.randint(MAX_INT, size=len(self.estimators_))
465
+
466
+ seeds = random_state.randint(MAX_INT, size=n_more_estimators)
467
+ self._seeds = seeds
468
+
469
+ all_results = Parallel(
470
+ n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
471
+ )(
472
+ delayed(_parallel_build_estimators)(
473
+ n_estimators[i],
474
+ self,
475
+ X,
476
+ y,
477
+ sample_weight,
478
+ seeds[starts[i] : starts[i + 1]],
479
+ total_n_estimators,
480
+ verbose=self.verbose,
481
+ check_input=check_input,
482
+ )
483
+ for i in range(n_jobs)
484
+ )
485
+
486
+ # Reduce
487
+ self.estimators_ += list(
488
+ itertools.chain.from_iterable(t[0] for t in all_results)
489
+ )
490
+ self.estimators_features_ += list(
491
+ itertools.chain.from_iterable(t[1] for t in all_results)
492
+ )
493
+
494
+ if self.oob_score:
495
+ self._set_oob_score(X, y)
496
+
497
+ return self
498
+
499
+ @abstractmethod
500
+ def _set_oob_score(self, X, y):
501
+ """Calculate out of bag predictions and score."""
502
+
503
+ def _validate_y(self, y):
504
+ if len(y.shape) == 1 or y.shape[1] == 1:
505
+ return column_or_1d(y, warn=True)
506
+ return y
507
+
508
+ def _get_estimators_indices(self):
509
+ # Get drawn indices along both sample and feature axes
510
+ for seed in self._seeds:
511
+ # Operations accessing random_state must be performed identically
512
+ # to those in `_parallel_build_estimators()`
513
+ feature_indices, sample_indices = _generate_bagging_indices(
514
+ seed,
515
+ self.bootstrap_features,
516
+ self.bootstrap,
517
+ self.n_features_in_,
518
+ self._n_samples,
519
+ self._max_features,
520
+ self._max_samples,
521
+ )
522
+
523
+ yield feature_indices, sample_indices
524
+
525
+ @property
526
+ def estimators_samples_(self):
527
+ """
528
+ The subset of drawn samples for each base estimator.
529
+
530
+ Returns a dynamically generated list of indices identifying
531
+ the samples used for fitting each member of the ensemble, i.e.,
532
+ the in-bag samples.
533
+
534
+ Note: the list is re-created at each call to the property in order
535
+ to reduce the object memory footprint by not storing the sampling
536
+ data. Thus fetching the property may be slower than expected.
537
+ """
538
+ return [sample_indices for _, sample_indices in self._get_estimators_indices()]
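As the property docstring above explains, the in-bag indices are re-derived from the stored seeds on every access rather than kept in memory; a small sketch of reading them back after fitting (data and parameters are illustrative):

from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier

X, y = make_classification(n_samples=100, random_state=0)
bag = BaggingClassifier(n_estimators=3, random_state=0).fit(X, y)

# One index array per fitted estimator, regenerated from the per-estimator seeds.
for sample_indices in bag.estimators_samples_:
    print(sample_indices.shape)     # (100,) with the default max_samples=1.0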
539
+
540
+
541
+ class BaggingClassifier(_RoutingNotSupportedMixin, ClassifierMixin, BaseBagging):
542
+ """A Bagging classifier.
543
+
544
+ A Bagging classifier is an ensemble meta-estimator that fits base
545
+ classifiers each on random subsets of the original dataset and then
546
+ aggregates their individual predictions (either by voting or by averaging)
547
+ to form a final prediction. Such a meta-estimator can typically be used as
548
+ a way to reduce the variance of a black-box estimator (e.g., a decision
549
+ tree), by introducing randomization into its construction procedure and
550
+ then making an ensemble out of it.
551
+
552
+ This algorithm encompasses several works from the literature. When random
553
+ subsets of the dataset are drawn as random subsets of the samples, then
554
+ this algorithm is known as Pasting [1]_. If samples are drawn with
555
+ replacement, then the method is known as Bagging [2]_. When random subsets
556
+ of the dataset are drawn as random subsets of the features, then the method
557
+ is known as Random Subspaces [3]_. Finally, when base estimators are built
558
+ on subsets of both samples and features, then the method is known as
559
+ Random Patches [4]_.
560
+
561
+ Read more in the :ref:`User Guide <bagging>`.
562
+
563
+ .. versionadded:: 0.15
564
+
565
+ Parameters
566
+ ----------
567
+ estimator : object, default=None
568
+ The base estimator to fit on random subsets of the dataset.
569
+ If None, then the base estimator is a
570
+ :class:`~sklearn.tree.DecisionTreeClassifier`.
571
+
572
+ .. versionadded:: 1.2
573
+ `base_estimator` was renamed to `estimator`.
574
+
575
+ n_estimators : int, default=10
576
+ The number of base estimators in the ensemble.
577
+
578
+ max_samples : int or float, default=1.0
579
+ The number of samples to draw from X to train each base estimator (with
580
+ replacement by default, see `bootstrap` for more details).
581
+
582
+ - If int, then draw `max_samples` samples.
583
+ - If float, then draw `max_samples * X.shape[0]` samples.
584
+
585
+ max_features : int or float, default=1.0
586
+ The number of features to draw from X to train each base estimator (
587
+ without replacement by default, see `bootstrap_features` for more
588
+ details).
589
+
590
+ - If int, then draw `max_features` features.
591
+ - If float, then draw `max(1, int(max_features * n_features_in_))` features.
592
+
593
+ bootstrap : bool, default=True
594
+ Whether samples are drawn with replacement. If False, sampling
595
+ without replacement is performed.
596
+
597
+ bootstrap_features : bool, default=False
598
+ Whether features are drawn with replacement.
599
+
600
+ oob_score : bool, default=False
601
+ Whether to use out-of-bag samples to estimate
602
+ the generalization error. Only available if bootstrap=True.
603
+
604
+ warm_start : bool, default=False
605
+ When set to True, reuse the solution of the previous call to fit
606
+ and add more estimators to the ensemble, otherwise, just fit
607
+ a whole new ensemble. See :term:`the Glossary <warm_start>`.
608
+
609
+ .. versionadded:: 0.17
610
+ *warm_start* constructor parameter.
611
+
612
+ n_jobs : int, default=None
613
+ The number of jobs to run in parallel for both :meth:`fit` and
614
+ :meth:`predict`. ``None`` means 1 unless in a
615
+ :obj:`joblib.parallel_backend` context. ``-1`` means using all
616
+ processors. See :term:`Glossary <n_jobs>` for more details.
617
+
618
+ random_state : int, RandomState instance or None, default=None
619
+ Controls the random resampling of the original dataset
620
+ (sample wise and feature wise).
621
+ If the base estimator accepts a `random_state` attribute, a different
622
+ seed is generated for each instance in the ensemble.
623
+ Pass an int for reproducible output across multiple function calls.
624
+ See :term:`Glossary <random_state>`.
625
+
626
+ verbose : int, default=0
627
+ Controls the verbosity when fitting and predicting.
628
+
629
+ Attributes
630
+ ----------
631
+ estimator_ : estimator
632
+ The base estimator from which the ensemble is grown.
633
+
634
+ .. versionadded:: 1.2
635
+ `base_estimator_` was renamed to `estimator_`.
636
+
637
+ n_features_in_ : int
638
+ Number of features seen during :term:`fit`.
639
+
640
+ .. versionadded:: 0.24
641
+
642
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
643
+ Names of features seen during :term:`fit`. Defined only when `X`
644
+ has feature names that are all strings.
645
+
646
+ .. versionadded:: 1.0
647
+
648
+ estimators_ : list of estimators
649
+ The collection of fitted base estimators.
650
+
651
+ estimators_samples_ : list of arrays
652
+ The subset of drawn samples (i.e., the in-bag samples) for each base
653
+ estimator. Each subset is defined by an array of the indices selected.
654
+
655
+ estimators_features_ : list of arrays
656
+ The subset of drawn features for each base estimator.
657
+
658
+ classes_ : ndarray of shape (n_classes,)
659
+ The classes labels.
660
+
661
+ n_classes_ : int or list
662
+ The number of classes.
663
+
664
+ oob_score_ : float
665
+ Score of the training dataset obtained using an out-of-bag estimate.
666
+ This attribute exists only when ``oob_score`` is True.
667
+
668
+ oob_decision_function_ : ndarray of shape (n_samples, n_classes)
669
+ Decision function computed with out-of-bag estimate on the training
670
+ set. If n_estimators is small it might be possible that a data point
671
+ was never left out during the bootstrap. In this case,
672
+ `oob_decision_function_` might contain NaN. This attribute exists
673
+ only when ``oob_score`` is True.
674
+
675
+ See Also
676
+ --------
677
+ BaggingRegressor : A Bagging regressor.
678
+
679
+ References
680
+ ----------
681
+
682
+ .. [1] L. Breiman, "Pasting small votes for classification in large
683
+ databases and on-line", Machine Learning, 36(1), 85-103, 1999.
684
+
685
+ .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
686
+ 1996.
687
+
688
+ .. [3] T. Ho, "The random subspace method for constructing decision
689
+ forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
690
+ 1998.
691
+
692
+ .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
693
+ Learning and Knowledge Discovery in Databases, 346-361, 2012.
694
+
695
+ Examples
696
+ --------
697
+ >>> from sklearn.svm import SVC
698
+ >>> from sklearn.ensemble import BaggingClassifier
699
+ >>> from sklearn.datasets import make_classification
700
+ >>> X, y = make_classification(n_samples=100, n_features=4,
701
+ ... n_informative=2, n_redundant=0,
702
+ ... random_state=0, shuffle=False)
703
+ >>> clf = BaggingClassifier(estimator=SVC(),
704
+ ... n_estimators=10, random_state=0).fit(X, y)
705
+ >>> clf.predict([[0, 0, 0, 0]])
706
+ array([1])
707
+ """
708
+
709
+ def __init__(
710
+ self,
711
+ estimator=None,
712
+ n_estimators=10,
713
+ *,
714
+ max_samples=1.0,
715
+ max_features=1.0,
716
+ bootstrap=True,
717
+ bootstrap_features=False,
718
+ oob_score=False,
719
+ warm_start=False,
720
+ n_jobs=None,
721
+ random_state=None,
722
+ verbose=0,
723
+ ):
724
+ super().__init__(
725
+ estimator=estimator,
726
+ n_estimators=n_estimators,
727
+ max_samples=max_samples,
728
+ max_features=max_features,
729
+ bootstrap=bootstrap,
730
+ bootstrap_features=bootstrap_features,
731
+ oob_score=oob_score,
732
+ warm_start=warm_start,
733
+ n_jobs=n_jobs,
734
+ random_state=random_state,
735
+ verbose=verbose,
736
+ )
737
+
738
+ def _validate_estimator(self):
739
+ """Check the estimator and set the estimator_ attribute."""
740
+ super()._validate_estimator(default=DecisionTreeClassifier())
741
+
742
+ def _set_oob_score(self, X, y):
743
+ n_samples = y.shape[0]
744
+ n_classes_ = self.n_classes_
745
+
746
+ predictions = np.zeros((n_samples, n_classes_))
747
+
748
+ for estimator, samples, features in zip(
749
+ self.estimators_, self.estimators_samples_, self.estimators_features_
750
+ ):
751
+ # Create mask for OOB samples
752
+ mask = ~indices_to_mask(samples, n_samples)
753
+
754
+ if hasattr(estimator, "predict_proba"):
755
+ predictions[mask, :] += estimator.predict_proba(
756
+ (X[mask, :])[:, features]
757
+ )
758
+
759
+ else:
760
+ p = estimator.predict((X[mask, :])[:, features])
761
+ j = 0
762
+
763
+ for i in range(n_samples):
764
+ if mask[i]:
765
+ predictions[i, p[j]] += 1
766
+ j += 1
767
+
768
+ if (predictions.sum(axis=1) == 0).any():
769
+ warn(
770
+ "Some inputs do not have OOB scores. "
771
+ "This probably means too few estimators were used "
772
+ "to compute any reliable oob estimates."
773
+ )
774
+
775
+ oob_decision_function = predictions / predictions.sum(axis=1)[:, np.newaxis]
776
+ oob_score = accuracy_score(y, np.argmax(predictions, axis=1))
777
+
778
+ self.oob_decision_function_ = oob_decision_function
779
+ self.oob_score_ = oob_score
780
+
781
+ def _validate_y(self, y):
782
+ y = column_or_1d(y, warn=True)
783
+ check_classification_targets(y)
784
+ self.classes_, y = np.unique(y, return_inverse=True)
785
+ self.n_classes_ = len(self.classes_)
786
+
787
+ return y
788
+
789
+ def predict(self, X):
790
+ """Predict class for X.
791
+
792
+ The predicted class of an input sample is computed as the class with
793
+ the highest mean predicted probability. If base estimators do not
794
+ implement a ``predict_proba`` method, then it resorts to voting.
795
+
796
+ Parameters
797
+ ----------
798
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
799
+ The training input samples. Sparse matrices are accepted only if
800
+ they are supported by the base estimator.
801
+
802
+ Returns
803
+ -------
804
+ y : ndarray of shape (n_samples,)
805
+ The predicted classes.
806
+ """
807
+ predicted_probability = self.predict_proba(X)
808
+ return self.classes_.take(np.argmax(predicted_probability, axis=1), axis=0)
809
+
810
+ def predict_proba(self, X):
811
+ """Predict class probabilities for X.
812
+
813
+ The predicted class probabilities of an input sample is computed as
814
+ the mean predicted class probabilities of the base estimators in the
815
+ ensemble. If base estimators do not implement a ``predict_proba``
816
+ method, then it resorts to voting and the predicted class probabilities
817
+ of an input sample represents the proportion of estimators predicting
818
+ each class.
819
+
820
+ Parameters
821
+ ----------
822
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
823
+ The training input samples. Sparse matrices are accepted only if
824
+ they are supported by the base estimator.
825
+
826
+ Returns
827
+ -------
828
+ p : ndarray of shape (n_samples, n_classes)
829
+ The class probabilities of the input samples. The order of the
830
+ classes corresponds to that in the attribute :term:`classes_`.
831
+ """
832
+ check_is_fitted(self)
833
+ # Check data
834
+ X = self._validate_data(
835
+ X,
836
+ accept_sparse=["csr", "csc"],
837
+ dtype=None,
838
+ force_all_finite=False,
839
+ reset=False,
840
+ )
841
+
842
+ # Parallel loop
843
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
844
+
845
+ all_proba = Parallel(
846
+ n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
847
+ )(
848
+ delayed(_parallel_predict_proba)(
849
+ self.estimators_[starts[i] : starts[i + 1]],
850
+ self.estimators_features_[starts[i] : starts[i + 1]],
851
+ X,
852
+ self.n_classes_,
853
+ )
854
+ for i in range(n_jobs)
855
+ )
856
+
857
+ # Reduce
858
+ proba = sum(all_proba) / self.n_estimators
859
+
860
+ return proba
861
+
862
+ def predict_log_proba(self, X):
863
+ """Predict class log-probabilities for X.
864
+
865
+ The predicted class log-probabilities of an input sample is computed as
866
+ the log of the mean predicted class probabilities of the base
867
+ estimators in the ensemble.
868
+
869
+ Parameters
870
+ ----------
871
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
872
+ The training input samples. Sparse matrices are accepted only if
873
+ they are supported by the base estimator.
874
+
875
+ Returns
876
+ -------
877
+ p : ndarray of shape (n_samples, n_classes)
878
+ The class log-probabilities of the input samples. The order of the
879
+ classes corresponds to that in the attribute :term:`classes_`.
880
+ """
881
+ check_is_fitted(self)
882
+ if hasattr(self.estimator_, "predict_log_proba"):
883
+ # Check data
884
+ X = self._validate_data(
885
+ X,
886
+ accept_sparse=["csr", "csc"],
887
+ dtype=None,
888
+ force_all_finite=False,
889
+ reset=False,
890
+ )
891
+
892
+ # Parallel loop
893
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
894
+
895
+ all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
896
+ delayed(_parallel_predict_log_proba)(
897
+ self.estimators_[starts[i] : starts[i + 1]],
898
+ self.estimators_features_[starts[i] : starts[i + 1]],
899
+ X,
900
+ self.n_classes_,
901
+ )
902
+ for i in range(n_jobs)
903
+ )
904
+
905
+ # Reduce
906
+ log_proba = all_log_proba[0]
907
+
908
+ for j in range(1, len(all_log_proba)):
909
+ log_proba = np.logaddexp(log_proba, all_log_proba[j])
910
+
911
+ log_proba -= np.log(self.n_estimators)
912
+
913
+ else:
914
+ log_proba = np.log(self.predict_proba(X))
915
+
916
+ return log_proba
917
+
918
+ @available_if(_estimator_has("decision_function"))
919
+ def decision_function(self, X):
920
+ """Average of the decision functions of the base classifiers.
921
+
922
+ Parameters
923
+ ----------
924
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
925
+ The training input samples. Sparse matrices are accepted only if
926
+ they are supported by the base estimator.
927
+
928
+ Returns
929
+ -------
930
+ score : ndarray of shape (n_samples, k)
931
+ The decision function of the input samples. The columns correspond
932
+ to the classes in sorted order, as they appear in the attribute
933
+ ``classes_``. Regression and binary classification are special
934
+ cases with ``k == 1``, otherwise ``k==n_classes``.
935
+ """
936
+ check_is_fitted(self)
937
+
938
+ # Check data
939
+ X = self._validate_data(
940
+ X,
941
+ accept_sparse=["csr", "csc"],
942
+ dtype=None,
943
+ force_all_finite=False,
944
+ reset=False,
945
+ )
946
+
947
+ # Parallel loop
948
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
949
+
950
+ all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
951
+ delayed(_parallel_decision_function)(
952
+ self.estimators_[starts[i] : starts[i + 1]],
953
+ self.estimators_features_[starts[i] : starts[i + 1]],
954
+ X,
955
+ )
956
+ for i in range(n_jobs)
957
+ )
958
+
959
+ # Reduce
960
+ decisions = sum(all_decisions) / self.n_estimators
961
+
962
+ return decisions
963
+
964
+ def _more_tags(self):
965
+ if self.estimator is None:
966
+ estimator = DecisionTreeClassifier()
967
+ else:
968
+ estimator = self.estimator
969
+
970
+ return {"allow_nan": _safe_tags(estimator, "allow_nan")}
971
+
972
+
973
+ class BaggingRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseBagging):
974
+ """A Bagging regressor.
975
+
976
+ A Bagging regressor is an ensemble meta-estimator that fits base
977
+ regressors each on random subsets of the original dataset and then
978
+ aggregates their individual predictions (either by voting or by averaging)
979
+ to form a final prediction. Such a meta-estimator can typically be used as
980
+ a way to reduce the variance of a black-box estimator (e.g., a decision
981
+ tree), by introducing randomization into its construction procedure and
982
+ then making an ensemble out of it.
983
+
984
+ This algorithm encompasses several works from the literature. When random
985
+ subsets of the dataset are drawn as random subsets of the samples, then
986
+ this algorithm is known as Pasting [1]_. If samples are drawn with
987
+ replacement, then the method is known as Bagging [2]_. When random subsets
988
+ of the dataset are drawn as random subsets of the features, then the method
989
+ is known as Random Subspaces [3]_. Finally, when base estimators are built
990
+ on subsets of both samples and features, then the method is known as
991
+ Random Patches [4]_.
992
+
993
+ Read more in the :ref:`User Guide <bagging>`.
994
+
995
+ .. versionadded:: 0.15
996
+
997
+ Parameters
998
+ ----------
999
+ estimator : object, default=None
1000
+ The base estimator to fit on random subsets of the dataset.
1001
+ If None, then the base estimator is a
1002
+ :class:`~sklearn.tree.DecisionTreeRegressor`.
1003
+
1004
+ .. versionadded:: 1.2
1005
+ `base_estimator` was renamed to `estimator`.
1006
+
1007
+ n_estimators : int, default=10
1008
+ The number of base estimators in the ensemble.
1009
+
1010
+ max_samples : int or float, default=1.0
1011
+ The number of samples to draw from X to train each base estimator (with
1012
+ replacement by default, see `bootstrap` for more details).
1013
+
1014
+ - If int, then draw `max_samples` samples.
1015
+ - If float, then draw `max_samples * X.shape[0]` samples.
1016
+
1017
+ max_features : int or float, default=1.0
1018
+ The number of features to draw from X to train each base estimator (
1019
+ without replacement by default, see `bootstrap_features` for more
1020
+ details).
1021
+
1022
+ - If int, then draw `max_features` features.
1023
+ - If float, then draw `max(1, int(max_features * n_features_in_))` features.
1024
+
1025
+ bootstrap : bool, default=True
1026
+ Whether samples are drawn with replacement. If False, sampling
1027
+ without replacement is performed.
1028
+
1029
+ bootstrap_features : bool, default=False
1030
+ Whether features are drawn with replacement.
1031
+
1032
+ oob_score : bool, default=False
1033
+ Whether to use out-of-bag samples to estimate
1034
+ the generalization error. Only available if bootstrap=True.
1035
+
1036
+ warm_start : bool, default=False
1037
+ When set to True, reuse the solution of the previous call to fit
1038
+ and add more estimators to the ensemble, otherwise, just fit
1039
+ a whole new ensemble. See :term:`the Glossary <warm_start>`.
1040
+
1041
+ n_jobs : int, default=None
1042
+ The number of jobs to run in parallel for both :meth:`fit` and
1043
+ :meth:`predict`. ``None`` means 1 unless in a
1044
+ :obj:`joblib.parallel_backend` context. ``-1`` means using all
1045
+ processors. See :term:`Glossary <n_jobs>` for more details.
1046
+
1047
+ random_state : int, RandomState instance or None, default=None
1048
+ Controls the random resampling of the original dataset
1049
+ (sample wise and feature wise).
1050
+ If the base estimator accepts a `random_state` attribute, a different
1051
+ seed is generated for each instance in the ensemble.
1052
+ Pass an int for reproducible output across multiple function calls.
1053
+ See :term:`Glossary <random_state>`.
1054
+
1055
+ verbose : int, default=0
1056
+ Controls the verbosity when fitting and predicting.
1057
+
1058
+ Attributes
1059
+ ----------
1060
+ estimator_ : estimator
1061
+ The base estimator from which the ensemble is grown.
1062
+
1063
+ .. versionadded:: 1.2
1064
+ `base_estimator_` was renamed to `estimator_`.
1065
+
1066
+ n_features_in_ : int
1067
+ Number of features seen during :term:`fit`.
1068
+
1069
+ .. versionadded:: 0.24
1070
+
1071
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1072
+ Names of features seen during :term:`fit`. Defined only when `X`
1073
+ has feature names that are all strings.
1074
+
1075
+ .. versionadded:: 1.0
1076
+
1077
+ estimators_ : list of estimators
1078
+ The collection of fitted sub-estimators.
1079
+
1080
+ estimators_samples_ : list of arrays
1081
+ The subset of drawn samples (i.e., the in-bag samples) for each base
1082
+ estimator. Each subset is defined by an array of the indices selected.
1083
+
1084
+ estimators_features_ : list of arrays
1085
+ The subset of drawn features for each base estimator.
1086
+
1087
+ oob_score_ : float
1088
+ Score of the training dataset obtained using an out-of-bag estimate.
1089
+ This attribute exists only when ``oob_score`` is True.
1090
+
1091
+ oob_prediction_ : ndarray of shape (n_samples,)
1092
+ Prediction computed with out-of-bag estimate on the training
1093
+ set. If n_estimators is small it might be possible that a data point
1094
+ was never left out during the bootstrap. In this case,
1095
+ `oob_prediction_` might contain NaN. This attribute exists only
1096
+ when ``oob_score`` is True.
1097
+
1098
+ See Also
1099
+ --------
1100
+ BaggingClassifier : A Bagging classifier.
1101
+
1102
+ References
1103
+ ----------
1104
+
1105
+ .. [1] L. Breiman, "Pasting small votes for classification in large
1106
+ databases and on-line", Machine Learning, 36(1), 85-103, 1999.
1107
+
1108
+ .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1109
+ 1996.
1110
+
1111
+ .. [3] T. Ho, "The random subspace method for constructing decision
1112
+ forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1113
+ 1998.
1114
+
1115
+ .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
1116
+ Learning and Knowledge Discovery in Databases, 346-361, 2012.
1117
+
1118
+ Examples
1119
+ --------
1120
+ >>> from sklearn.svm import SVR
1121
+ >>> from sklearn.ensemble import BaggingRegressor
1122
+ >>> from sklearn.datasets import make_regression
1123
+ >>> X, y = make_regression(n_samples=100, n_features=4,
1124
+ ... n_informative=2, n_targets=1,
1125
+ ... random_state=0, shuffle=False)
1126
+ >>> regr = BaggingRegressor(estimator=SVR(),
1127
+ ... n_estimators=10, random_state=0).fit(X, y)
1128
+ >>> regr.predict([[0, 0, 0, 0]])
1129
+ array([-2.8720...])
1130
+ """
1131
+
1132
+ def __init__(
1133
+ self,
1134
+ estimator=None,
1135
+ n_estimators=10,
1136
+ *,
1137
+ max_samples=1.0,
1138
+ max_features=1.0,
1139
+ bootstrap=True,
1140
+ bootstrap_features=False,
1141
+ oob_score=False,
1142
+ warm_start=False,
1143
+ n_jobs=None,
1144
+ random_state=None,
1145
+ verbose=0,
1146
+ ):
1147
+ super().__init__(
1148
+ estimator=estimator,
1149
+ n_estimators=n_estimators,
1150
+ max_samples=max_samples,
1151
+ max_features=max_features,
1152
+ bootstrap=bootstrap,
1153
+ bootstrap_features=bootstrap_features,
1154
+ oob_score=oob_score,
1155
+ warm_start=warm_start,
1156
+ n_jobs=n_jobs,
1157
+ random_state=random_state,
1158
+ verbose=verbose,
1159
+ )
1160
+
1161
+ def predict(self, X):
1162
+ """Predict regression target for X.
1163
+
1164
+ The predicted regression target of an input sample is computed as the
1165
+ mean predicted regression targets of the estimators in the ensemble.
1166
+
1167
+ Parameters
1168
+ ----------
1169
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1170
+ The input samples. Sparse matrices are accepted only if
1171
+ they are supported by the base estimator.
1172
+
1173
+ Returns
1174
+ -------
1175
+ y : ndarray of shape (n_samples,)
1176
+ The predicted values.
1177
+ """
1178
+ check_is_fitted(self)
1179
+ # Check data
1180
+ X = self._validate_data(
1181
+ X,
1182
+ accept_sparse=["csr", "csc"],
1183
+ dtype=None,
1184
+ force_all_finite=False,
1185
+ reset=False,
1186
+ )
1187
+
1188
+ # Parallel loop
1189
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
1190
+
1191
+ all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
1192
+ delayed(_parallel_predict_regression)(
1193
+ self.estimators_[starts[i] : starts[i + 1]],
1194
+ self.estimators_features_[starts[i] : starts[i + 1]],
1195
+ X,
1196
+ )
1197
+ for i in range(n_jobs)
1198
+ )
1199
+
1200
+ # Reduce
1201
+ y_hat = sum(all_y_hat) / self.n_estimators
1202
+
1203
+ return y_hat
1204
+
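+ # Illustrative sketch (not part of scikit-learn): the reduce step above assumes
+ # that each parallel chunk returns the *sum* of its estimators' predictions, so
+ # dividing the grand total by ``n_estimators`` yields the ensemble mean. A rough
+ # serial equivalent of the whole method would be:
+ #
+ #   y_hat = sum(
+ #       est.predict(X[:, feats])
+ #       for est, feats in zip(self.estimators_, self.estimators_features_)
+ #   ) / self.n_estimators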
1205
+ def _validate_estimator(self):
1206
+ """Check the estimator and set the estimator_ attribute."""
1207
+ super()._validate_estimator(default=DecisionTreeRegressor())
1208
+
1209
+ def _set_oob_score(self, X, y):
1210
+ n_samples = y.shape[0]
1211
+
1212
+ predictions = np.zeros((n_samples,))
1213
+ n_predictions = np.zeros((n_samples,))
1214
+
1215
+ for estimator, samples, features in zip(
1216
+ self.estimators_, self.estimators_samples_, self.estimators_features_
1217
+ ):
1218
+ # Create mask for OOB samples
1219
+ mask = ~indices_to_mask(samples, n_samples)
1220
+
1221
+ predictions[mask] += estimator.predict((X[mask, :])[:, features])
1222
+ n_predictions[mask] += 1
1223
+
1224
+ if (n_predictions == 0).any():
1225
+ warn(
1226
+ "Some inputs do not have OOB scores. "
1227
+ "This probably means too few estimators were used "
1228
+ "to compute any reliable oob estimates."
1229
+ )
1230
+ n_predictions[n_predictions == 0] = 1
1231
+
1232
+ predictions /= n_predictions
1233
+
1234
+ self.oob_prediction_ = predictions
1235
+ self.oob_score_ = r2_score(y, predictions)
1236
+
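+ # Illustrative sketch (not part of scikit-learn): with n_samples=4 and a tree
+ # trained on bootstrap indices [0, 0, 2, 3], the OOB mask is
+ #
+ #   mask = ~indices_to_mask([0, 0, 2, 3], 4)   # -> [False, True, False, False]
+ #
+ # so only sample 1 receives that tree's OOB prediction; predictions are then
+ # averaged over all trees, and samples whose ``n_predictions`` stays at zero
+ # trigger the warning above and keep a prediction of 0 after the division.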
1237
+ def _more_tags(self):
1238
+ if self.estimator is None:
1239
+ estimator = DecisionTreeRegressor()
1240
+ else:
1241
+ estimator = self.estimator
1242
+ return {"allow_nan": _safe_tags(estimator, "allow_nan")}
venv/lib/python3.10/site-packages/sklearn/ensemble/_base.py ADDED
@@ -0,0 +1,301 @@
1
+ """Base class for ensemble-based estimators."""
2
+
3
+ # Authors: Gilles Louppe
4
+ # License: BSD 3 clause
5
+
6
+ from abc import ABCMeta, abstractmethod
7
+ from typing import List
8
+
9
+ import numpy as np
10
+ from joblib import effective_n_jobs
11
+
12
+ from ..base import BaseEstimator, MetaEstimatorMixin, clone, is_classifier, is_regressor
13
+ from ..utils import Bunch, _print_elapsed_time, check_random_state
14
+ from ..utils._tags import _safe_tags
15
+ from ..utils.metaestimators import _BaseComposition
16
+
17
+
18
+ def _fit_single_estimator(
19
+ estimator, X, y, sample_weight=None, message_clsname=None, message=None
20
+ ):
21
+ """Private function used to fit an estimator within a job."""
22
+ if sample_weight is not None:
23
+ try:
24
+ with _print_elapsed_time(message_clsname, message):
25
+ estimator.fit(X, y, sample_weight=sample_weight)
26
+ except TypeError as exc:
27
+ if "unexpected keyword argument 'sample_weight'" in str(exc):
28
+ raise TypeError(
29
+ "Underlying estimator {} does not support sample weights.".format(
30
+ estimator.__class__.__name__
31
+ )
32
+ ) from exc
33
+ raise
34
+ else:
35
+ with _print_elapsed_time(message_clsname, message):
36
+ estimator.fit(X, y)
37
+ return estimator
38
+
39
+
40
+ def _set_random_states(estimator, random_state=None):
41
+ """Set fixed random_state parameters for an estimator.
42
+
43
+ Finds all parameters ending ``random_state`` and sets them to integers
44
+ derived from ``random_state``.
45
+
46
+ Parameters
47
+ ----------
48
+ estimator : estimator supporting get/set_params
49
+ Estimator with potential randomness managed by random_state
50
+ parameters.
51
+
52
+ random_state : int, RandomState instance or None, default=None
53
+ Pseudo-random number generator to control the generation of the random
54
+ integers. Pass an int for reproducible output across multiple function
55
+ calls.
56
+ See :term:`Glossary <random_state>`.
57
+
58
+ Notes
59
+ -----
60
+ This does not necessarily set *all* ``random_state`` attributes that
61
+ control an estimator's randomness, only those accessible through
62
+ ``estimator.get_params()``. ``random_state``s not controlled include
63
+ those belonging to:
64
+
65
+ * cross-validation splitters
66
+ * ``scipy.stats`` rvs
67
+ """
68
+ random_state = check_random_state(random_state)
69
+ to_set = {}
70
+ for key in sorted(estimator.get_params(deep=True)):
71
+ if key == "random_state" or key.endswith("__random_state"):
72
+ to_set[key] = random_state.randint(np.iinfo(np.int32).max)
73
+
74
+ if to_set:
75
+ estimator.set_params(**to_set)
76
+
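+ # Illustrative sketch (not part of scikit-learn): for a nested estimator such as
+ # ``Pipeline([("svd", TruncatedSVD()), ("tree", DecisionTreeRegressor())])``,
+ # both ``svd__random_state`` and ``tree__random_state`` end with
+ # ``random_state`` and would be replaced by integers drawn from the provided
+ # RandomState, e.g.
+ #
+ #   rng = check_random_state(0)
+ #   _set_random_states(pipe, rng)   # `pipe` now carries fixed integer seeds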
77
+
78
+ class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
79
+ """Base class for all ensemble classes.
80
+
81
+ Warning: This class should not be used directly. Use derived classes
82
+ instead.
83
+
84
+ Parameters
85
+ ----------
86
+ estimator : object
87
+ The base estimator from which the ensemble is built.
88
+
89
+ n_estimators : int, default=10
90
+ The number of estimators in the ensemble.
91
+
92
+ estimator_params : list of str, default=tuple()
93
+ The list of attributes to use as parameters when instantiating a
94
+ new base estimator. If none are given, default parameters are used.
95
+
96
+ Attributes
97
+ ----------
98
+ estimator_ : estimator
99
+ The base estimator from which the ensemble is grown.
100
+
101
+ estimators_ : list of estimators
102
+ The collection of fitted base estimators.
103
+ """
104
+
105
+ # overwrite _required_parameters from MetaEstimatorMixin
106
+ _required_parameters: List[str] = []
107
+
108
+ @abstractmethod
109
+ def __init__(
110
+ self,
111
+ estimator=None,
112
+ *,
113
+ n_estimators=10,
114
+ estimator_params=tuple(),
115
+ ):
116
+ # Set parameters
117
+ self.estimator = estimator
118
+ self.n_estimators = n_estimators
119
+ self.estimator_params = estimator_params
120
+
121
+ # Don't instantiate estimators now! Parameters of estimator might
122
+ # still change. Eg., when grid-searching with the nested object syntax.
123
+ # self.estimators_ needs to be filled by the derived classes in fit.
124
+
125
+ def _validate_estimator(self, default=None):
126
+ """Check the base estimator.
127
+
128
+ Sets the `estimator_` attributes.
129
+ """
130
+ if self.estimator is not None:
131
+ self.estimator_ = self.estimator
132
+ else:
133
+ self.estimator_ = default
134
+
135
+ def _make_estimator(self, append=True, random_state=None):
136
+ """Make and configure a copy of the `estimator_` attribute.
137
+
138
+ Warning: This method should be used to properly instantiate new
139
+ sub-estimators.
140
+ """
141
+ estimator = clone(self.estimator_)
142
+ estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
143
+
144
+ if random_state is not None:
145
+ _set_random_states(estimator, random_state)
146
+
147
+ if append:
148
+ self.estimators_.append(estimator)
149
+
150
+ return estimator
151
+
152
+ def __len__(self):
153
+ """Return the number of estimators in the ensemble."""
154
+ return len(self.estimators_)
155
+
156
+ def __getitem__(self, index):
157
+ """Return the index'th estimator in the ensemble."""
158
+ return self.estimators_[index]
159
+
160
+ def __iter__(self):
161
+ """Return iterator over estimators in the ensemble."""
162
+ return iter(self.estimators_)
163
+
164
+
165
+ def _partition_estimators(n_estimators, n_jobs):
166
+ """Private function used to partition estimators between jobs."""
167
+ # Compute the number of jobs
168
+ n_jobs = min(effective_n_jobs(n_jobs), n_estimators)
169
+
170
+ # Partition estimators between jobs
171
+ n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
172
+ n_estimators_per_job[: n_estimators % n_jobs] += 1
173
+ starts = np.cumsum(n_estimators_per_job)
174
+
175
+ return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
176
+
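+ # Illustrative sketch (not part of scikit-learn): partitioning 10 estimators
+ # over 3 jobs gives chunk sizes [4, 3, 3] and start offsets [0, 4, 7, 10]:
+ #
+ #   n_jobs, counts, starts = _partition_estimators(10, 3)
+ #   # n_jobs == 3, counts == [4, 3, 3], starts == [0, 4, 7, 10]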
177
+
178
+ class _BaseHeterogeneousEnsemble(
179
+ MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta
180
+ ):
181
+ """Base class for heterogeneous ensemble of learners.
182
+
183
+ Parameters
184
+ ----------
185
+ estimators : list of (str, estimator) tuples
186
+ The estimators to combine in the ensemble. Each element of the
187
+ list is defined as a tuple of string (i.e. name of the estimator) and
188
+ an estimator instance. An estimator can be set to `'drop'` using
189
+ `set_params`.
190
+
191
+ Attributes
192
+ ----------
193
+ estimators_ : list of estimators
194
+ The elements of the estimators parameter, having been fitted on the
195
+ training data. If an estimator has been set to `'drop'`, it will not
196
+ appear in `estimators_`.
197
+ """
198
+
199
+ _required_parameters = ["estimators"]
200
+
201
+ @property
202
+ def named_estimators(self):
203
+ """Dictionary to access any fitted sub-estimators by name.
204
+
205
+ Returns
206
+ -------
207
+ :class:`~sklearn.utils.Bunch`
208
+ """
209
+ return Bunch(**dict(self.estimators))
210
+
211
+ @abstractmethod
212
+ def __init__(self, estimators):
213
+ self.estimators = estimators
214
+
215
+ def _validate_estimators(self):
216
+ if len(self.estimators) == 0:
217
+ raise ValueError(
218
+ "Invalid 'estimators' attribute, 'estimators' should be a "
219
+ "non-empty list of (string, estimator) tuples."
220
+ )
221
+ names, estimators = zip(*self.estimators)
222
+ # defined by MetaEstimatorMixin
223
+ self._validate_names(names)
224
+
225
+ has_estimator = any(est != "drop" for est in estimators)
226
+ if not has_estimator:
227
+ raise ValueError(
228
+ "All estimators are dropped. At least one is required "
229
+ "to be an estimator."
230
+ )
231
+
232
+ is_estimator_type = is_classifier if is_classifier(self) else is_regressor
233
+
234
+ for est in estimators:
235
+ if est != "drop" and not is_estimator_type(est):
236
+ raise ValueError(
237
+ "The estimator {} should be a {}.".format(
238
+ est.__class__.__name__, is_estimator_type.__name__[3:]
239
+ )
240
+ )
241
+
242
+ return names, estimators
243
+
244
+ def set_params(self, **params):
245
+ """
246
+ Set the parameters of an estimator from the ensemble.
247
+
248
+ Valid parameter keys can be listed with `get_params()`. Note that you
249
+ can directly set the parameters of the estimators contained in
250
+ `estimators`.
251
+
252
+ Parameters
253
+ ----------
254
+ **params : keyword arguments
255
+ Specific parameters using e.g.
256
+ `set_params(parameter_name=new_value)`. In addition to setting the
257
+ parameters of the ensemble, the individual estimators contained in
258
+ `estimators` can also be set, or removed by setting them to
259
+ 'drop'.
260
+
261
+ Returns
262
+ -------
263
+ self : object
264
+ Estimator instance.
265
+ """
266
+ super()._set_params("estimators", **params)
267
+ return self
268
+
269
+ def get_params(self, deep=True):
270
+ """
271
+ Get the parameters of an estimator from the ensemble.
272
+
273
+ Returns the parameters given in the constructor as well as the
274
+ estimators contained within the `estimators` parameter.
275
+
276
+ Parameters
277
+ ----------
278
+ deep : bool, default=True
279
+ Setting it to True gets the various estimators and the parameters
280
+ of the estimators as well.
281
+
282
+ Returns
283
+ -------
284
+ params : dict
285
+ Parameter and estimator names mapped to their values or parameter
286
+ names mapped to their values.
287
+ """
288
+ return super()._get_params("estimators", deep=deep)
289
+
290
+ def _more_tags(self):
291
+ try:
292
+ allow_nan = all(
293
+ _safe_tags(est[1])["allow_nan"] if est[1] != "drop" else True
294
+ for est in self.estimators
295
+ )
296
+ except Exception:
297
+ # If `estimators` does not comply with our API (list of tuples) then it will
298
+ # fail. In this case, we assume that `allow_nan` is False but the parameter
299
+ # validation will raise an error during `fit`.
300
+ allow_nan = False
301
+ return {"preserves_dtype": [], "allow_nan": allow_nan}
venv/lib/python3.10/site-packages/sklearn/ensemble/_forest.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_gb.py ADDED
@@ -0,0 +1,2168 @@
1
+ """Gradient Boosted Regression Trees.
2
+
3
+ This module contains methods for fitting gradient boosted regression trees for
4
+ both classification and regression.
5
+
6
+ The module structure is the following:
7
+
8
+ - The ``BaseGradientBoosting`` base class implements a common ``fit`` method
9
+ for all the estimators in the module. Regression and classification
10
+ only differ in the concrete ``LossFunction`` used.
11
+
12
+ - ``GradientBoostingClassifier`` implements gradient boosting for
13
+ classification problems.
14
+
15
+ - ``GradientBoostingRegressor`` implements gradient boosting for
16
+ regression problems.
17
+ """
18
+
19
+ # Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
20
+ # Arnaud Joly, Jacob Schreiber
21
+ # License: BSD 3 clause
22
+
23
+ import math
24
+ import warnings
25
+ from abc import ABCMeta, abstractmethod
26
+ from numbers import Integral, Real
27
+ from time import time
28
+
29
+ import numpy as np
30
+ from scipy.sparse import csc_matrix, csr_matrix, issparse
31
+
32
+ from .._loss.loss import (
33
+ _LOSSES,
34
+ AbsoluteError,
35
+ ExponentialLoss,
36
+ HalfBinomialLoss,
37
+ HalfMultinomialLoss,
38
+ HalfSquaredError,
39
+ HuberLoss,
40
+ PinballLoss,
41
+ )
42
+ from ..base import ClassifierMixin, RegressorMixin, _fit_context, is_classifier
43
+ from ..dummy import DummyClassifier, DummyRegressor
44
+ from ..exceptions import NotFittedError
45
+ from ..model_selection import train_test_split
46
+ from ..preprocessing import LabelEncoder
47
+ from ..tree import DecisionTreeRegressor
48
+ from ..tree._tree import DOUBLE, DTYPE, TREE_LEAF
49
+ from ..utils import check_array, check_random_state, column_or_1d
50
+ from ..utils._param_validation import HasMethods, Interval, StrOptions
51
+ from ..utils.multiclass import check_classification_targets
52
+ from ..utils.stats import _weighted_percentile
53
+ from ..utils.validation import _check_sample_weight, check_is_fitted
54
+ from ._base import BaseEnsemble
55
+ from ._gradient_boosting import _random_sample_mask, predict_stage, predict_stages
56
+
57
+ _LOSSES = _LOSSES.copy()
58
+ _LOSSES.update(
59
+ {
60
+ "quantile": PinballLoss,
61
+ "huber": HuberLoss,
62
+ }
63
+ )
64
+
65
+
66
+ def _safe_divide(numerator, denominator):
67
+ """Prevents overflow and division by zero."""
68
+ # This is used for classifiers where the denominator might become zero exactly.
69
+ # For instance for log loss, HalfBinomialLoss, if proba=0 or proba=1 exactly, then
70
+ # denominator = hessian = 0, and we should set the node value in the line search to
71
+ # zero as there is no improvement of the loss possible.
72
+ # For numerical safety, we do this already for extremely tiny values.
73
+ if abs(denominator) < 1e-150:
74
+ return 0.0
75
+ else:
79
+ # Cast to Python float to trigger a ZeroDivisionError without relying
80
+ # on `np.errstate` that is not supported by Pyodide.
81
+ result = float(numerator) / float(denominator)
82
+ if math.isinf(result):
83
+ warnings.warn("overflow encountered in _safe_divide", RuntimeWarning)
84
+ return result
85
+
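+ # Illustrative sketch (not part of scikit-learn): tiny or zero denominators
+ # short-circuit to 0.0, everything else is a plain float division:
+ #
+ #   _safe_divide(1.0, 0.0)      # -> 0.0 (|denominator| < 1e-150)
+ #   _safe_divide(3.0, 2.0)      # -> 1.5
+ #   _safe_divide(1e300, 1e-20)  # -> inf, with a RuntimeWarning about overflow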
86
+
87
+ def _init_raw_predictions(X, estimator, loss, use_predict_proba):
88
+ """Return the initial raw predictions.
89
+
90
+ Parameters
91
+ ----------
92
+ X : ndarray of shape (n_samples, n_features)
93
+ The data array.
94
+ estimator : object
95
+ The estimator to use to compute the predictions.
96
+ loss : BaseLoss
97
+ An instance of a loss function class.
98
+ use_predict_proba : bool
99
+ Whether estimator.predict_proba is used instead of estimator.predict.
100
+
101
+ Returns
102
+ -------
103
+ raw_predictions : ndarray of shape (n_samples, K)
104
+ The initial raw predictions. K is equal to 1 for binary
105
+ classification and regression, and equal to the number of classes
106
+ for multiclass classification. ``raw_predictions`` is cast
107
+ to float64.
108
+ """
109
+ # TODO: Use loss.fit_intercept_only where appropriate instead of
110
+ # DummyRegressor which is the default given by the `init` parameter,
111
+ # see also _init_state.
112
+ if use_predict_proba:
113
+ # Our parameter validation, set via _fit_context and _parameter_constraints
114
+ # already guarantees that estimator has a predict_proba method.
115
+ predictions = estimator.predict_proba(X)
116
+ if not loss.is_multiclass:
117
+ predictions = predictions[:, 1] # probability of positive class
118
+ eps = np.finfo(np.float32).eps # FIXME: This is quite large!
119
+ predictions = np.clip(predictions, eps, 1 - eps, dtype=np.float64)
120
+ else:
121
+ predictions = estimator.predict(X).astype(np.float64)
122
+
123
+ if predictions.ndim == 1:
124
+ return loss.link.link(predictions).reshape(-1, 1)
125
+ else:
126
+ return loss.link.link(predictions)
127
+
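+ # Illustrative sketch (not part of scikit-learn): for binary log loss the link
+ # is the logit, so a ``DummyClassifier(strategy="prior")`` init fitted on data
+ # with a 75% positive class yields an initial raw prediction of
+ # log(0.75 / 0.25) = log(3), about 1.0986, broadcast to shape (n_samples, 1).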
128
+
129
+ def _update_terminal_regions(
130
+ loss,
131
+ tree,
132
+ X,
133
+ y,
134
+ neg_gradient,
135
+ raw_prediction,
136
+ sample_weight,
137
+ sample_mask,
138
+ learning_rate=0.1,
139
+ k=0,
140
+ ):
141
+ """Update the leaf values to be predicted by the tree and raw_prediction.
142
+
143
+ The current raw predictions of the model (of this stage) are updated.
144
+
145
+ Additionally, the terminal regions (=leaves) of the given tree are updated as well.
146
+ This corresponds to the line search step in "Greedy Function Approximation" by
147
+ Friedman, Algorithm 1 step 5.
148
+
149
+ Update equals:
150
+ argmin_{x} loss(y_true, raw_prediction_old + x * tree.value)
151
+
152
+ For non-trivial cases like the Binomial loss, the update has no closed formula and
153
+ is an approximation, again, see the Friedman paper.
154
+
155
+ Also note that the update formula for the SquaredError is the identity. Therefore,
156
+ in this case, the leaf values don't need an update and only the raw_predictions are
157
+ updated (with the learning rate included).
158
+
159
+ Parameters
160
+ ----------
161
+ loss : BaseLoss
162
+ tree : tree.Tree
163
+ The tree object.
164
+ X : ndarray of shape (n_samples, n_features)
165
+ The data array.
166
+ y : ndarray of shape (n_samples,)
167
+ The target labels.
168
+ neg_gradient : ndarray of shape (n_samples,)
169
+ The negative gradient.
170
+ raw_prediction : ndarray of shape (n_samples, n_trees_per_iteration)
171
+ The raw predictions (i.e. values from the tree leaves) of the
172
+ tree ensemble at iteration ``i - 1``.
173
+ sample_weight : ndarray of shape (n_samples,)
174
+ The weight of each sample.
175
+ sample_mask : ndarray of shape (n_samples,)
176
+ The sample mask to be used.
177
+ learning_rate : float, default=0.1
178
+ Learning rate shrinks the contribution of each tree by
179
+ ``learning_rate``.
180
+ k : int, default=0
181
+ The index of the estimator being updated.
182
+ """
183
+ # compute leaf for each sample in ``X``.
184
+ terminal_regions = tree.apply(X)
185
+
186
+ if not isinstance(loss, HalfSquaredError):
187
+ # mask all which are not in sample mask.
188
+ masked_terminal_regions = terminal_regions.copy()
189
+ masked_terminal_regions[~sample_mask] = -1
190
+
191
+ if isinstance(loss, HalfBinomialLoss):
192
+
193
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
194
+ # Make a single Newton-Raphson step, see "Additive Logistic Regression:
195
+ # A Statistical View of Boosting" FHT00 and note that we use a slightly
196
+ # different version (factor 2) of "F" with proba=expit(raw_prediction).
197
+ # Our node estimate is given by:
198
+ # sum(w * (y - prob)) / sum(w * prob * (1 - prob))
199
+ # we take advantage that: y - prob = neg_gradient
200
+ neg_g = neg_gradient.take(indices, axis=0)
201
+ prob = y_ - neg_g
202
+ # numerator = negative gradient = y - prob
203
+ numerator = np.average(neg_g, weights=sw)
204
+ # denominator = hessian = prob * (1 - prob)
205
+ denominator = np.average(prob * (1 - prob), weights=sw)
206
+ return _safe_divide(numerator, denominator)
207
+
208
+ elif isinstance(loss, HalfMultinomialLoss):
209
+
210
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
211
+ # we take advantage that: y - prob = neg_gradient
212
+ neg_g = neg_gradient.take(indices, axis=0)
213
+ prob = y_ - neg_g
214
+ K = loss.n_classes
215
+ # numerator = negative gradient * (k - 1) / k
216
+ # Note: The factor (k - 1)/k appears in the original papers "Greedy
217
+ # Function Approximation" by Friedman and "Additive Logistic
218
+ # Regression" by Friedman, Hastie, Tibshirani. This factor is, however,
219
+ # wrong or at least arbitrary as it directly multiplies the
220
+ # learning_rate. We keep it for backward compatibility.
221
+ numerator = np.average(neg_g, weights=sw)
222
+ numerator *= (K - 1) / K
223
+ # denominator = (diagonal) hessian = prob * (1 - prob)
224
+ denominator = np.average(prob * (1 - prob), weights=sw)
225
+ return _safe_divide(numerator, denominator)
226
+
227
+ elif isinstance(loss, ExponentialLoss):
228
+
229
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
230
+ neg_g = neg_gradient.take(indices, axis=0)
231
+ # numerator = negative gradient = y * exp(-raw) - (1-y) * exp(raw)
232
+ numerator = np.average(neg_g, weights=sw)
233
+ # denominator = hessian = y * exp(-raw) + (1-y) * exp(raw)
234
+ # if y=0: hessian = exp(raw) = -neg_g
235
+ # y=1: hessian = exp(-raw) = neg_g
236
+ hessian = neg_g.copy()
237
+ hessian[y_ == 0] *= -1
238
+ denominator = np.average(hessian, weights=sw)
239
+ return _safe_divide(numerator, denominator)
240
+
241
+ else:
242
+
243
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
244
+ return loss.fit_intercept_only(
245
+ y_true=y_ - raw_prediction[indices, k],
246
+ sample_weight=sw,
247
+ )
248
+
249
+ # update each leaf (= perform line search)
250
+ for leaf in np.nonzero(tree.children_left == TREE_LEAF)[0]:
251
+ indices = np.nonzero(masked_terminal_regions == leaf)[
252
+ 0
253
+ ] # of terminal regions
254
+ y_ = y.take(indices, axis=0)
255
+ sw = None if sample_weight is None else sample_weight[indices]
256
+ update = compute_update(y_, indices, neg_gradient, raw_prediction, k)
257
+
258
+ # TODO: Multiply here by learning rate instead of everywhere else.
259
+ tree.value[leaf, 0, 0] = update
260
+
261
+ # update predictions (both in-bag and out-of-bag)
262
+ raw_prediction[:, k] += learning_rate * tree.value[:, 0, 0].take(
263
+ terminal_regions, axis=0
264
+ )
265
+
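+ # Illustrative sketch (not part of scikit-learn): for binary log loss the leaf
+ # update above is a single Newton step. With unweighted leaf samples
+ # y = [1, 0, 1] and predicted probabilities p = [0.6, 0.1, 0.7], the residuals
+ # are y - p = [0.4, -0.1, 0.3] and the leaf value becomes
+ #
+ #   mean(y - p) / mean(p * (1 - p)) = 0.2 / 0.18, about 1.11,
+ #
+ # and each sample in that leaf then has its raw prediction increased by
+ # learning_rate * 1.11.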
266
+
267
+ def set_huber_delta(loss, y_true, raw_prediction, sample_weight=None):
268
+ """Calculate and set self.closs.delta based on self.quantile."""
269
+ abserr = np.abs(y_true - raw_prediction.squeeze())
270
+ # sample_weight is always an ndarray, never None.
271
+ delta = _weighted_percentile(abserr, sample_weight, 100 * loss.quantile)
272
+ loss.closs.delta = float(delta)
273
+
274
+
275
+ class VerboseReporter:
276
+ """Reports verbose output to stdout.
277
+
278
+ Parameters
279
+ ----------
280
+ verbose : int
281
+ Verbosity level. If ``verbose==1`` output is printed once in a while
282
+ (when iteration mod verbose_mod is zero); if larger than 1, then output
283
+ is printed for each update.
284
+ """
285
+
286
+ def __init__(self, verbose):
287
+ self.verbose = verbose
288
+
289
+ def init(self, est, begin_at_stage=0):
290
+ """Initialize reporter
291
+
292
+ Parameters
293
+ ----------
294
+ est : Estimator
295
+ The estimator
296
+
297
+ begin_at_stage : int, default=0
298
+ stage at which to begin reporting
299
+ """
300
+ # header fields and line format str
301
+ header_fields = ["Iter", "Train Loss"]
302
+ verbose_fmt = ["{iter:>10d}", "{train_score:>16.4f}"]
303
+ # do oob?
304
+ if est.subsample < 1:
305
+ header_fields.append("OOB Improve")
306
+ verbose_fmt.append("{oob_impr:>16.4f}")
307
+ header_fields.append("Remaining Time")
308
+ verbose_fmt.append("{remaining_time:>16s}")
309
+
310
+ # print the header line
311
+ print(("%10s " + "%16s " * (len(header_fields) - 1)) % tuple(header_fields))
312
+
313
+ self.verbose_fmt = " ".join(verbose_fmt)
314
+ # plot verbose info each time i % verbose_mod == 0
315
+ self.verbose_mod = 1
316
+ self.start_time = time()
317
+ self.begin_at_stage = begin_at_stage
318
+
319
+ def update(self, j, est):
320
+ """Update reporter with new iteration.
321
+
322
+ Parameters
323
+ ----------
324
+ j : int
325
+ The new iteration.
326
+ est : Estimator
327
+ The estimator.
328
+ """
329
+ do_oob = est.subsample < 1
330
+ # we need to take into account if we fit additional estimators.
331
+ i = j - self.begin_at_stage # iteration relative to the start iter
332
+ if (i + 1) % self.verbose_mod == 0:
333
+ oob_impr = est.oob_improvement_[j] if do_oob else 0
334
+ remaining_time = (
335
+ (est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)
336
+ )
337
+ if remaining_time > 60:
338
+ remaining_time = "{0:.2f}m".format(remaining_time / 60.0)
339
+ else:
340
+ remaining_time = "{0:.2f}s".format(remaining_time)
341
+ print(
342
+ self.verbose_fmt.format(
343
+ iter=j + 1,
344
+ train_score=est.train_score_[j],
345
+ oob_impr=oob_impr,
346
+ remaining_time=remaining_time,
347
+ )
348
+ )
349
+ if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
350
+ # adjust verbose frequency (powers of 10)
351
+ self.verbose_mod *= 10
352
+
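+ # Illustrative sketch (not part of scikit-learn): with ``verbose=1`` the
+ # reporter prints every iteration up to 10, then every 10th up to 100, then
+ # every 100th, and so on, because ``verbose_mod`` is multiplied by 10 each time
+ # ``(i + 1) // (verbose_mod * 10) > 0``. With ``verbose > 1`` that branch is
+ # skipped, so every iteration is printed.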
353
+
354
+ class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta):
355
+ """Abstract base class for Gradient Boosting."""
356
+
357
+ _parameter_constraints: dict = {
358
+ **DecisionTreeRegressor._parameter_constraints,
359
+ "learning_rate": [Interval(Real, 0.0, None, closed="left")],
360
+ "n_estimators": [Interval(Integral, 1, None, closed="left")],
361
+ "criterion": [StrOptions({"friedman_mse", "squared_error"})],
362
+ "subsample": [Interval(Real, 0.0, 1.0, closed="right")],
363
+ "verbose": ["verbose"],
364
+ "warm_start": ["boolean"],
365
+ "validation_fraction": [Interval(Real, 0.0, 1.0, closed="neither")],
366
+ "n_iter_no_change": [Interval(Integral, 1, None, closed="left"), None],
367
+ "tol": [Interval(Real, 0.0, None, closed="left")],
368
+ }
369
+ _parameter_constraints.pop("splitter")
370
+ _parameter_constraints.pop("monotonic_cst")
371
+
372
+ @abstractmethod
373
+ def __init__(
374
+ self,
375
+ *,
376
+ loss,
377
+ learning_rate,
378
+ n_estimators,
379
+ criterion,
380
+ min_samples_split,
381
+ min_samples_leaf,
382
+ min_weight_fraction_leaf,
383
+ max_depth,
384
+ min_impurity_decrease,
385
+ init,
386
+ subsample,
387
+ max_features,
388
+ ccp_alpha,
389
+ random_state,
390
+ alpha=0.9,
391
+ verbose=0,
392
+ max_leaf_nodes=None,
393
+ warm_start=False,
394
+ validation_fraction=0.1,
395
+ n_iter_no_change=None,
396
+ tol=1e-4,
397
+ ):
398
+ self.n_estimators = n_estimators
399
+ self.learning_rate = learning_rate
400
+ self.loss = loss
401
+ self.criterion = criterion
402
+ self.min_samples_split = min_samples_split
403
+ self.min_samples_leaf = min_samples_leaf
404
+ self.min_weight_fraction_leaf = min_weight_fraction_leaf
405
+ self.subsample = subsample
406
+ self.max_features = max_features
407
+ self.max_depth = max_depth
408
+ self.min_impurity_decrease = min_impurity_decrease
409
+ self.ccp_alpha = ccp_alpha
410
+ self.init = init
411
+ self.random_state = random_state
412
+ self.alpha = alpha
413
+ self.verbose = verbose
414
+ self.max_leaf_nodes = max_leaf_nodes
415
+ self.warm_start = warm_start
416
+ self.validation_fraction = validation_fraction
417
+ self.n_iter_no_change = n_iter_no_change
418
+ self.tol = tol
419
+
420
+ @abstractmethod
421
+ def _encode_y(self, y=None, sample_weight=None):
422
+ """Called by fit to validate and encode y."""
423
+
424
+ @abstractmethod
425
+ def _get_loss(self, sample_weight):
426
+ """Get loss object from sklearn._loss.loss."""
427
+
428
+ def _fit_stage(
429
+ self,
430
+ i,
431
+ X,
432
+ y,
433
+ raw_predictions,
434
+ sample_weight,
435
+ sample_mask,
436
+ random_state,
437
+ X_csc=None,
438
+ X_csr=None,
439
+ ):
440
+ """Fit another stage of ``n_trees_per_iteration_`` trees."""
441
+ original_y = y
442
+
443
+ if isinstance(self._loss, HuberLoss):
444
+ set_huber_delta(
445
+ loss=self._loss,
446
+ y_true=y,
447
+ raw_prediction=raw_predictions,
448
+ sample_weight=sample_weight,
449
+ )
450
+ # TODO: Without oob, i.e. with self.subsample = 1.0, we could call
451
+ # self._loss.loss_gradient and use it to set train_score_.
452
+ # But note that train_score_[i] is the score AFTER fitting the i-th tree.
453
+ # Note: We need the negative gradient!
454
+ neg_gradient = -self._loss.gradient(
455
+ y_true=y,
456
+ raw_prediction=raw_predictions,
457
+ sample_weight=None, # We pass sample_weights to the tree directly.
458
+ )
459
+ # 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
460
+ # on neg_gradient to simplify the loop over n_trees_per_iteration_.
461
+ if neg_gradient.ndim == 1:
462
+ neg_g_view = neg_gradient.reshape((-1, 1))
463
+ else:
464
+ neg_g_view = neg_gradient
465
+
466
+ for k in range(self.n_trees_per_iteration_):
467
+ if self._loss.is_multiclass:
468
+ y = np.array(original_y == k, dtype=np.float64)
469
+
470
+ # induce regression tree on the negative gradient
471
+ tree = DecisionTreeRegressor(
472
+ criterion=self.criterion,
473
+ splitter="best",
474
+ max_depth=self.max_depth,
475
+ min_samples_split=self.min_samples_split,
476
+ min_samples_leaf=self.min_samples_leaf,
477
+ min_weight_fraction_leaf=self.min_weight_fraction_leaf,
478
+ min_impurity_decrease=self.min_impurity_decrease,
479
+ max_features=self.max_features,
480
+ max_leaf_nodes=self.max_leaf_nodes,
481
+ random_state=random_state,
482
+ ccp_alpha=self.ccp_alpha,
483
+ )
484
+
485
+ if self.subsample < 1.0:
486
+ # no inplace multiplication!
487
+ sample_weight = sample_weight * sample_mask.astype(np.float64)
488
+
489
+ X = X_csc if X_csc is not None else X
490
+ tree.fit(
491
+ X, neg_g_view[:, k], sample_weight=sample_weight, check_input=False
492
+ )
493
+
494
+ # update tree leaves
495
+ X_for_tree_update = X_csr if X_csr is not None else X
496
+ _update_terminal_regions(
497
+ self._loss,
498
+ tree.tree_,
499
+ X_for_tree_update,
500
+ y,
501
+ neg_g_view[:, k],
502
+ raw_predictions,
503
+ sample_weight,
504
+ sample_mask,
505
+ learning_rate=self.learning_rate,
506
+ k=k,
507
+ )
508
+
509
+ # add tree to ensemble
510
+ self.estimators_[i, k] = tree
511
+
512
+ return raw_predictions
513
+
514
+ def _set_max_features(self):
515
+ """Set self.max_features_."""
516
+ if isinstance(self.max_features, str):
517
+ if self.max_features == "auto":
518
+ if is_classifier(self):
519
+ max_features = max(1, int(np.sqrt(self.n_features_in_)))
520
+ else:
521
+ max_features = self.n_features_in_
522
+ elif self.max_features == "sqrt":
523
+ max_features = max(1, int(np.sqrt(self.n_features_in_)))
524
+ else: # self.max_features == "log2"
525
+ max_features = max(1, int(np.log2(self.n_features_in_)))
526
+ elif self.max_features is None:
527
+ max_features = self.n_features_in_
528
+ elif isinstance(self.max_features, Integral):
529
+ max_features = self.max_features
530
+ else: # float
531
+ max_features = max(1, int(self.max_features * self.n_features_in_))
532
+
533
+ self.max_features_ = max_features
534
+
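+ # Illustrative sketch (not part of scikit-learn): with ``n_features_in_ = 100``
+ #
+ #   max_features="sqrt"  -> max_features_ = 10
+ #   max_features="log2"  -> max_features_ = 6    (int(log2(100)) == 6)
+ #   max_features=0.25    -> max_features_ = 25   (fraction of the features)
+ #   max_features=None    -> max_features_ = 100  (all features)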
535
+ def _init_state(self):
536
+ """Initialize model state and allocate model state data structures."""
537
+
538
+ self.init_ = self.init
539
+ if self.init_ is None:
540
+ if is_classifier(self):
541
+ self.init_ = DummyClassifier(strategy="prior")
542
+ elif isinstance(self._loss, (AbsoluteError, HuberLoss)):
543
+ self.init_ = DummyRegressor(strategy="quantile", quantile=0.5)
544
+ elif isinstance(self._loss, PinballLoss):
545
+ self.init_ = DummyRegressor(strategy="quantile", quantile=self.alpha)
546
+ else:
547
+ self.init_ = DummyRegressor(strategy="mean")
548
+
549
+ self.estimators_ = np.empty(
550
+ (self.n_estimators, self.n_trees_per_iteration_), dtype=object
551
+ )
552
+ self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
553
+ # do oob?
554
+ if self.subsample < 1.0:
555
+ self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64)
556
+ self.oob_scores_ = np.zeros((self.n_estimators), dtype=np.float64)
557
+ self.oob_score_ = np.nan
558
+
559
+ def _clear_state(self):
560
+ """Clear the state of the gradient boosting model."""
561
+ if hasattr(self, "estimators_"):
562
+ self.estimators_ = np.empty((0, 0), dtype=object)
563
+ if hasattr(self, "train_score_"):
564
+ del self.train_score_
565
+ if hasattr(self, "oob_improvement_"):
566
+ del self.oob_improvement_
567
+ if hasattr(self, "oob_scores_"):
568
+ del self.oob_scores_
569
+ if hasattr(self, "oob_score_"):
570
+ del self.oob_score_
571
+ if hasattr(self, "init_"):
572
+ del self.init_
573
+ if hasattr(self, "_rng"):
574
+ del self._rng
575
+
576
+ def _resize_state(self):
577
+ """Add additional ``n_estimators`` entries to all attributes."""
578
+ # self.n_estimators is the number of additional est to fit
579
+ total_n_estimators = self.n_estimators
580
+ if total_n_estimators < self.estimators_.shape[0]:
581
+ raise ValueError(
582
+ "resize with smaller n_estimators %d < %d"
583
+ % (total_n_estimators, self.estimators_.shape[0])
584
+ )
585
+
586
+ self.estimators_ = np.resize(
587
+ self.estimators_, (total_n_estimators, self.n_trees_per_iteration_)
588
+ )
589
+ self.train_score_ = np.resize(self.train_score_, total_n_estimators)
590
+ if self.subsample < 1 or hasattr(self, "oob_improvement_"):
591
+ # if do oob resize arrays or create new if not available
592
+ if hasattr(self, "oob_improvement_"):
593
+ self.oob_improvement_ = np.resize(
594
+ self.oob_improvement_, total_n_estimators
595
+ )
596
+ self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
597
+ self.oob_score_ = np.nan
598
+ else:
599
+ self.oob_improvement_ = np.zeros(
600
+ (total_n_estimators,), dtype=np.float64
601
+ )
602
+ self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
603
+ self.oob_score_ = np.nan
604
+
605
+ def _is_fitted(self):
606
+ return len(getattr(self, "estimators_", [])) > 0
607
+
608
+ def _check_initialized(self):
609
+ """Check that the estimator is initialized, raising an error if not."""
610
+ check_is_fitted(self)
611
+
612
+ @_fit_context(
613
+ # GradientBoosting*.init is not validated yet
614
+ prefer_skip_nested_validation=False
615
+ )
616
+ def fit(self, X, y, sample_weight=None, monitor=None):
617
+ """Fit the gradient boosting model.
618
+
619
+ Parameters
620
+ ----------
621
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
622
+ The input samples. Internally, it will be converted to
623
+ ``dtype=np.float32`` and if a sparse matrix is provided
624
+ to a sparse ``csr_matrix``.
625
+
626
+ y : array-like of shape (n_samples,)
627
+ Target values (strings or integers in classification, real numbers
628
+ in regression)
629
+ For classification, labels must correspond to classes.
630
+
631
+ sample_weight : array-like of shape (n_samples,), default=None
632
+ Sample weights. If None, then samples are equally weighted. Splits
633
+ that would create child nodes with net zero or negative weight are
634
+ ignored while searching for a split in each node. In the case of
635
+ classification, splits are also ignored if they would result in any
636
+ single class carrying a negative weight in either child node.
637
+
638
+ monitor : callable, default=None
639
+ The monitor is called after each iteration with the current
640
+ iteration, a reference to the estimator and the local variables of
641
+ ``_fit_stages`` as keyword arguments ``callable(i, self,
642
+ locals())``. If the callable returns ``True`` the fitting procedure
643
+ is stopped. The monitor can be used for various things such as
644
+ computing held-out estimates, early stopping, model introspection, and
645
+ snapshotting.
646
+
647
+ Returns
648
+ -------
649
+ self : object
650
+ Fitted estimator.
651
+ """
652
+ if not self.warm_start:
653
+ self._clear_state()
654
+
655
+ # Check input
656
+ # Since check_array converts both X and y to the same dtype, but the
657
+ # trees use different types for X and y, checking them separately.
658
+
659
+ X, y = self._validate_data(
660
+ X, y, accept_sparse=["csr", "csc", "coo"], dtype=DTYPE, multi_output=True
661
+ )
662
+ sample_weight_is_none = sample_weight is None
663
+ sample_weight = _check_sample_weight(sample_weight, X)
664
+ if sample_weight_is_none:
665
+ y = self._encode_y(y=y, sample_weight=None)
666
+ else:
667
+ y = self._encode_y(y=y, sample_weight=sample_weight)
668
+ y = column_or_1d(y, warn=True) # TODO: Is this still required?
669
+
670
+ self._set_max_features()
671
+
672
+ # self.loss is guaranteed to be a string
673
+ self._loss = self._get_loss(sample_weight=sample_weight)
674
+
675
+ if self.n_iter_no_change is not None:
676
+ stratify = y if is_classifier(self) else None
677
+ (
678
+ X_train,
679
+ X_val,
680
+ y_train,
681
+ y_val,
682
+ sample_weight_train,
683
+ sample_weight_val,
684
+ ) = train_test_split(
685
+ X,
686
+ y,
687
+ sample_weight,
688
+ random_state=self.random_state,
689
+ test_size=self.validation_fraction,
690
+ stratify=stratify,
691
+ )
692
+ if is_classifier(self):
693
+ if self.n_classes_ != np.unique(y_train).shape[0]:
694
+ # We choose to error here. The problem is that the init
695
+ # estimator would be trained on y, which has some missing
696
+ # classes now, so its predictions would not have the
697
+ # correct shape.
698
+ raise ValueError(
699
+ "The training data after the early stopping split "
700
+ "is missing some classes. Try using another random "
701
+ "seed."
702
+ )
703
+ else:
704
+ X_train, y_train, sample_weight_train = X, y, sample_weight
705
+ X_val = y_val = sample_weight_val = None
706
+
707
+ n_samples = X_train.shape[0]
708
+
709
+ # First time calling fit.
710
+ if not self._is_fitted():
711
+ # init state
712
+ self._init_state()
713
+
714
+ # fit initial model and initialize raw predictions
715
+ if self.init_ == "zero":
716
+ raw_predictions = np.zeros(
717
+ shape=(n_samples, self.n_trees_per_iteration_),
718
+ dtype=np.float64,
719
+ )
720
+ else:
721
+ # XXX clean this once we have a support_sample_weight tag
722
+ if sample_weight_is_none:
723
+ self.init_.fit(X_train, y_train)
724
+ else:
725
+ msg = (
726
+ "The initial estimator {} does not support sample "
727
+ "weights.".format(self.init_.__class__.__name__)
728
+ )
729
+ try:
730
+ self.init_.fit(
731
+ X_train, y_train, sample_weight=sample_weight_train
732
+ )
733
+ except TypeError as e:
734
+ if "unexpected keyword argument 'sample_weight'" in str(e):
735
+ # regular estimator without SW support
736
+ raise ValueError(msg) from e
737
+ else: # regular estimator whose input checking failed
738
+ raise
739
+ except ValueError as e:
740
+ if (
741
+ "pass parameters to specific steps of "
742
+ "your pipeline using the "
743
+ "stepname__parameter"
744
+ in str(e)
745
+ ): # pipeline
746
+ raise ValueError(msg) from e
747
+ else: # regular estimator whose input checking failed
748
+ raise
749
+
750
+ raw_predictions = _init_raw_predictions(
751
+ X_train, self.init_, self._loss, is_classifier(self)
752
+ )
753
+
754
+ begin_at_stage = 0
755
+
756
+ # The rng state must be preserved if warm_start is True
757
+ self._rng = check_random_state(self.random_state)
758
+
759
+ # warm start: this is not the first time fit was called
760
+ else:
761
+ # add more estimators to fitted model
762
+ # invariant: warm_start = True
763
+ if self.n_estimators < self.estimators_.shape[0]:
764
+ raise ValueError(
765
+ "n_estimators=%d must be larger or equal to "
766
+ "estimators_.shape[0]=%d when "
767
+ "warm_start==True" % (self.n_estimators, self.estimators_.shape[0])
768
+ )
769
+ begin_at_stage = self.estimators_.shape[0]
770
+ # The requirements of _raw_predict
771
+ # are more constrained than fit. It accepts only CSR
772
+ # matrices. Finite values have already been checked in _validate_data.
773
+ X_train = check_array(
774
+ X_train,
775
+ dtype=DTYPE,
776
+ order="C",
777
+ accept_sparse="csr",
778
+ force_all_finite=False,
779
+ )
780
+ raw_predictions = self._raw_predict(X_train)
781
+ self._resize_state()
782
+
783
+ # fit the boosting stages
784
+ n_stages = self._fit_stages(
785
+ X_train,
786
+ y_train,
787
+ raw_predictions,
788
+ sample_weight_train,
789
+ self._rng,
790
+ X_val,
791
+ y_val,
792
+ sample_weight_val,
793
+ begin_at_stage,
794
+ monitor,
795
+ )
796
+
797
+ # change shape of arrays after fit (early-stopping or additional ests)
798
+ if n_stages != self.estimators_.shape[0]:
799
+ self.estimators_ = self.estimators_[:n_stages]
800
+ self.train_score_ = self.train_score_[:n_stages]
801
+ if hasattr(self, "oob_improvement_"):
802
+ # OOB scores were computed
803
+ self.oob_improvement_ = self.oob_improvement_[:n_stages]
804
+ self.oob_scores_ = self.oob_scores_[:n_stages]
805
+ self.oob_score_ = self.oob_scores_[-1]
806
+ self.n_estimators_ = n_stages
807
+ return self
808
+
809
+ def _fit_stages(
810
+ self,
811
+ X,
812
+ y,
813
+ raw_predictions,
814
+ sample_weight,
815
+ random_state,
816
+ X_val,
817
+ y_val,
818
+ sample_weight_val,
819
+ begin_at_stage=0,
820
+ monitor=None,
821
+ ):
822
+ """Iteratively fits the stages.
823
+
824
+ For each stage it computes the progress (OOB, train score)
825
+ and delegates to ``_fit_stage``.
826
+ Returns the number of stages fit; might differ from ``n_estimators``
827
+ due to early stopping.
828
+ """
829
+ n_samples = X.shape[0]
830
+ do_oob = self.subsample < 1.0
831
+ sample_mask = np.ones((n_samples,), dtype=bool)
832
+ n_inbag = max(1, int(self.subsample * n_samples))
833
+
834
+ if self.verbose:
835
+ verbose_reporter = VerboseReporter(verbose=self.verbose)
836
+ verbose_reporter.init(self, begin_at_stage)
837
+
838
+ X_csc = csc_matrix(X) if issparse(X) else None
839
+ X_csr = csr_matrix(X) if issparse(X) else None
840
+
841
+ if self.n_iter_no_change is not None:
842
+ loss_history = np.full(self.n_iter_no_change, np.inf)
843
+ # We create a generator to get the predictions for X_val after
844
+ # the addition of each successive stage
845
+ y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False)
846
+
847
+ # Older versions of GBT had its own loss functions. With the new common
848
+ # private loss function submodule _loss, we often are a factor of 2
849
+ # away from the old version. Here we keep backward compatibility for
850
+ # oob_scores_ and oob_improvement_, even if the old way is quite
851
+ # inconsistent (sometimes the gradient is half the gradient, sometimes
852
+ # not).
853
+ if isinstance(
854
+ self._loss,
855
+ (
856
+ HalfSquaredError,
857
+ HalfBinomialLoss,
858
+ ),
859
+ ):
860
+ factor = 2
861
+ else:
862
+ factor = 1
863
+
864
+ # perform boosting iterations
865
+ i = begin_at_stage
866
+ for i in range(begin_at_stage, self.n_estimators):
867
+ # subsampling
868
+ if do_oob:
869
+ sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
870
+ y_oob_masked = y[~sample_mask]
871
+ sample_weight_oob_masked = sample_weight[~sample_mask]
872
+ if i == 0: # store the initial loss to compute the OOB score
873
+ initial_loss = factor * self._loss(
874
+ y_true=y_oob_masked,
875
+ raw_prediction=raw_predictions[~sample_mask],
876
+ sample_weight=sample_weight_oob_masked,
877
+ )
878
+
879
+ # fit next stage of trees
880
+ raw_predictions = self._fit_stage(
881
+ i,
882
+ X,
883
+ y,
884
+ raw_predictions,
885
+ sample_weight,
886
+ sample_mask,
887
+ random_state,
888
+ X_csc=X_csc,
889
+ X_csr=X_csr,
890
+ )
891
+
892
+ # track loss
893
+ if do_oob:
894
+ self.train_score_[i] = factor * self._loss(
895
+ y_true=y[sample_mask],
896
+ raw_prediction=raw_predictions[sample_mask],
897
+ sample_weight=sample_weight[sample_mask],
898
+ )
899
+ self.oob_scores_[i] = factor * self._loss(
900
+ y_true=y_oob_masked,
901
+ raw_prediction=raw_predictions[~sample_mask],
902
+ sample_weight=sample_weight_oob_masked,
903
+ )
904
+ previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1]
905
+ self.oob_improvement_[i] = previous_loss - self.oob_scores_[i]
906
+ self.oob_score_ = self.oob_scores_[-1]
907
+ else:
908
+ # no need to fancy index w/ no subsampling
909
+ self.train_score_[i] = factor * self._loss(
910
+ y_true=y,
911
+ raw_prediction=raw_predictions,
912
+ sample_weight=sample_weight,
913
+ )
914
+
915
+ if self.verbose > 0:
916
+ verbose_reporter.update(i, self)
917
+
918
+ if monitor is not None:
919
+ early_stopping = monitor(i, self, locals())
920
+ if early_stopping:
921
+ break
922
+
923
+ # We also provide an early stopping based on the score from
924
+ # validation set (X_val, y_val), if n_iter_no_change is set
925
+ if self.n_iter_no_change is not None:
926
+ # By calling next(y_val_pred_iter), we get the predictions
927
+ # for X_val after the addition of the current stage
928
+ validation_loss = factor * self._loss(
929
+ y_val, next(y_val_pred_iter), sample_weight_val
930
+ )
931
+
932
+ # Require validation_score to be better (less) than at least
933
+ # one of the last n_iter_no_change evaluations
934
+ if np.any(validation_loss + self.tol < loss_history):
935
+ loss_history[i % len(loss_history)] = validation_loss
936
+ else:
937
+ break
938
+
939
+ return i + 1
940
+
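+ # Illustrative sketch (not part of scikit-learn): ``loss_history`` acts as a
+ # ring buffer of the last ``n_iter_no_change`` validation losses. For example,
+ # with n_iter_no_change=3, tol=0.01 and recorded losses [0.50, 0.49, 0.485],
+ # a new validation loss of 0.48 satisfies 0.48 + 0.01 < 0.49 and overwrites
+ # the slot at index ``i % 3``, while a new loss of 0.49 improves on none of
+ # the three entries, so the loop breaks and fewer than ``n_estimators`` stages
+ # are kept.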
941
+ def _make_estimator(self, append=True):
942
+ # we don't need _make_estimator
943
+ raise NotImplementedError()
944
+
945
+ def _raw_predict_init(self, X):
946
+ """Check input and compute raw predictions of the init estimator."""
947
+ self._check_initialized()
948
+ X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
949
+ if self.init_ == "zero":
950
+ raw_predictions = np.zeros(
951
+ shape=(X.shape[0], self.n_trees_per_iteration_), dtype=np.float64
952
+ )
953
+ else:
954
+ raw_predictions = _init_raw_predictions(
955
+ X, self.init_, self._loss, is_classifier(self)
956
+ )
957
+ return raw_predictions
958
+
959
+ def _raw_predict(self, X):
960
+ """Return the sum of the trees raw predictions (+ init estimator)."""
961
+ check_is_fitted(self)
962
+ raw_predictions = self._raw_predict_init(X)
963
+ predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
964
+ return raw_predictions
965
+
966
+ def _staged_raw_predict(self, X, check_input=True):
967
+ """Compute raw predictions of ``X`` for each iteration.
968
+
969
+ This method allows monitoring (i.e. determining the error on a test set)
970
+ after each stage.
971
+
972
+ Parameters
973
+ ----------
974
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
975
+ The input samples. Internally, it will be converted to
976
+ ``dtype=np.float32`` and if a sparse matrix is provided
977
+ to a sparse ``csr_matrix``.
978
+
979
+ check_input : bool, default=True
980
+ If False, the input arrays X will not be checked.
981
+
982
+ Returns
983
+ -------
984
+ raw_predictions : generator of ndarray of shape (n_samples, k)
985
+ The raw predictions of the input samples. The order of the
986
+ classes corresponds to that in the attribute :term:`classes_`.
987
+ Regression and binary classification are special cases with
988
+ ``k == 1``, otherwise ``k==n_classes``.
989
+ """
990
+ if check_input:
991
+ X = self._validate_data(
992
+ X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
993
+ )
994
+ raw_predictions = self._raw_predict_init(X)
995
+ for i in range(self.estimators_.shape[0]):
996
+ predict_stage(self.estimators_, i, X, self.learning_rate, raw_predictions)
997
+ yield raw_predictions.copy()
998
+
999
+ @property
1000
+ def feature_importances_(self):
1001
+ """The impurity-based feature importances.
1002
+
1003
+ The higher, the more important the feature.
1004
+ The importance of a feature is computed as the (normalized)
1005
+ total reduction of the criterion brought by that feature. It is also
1006
+ known as the Gini importance.
1007
+
1008
+ Warning: impurity-based feature importances can be misleading for
1009
+ high cardinality features (many unique values). See
1010
+ :func:`sklearn.inspection.permutation_importance` as an alternative.
1011
+
1012
+ Returns
1013
+ -------
1014
+ feature_importances_ : ndarray of shape (n_features,)
1015
+ The values of this array sum to 1, unless all trees are single node
1016
+ trees consisting of only the root node, in which case it will be an
1017
+ array of zeros.
1018
+ """
1019
+ self._check_initialized()
1020
+
1021
+ relevant_trees = [
1022
+ tree
1023
+ for stage in self.estimators_
1024
+ for tree in stage
1025
+ if tree.tree_.node_count > 1
1026
+ ]
1027
+ if not relevant_trees:
1028
+ # degenerate case where all trees have only one node
1029
+ return np.zeros(shape=self.n_features_in_, dtype=np.float64)
1030
+
1031
+ relevant_feature_importances = [
1032
+ tree.tree_.compute_feature_importances(normalize=False)
1033
+ for tree in relevant_trees
1034
+ ]
1035
+ avg_feature_importances = np.mean(
1036
+ relevant_feature_importances, axis=0, dtype=np.float64
1037
+ )
1038
+ return avg_feature_importances / np.sum(avg_feature_importances)
1039
+
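+ # Illustrative sketch (not part of scikit-learn): trees consisting only of the
+ # root node are skipped, the unnormalized importances of the remaining trees
+ # are averaged feature-wise, and the average is renormalized to sum to 1, e.g.
+ # two trees with raw importances [0.2, 0.0, 0.2] and [0.0, 0.4, 0.0] yield
+ # [0.25, 0.5, 0.25].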
1040
+ def _compute_partial_dependence_recursion(self, grid, target_features):
1041
+ """Fast partial dependence computation.
1042
+
1043
+ Parameters
1044
+ ----------
1045
+ grid : ndarray of shape (n_samples, n_target_features)
1046
+ The grid points on which the partial dependence should be
1047
+ evaluated.
1048
+ target_features : ndarray of shape (n_target_features,)
1049
+ The set of target features for which the partial dependence
1050
+ should be evaluated.
1051
+
1052
+ Returns
1053
+ -------
1054
+ averaged_predictions : ndarray of shape \
1055
+ (n_trees_per_iteration_, n_samples)
1056
+ The value of the partial dependence function on each grid point.
1057
+ """
1058
+ if self.init is not None:
1059
+ warnings.warn(
1060
+ "Using recursion method with a non-constant init predictor "
1061
+ "will lead to incorrect partial dependence values. "
1062
+ "Got init=%s."
1063
+ % self.init,
1064
+ UserWarning,
1065
+ )
1066
+ grid = np.asarray(grid, dtype=DTYPE, order="C")
1067
+ n_estimators, n_trees_per_stage = self.estimators_.shape
1068
+ averaged_predictions = np.zeros(
1069
+ (n_trees_per_stage, grid.shape[0]), dtype=np.float64, order="C"
1070
+ )
1071
+ for stage in range(n_estimators):
1072
+ for k in range(n_trees_per_stage):
1073
+ tree = self.estimators_[stage, k].tree_
1074
+ tree.compute_partial_dependence(
1075
+ grid, target_features, averaged_predictions[k]
1076
+ )
1077
+ averaged_predictions *= self.learning_rate
1078
+
1079
+ return averaged_predictions
1080
+
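This private hook backs the ``method="recursion"`` path of :func:`sklearn.inspection.partial_dependence`. A small usage sketch (feature index and data are arbitrary), bearing in mind the warning above about a non-default ``init``:

    from sklearn.datasets import make_regression
    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.inspection import partial_dependence

    X, y = make_regression(n_samples=300, n_features=5, random_state=0)
    est = GradientBoostingRegressor(random_state=0).fit(X, y)

    # 'recursion' walks the fitted trees directly instead of averaging predictions.
    pd_result = partial_dependence(est, X, features=[0], method="recursion")
    avg = pd_result["average"]                     # shape (1, n_grid_points) for this regressor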
1081
+ def apply(self, X):
1082
+ """Apply trees in the ensemble to X, return leaf indices.
1083
+
1084
+ .. versionadded:: 0.17
1085
+
1086
+ Parameters
1087
+ ----------
1088
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1089
+ The input samples. Internally, its dtype will be converted to
1090
+ ``dtype=np.float32``. If a sparse matrix is provided, it will
1091
+ be converted to a sparse ``csr_matrix``.
1092
+
1093
+ Returns
1094
+ -------
1095
+ X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
1096
+ For each datapoint x in X and for each tree in the ensemble,
1097
+ return the index of the leaf x ends up in, for each estimator.
1098
+ In the case of binary classification, n_classes is 1.

1099
+ """
1100
+
1101
+ self._check_initialized()
1102
+ X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
1103
+
1104
+ # n_classes will be equal to 1 in the binary classification or the
1105
+ # regression case.
1106
+ n_estimators, n_classes = self.estimators_.shape
1107
+ leaves = np.zeros((X.shape[0], n_estimators, n_classes))
1108
+
1109
+ for i in range(n_estimators):
1110
+ for j in range(n_classes):
1111
+ estimator = self.estimators_[i, j]
1112
+ leaves[:, i, j] = estimator.apply(X, check_input=False)
1113
+
1114
+ return leaves
1115
+
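One common use of the leaf indices returned by ``apply`` is to one-hot encode them as embedding-style features for a downstream linear model. A sketch under illustrative sizes, using the binary-classification case where the trailing ``n_classes`` axis has length 1:

    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.preprocessing import OneHotEncoder

    X, y = make_classification(n_samples=400, random_state=0)
    gbt = GradientBoostingClassifier(n_estimators=20, random_state=0).fit(X, y)

    leaves = gbt.apply(X)          # shape (n_samples, n_estimators, n_classes); n_classes == 1 here
    leaves_2d = leaves[:, :, 0]    # drop the trailing axis for binary classification
    leaf_features = OneHotEncoder(handle_unknown="ignore").fit_transform(leaves_2d)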
1116
+
1117
+ class GradientBoostingClassifier(ClassifierMixin, BaseGradientBoosting):
1118
+ """Gradient Boosting for classification.
1119
+
1120
+ This algorithm builds an additive model in a forward stage-wise fashion; it
1121
+ allows for the optimization of arbitrary differentiable loss functions. In
1122
+ each stage ``n_classes_`` regression trees are fit on the negative gradient
1123
+ of the loss function, e.g. binary or multiclass log loss. Binary
1124
+ classification is a special case where only a single regression tree is
1125
+ induced.
1126
+
1127
+ :class:`sklearn.ensemble.HistGradientBoostingClassifier` is a much faster
1128
+ variant of this algorithm for intermediate datasets (`n_samples >= 10_000`).
1129
+
1130
+ Read more in the :ref:`User Guide <gradient_boosting>`.
1131
+
1132
+ Parameters
1133
+ ----------
1134
+ loss : {'log_loss', 'exponential'}, default='log_loss'
1135
+ The loss function to be optimized. 'log_loss' refers to binomial and
1136
+ multinomial deviance, the same as used in logistic regression.
1137
+ It is a good choice for classification with probabilistic outputs.
1138
+ For loss 'exponential', gradient boosting recovers the AdaBoost algorithm.
1139
+
1140
+ learning_rate : float, default=0.1
1141
+ Learning rate shrinks the contribution of each tree by `learning_rate`.
1142
+ There is a trade-off between learning_rate and n_estimators.
1143
+ Values must be in the range `[0.0, inf)`.
1144
+
1145
+ n_estimators : int, default=100
1146
+ The number of boosting stages to perform. Gradient boosting
1147
+ is fairly robust to over-fitting so a large number usually
1148
+ results in better performance.
1149
+ Values must be in the range `[1, inf)`.
1150
+
1151
+ subsample : float, default=1.0
1152
+ The fraction of samples to be used for fitting the individual base
1153
+ learners. If smaller than 1.0 this results in Stochastic Gradient
1154
+ Boosting. `subsample` interacts with the parameter `n_estimators`.
1155
+ Choosing `subsample < 1.0` leads to a reduction of variance
1156
+ and an increase in bias.
1157
+ Values must be in the range `(0.0, 1.0]`.
1158
+
1159
+ criterion : {'friedman_mse', 'squared_error'}, default='friedman_mse'
1160
+ The function to measure the quality of a split. Supported criteria are
1161
+ 'friedman_mse' for the mean squared error with improvement score by
1162
+ Friedman, 'squared_error' for mean squared error. The default value of
1163
+ 'friedman_mse' is generally the best as it can provide a better
1164
+ approximation in some cases.
1165
+
1166
+ .. versionadded:: 0.18
1167
+
1168
+ min_samples_split : int or float, default=2
1169
+ The minimum number of samples required to split an internal node:
1170
+
1171
+ - If int, values must be in the range `[2, inf)`.
1172
+ - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`
1173
+ will be `ceil(min_samples_split * n_samples)`.
1174
+
1175
+ .. versionchanged:: 0.18
1176
+ Added float values for fractions.
1177
+
1178
+ min_samples_leaf : int or float, default=1
1179
+ The minimum number of samples required to be at a leaf node.
1180
+ A split point at any depth will only be considered if it leaves at
1181
+ least ``min_samples_leaf`` training samples in each of the left and
1182
+ right branches. This may have the effect of smoothing the model,
1183
+ especially in regression.
1184
+
1185
+ - If int, values must be in the range `[1, inf)`.
1186
+ - If float, values must be in the range `(0.0, 1.0)` and `min_samples_leaf`
1187
+ will be `ceil(min_samples_leaf * n_samples)`.
1188
+
1189
+ .. versionchanged:: 0.18
1190
+ Added float values for fractions.
1191
+
1192
+ min_weight_fraction_leaf : float, default=0.0
1193
+ The minimum weighted fraction of the sum total of weights (of all
1194
+ the input samples) required to be at a leaf node. Samples have
1195
+ equal weight when sample_weight is not provided.
1196
+ Values must be in the range `[0.0, 0.5]`.
1197
+
1198
+ max_depth : int or None, default=3
1199
+ Maximum depth of the individual regression estimators. The maximum
1200
+ depth limits the number of nodes in the tree. Tune this parameter
1201
+ for best performance; the best value depends on the interaction
1202
+ of the input variables. If None, then nodes are expanded until
1203
+ all leaves are pure or until all leaves contain less than
1204
+ min_samples_split samples.
1205
+ If int, values must be in the range `[1, inf)`.
1206
+
1207
+ min_impurity_decrease : float, default=0.0
1208
+ A node will be split if this split induces a decrease of the impurity
1209
+ greater than or equal to this value.
1210
+ Values must be in the range `[0.0, inf)`.
1211
+
1212
+ The weighted impurity decrease equation is the following::
1213
+
1214
+ N_t / N * (impurity - N_t_R / N_t * right_impurity
1215
+ - N_t_L / N_t * left_impurity)
1216
+
1217
+ where ``N`` is the total number of samples, ``N_t`` is the number of
1218
+ samples at the current node, ``N_t_L`` is the number of samples in the
1219
+ left child, and ``N_t_R`` is the number of samples in the right child.
1220
+
1221
+ ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
1222
+ if ``sample_weight`` is passed.
1223
+
1224
+ .. versionadded:: 0.19
1225
+
1226
+ init : estimator or 'zero', default=None
1227
+ An estimator object that is used to compute the initial predictions.
1228
+ ``init`` has to provide :term:`fit` and :term:`predict_proba`. If
1229
+ 'zero', the initial raw predictions are set to zero. By default, a
1230
+ ``DummyEstimator`` predicting the classes priors is used.
1231
+
1232
+ random_state : int, RandomState instance or None, default=None
1233
+ Controls the random seed given to each Tree estimator at each
1234
+ boosting iteration.
1235
+ In addition, it controls the random permutation of the features at
1236
+ each split (see Notes for more details).
1237
+ It also controls the random splitting of the training data to obtain a
1238
+ validation set if `n_iter_no_change` is not None.
1239
+ Pass an int for reproducible output across multiple function calls.
1240
+ See :term:`Glossary <random_state>`.
1241
+
1242
+ max_features : {'sqrt', 'log2'}, int or float, default=None
1243
+ The number of features to consider when looking for the best split:
1244
+
1245
+ - If int, values must be in the range `[1, inf)`.
1246
+ - If float, values must be in the range `(0.0, 1.0]` and the features
1247
+ considered at each split will be `max(1, int(max_features * n_features_in_))`.
1248
+ - If 'sqrt', then `max_features=sqrt(n_features)`.
1249
+ - If 'log2', then `max_features=log2(n_features)`.
1250
+ - If None, then `max_features=n_features`.
1251
+
1252
+ Choosing `max_features < n_features` leads to a reduction of variance
1253
+ and an increase in bias.
1254
+
1255
+ Note: the search for a split does not stop until at least one
1256
+ valid partition of the node samples is found, even if it requires
1257
+ effectively inspecting more than ``max_features`` features.
1258
+
1259
+ verbose : int, default=0
1260
+ Enable verbose output. If 1 then it prints progress and performance
1261
+ once in a while (the more trees the lower the frequency). If greater
1262
+ than 1 then it prints progress and performance for every tree.
1263
+ Values must be in the range `[0, inf)`.
1264
+
1265
+ max_leaf_nodes : int, default=None
1266
+ Grow trees with ``max_leaf_nodes`` in best-first fashion.
1267
+ Best nodes are defined as relative reduction in impurity.
1268
+ Values must be in the range `[2, inf)`.
1269
+ If `None`, then unlimited number of leaf nodes.
1270
+
1271
+ warm_start : bool, default=False
1272
+ When set to ``True``, reuse the solution of the previous call to fit
1273
+ and add more estimators to the ensemble, otherwise, just erase the
1274
+ previous solution. See :term:`the Glossary <warm_start>`.
1275
+
1276
+ validation_fraction : float, default=0.1
1277
+ The proportion of training data to set aside as validation set for
1278
+ early stopping. Values must be in the range `(0.0, 1.0)`.
1279
+ Only used if ``n_iter_no_change`` is set to an integer.
1280
+
1281
+ .. versionadded:: 0.20
1282
+
1283
+ n_iter_no_change : int, default=None
1284
+ ``n_iter_no_change`` is used to decide if early stopping will be used
1285
+ to terminate training when validation score is not improving. By
1286
+ default it is set to None to disable early stopping. If set to a
1287
+ number, it will set aside ``validation_fraction`` size of the training
1288
+ data as validation and terminate training when validation score is not
1289
+ improving in all of the previous ``n_iter_no_change`` numbers of
1290
+ iterations. The split is stratified.
1291
+ Values must be in the range `[1, inf)`.
1292
+ See
1293
+ :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`.
1294
+
1295
+ .. versionadded:: 0.20
1296
+
1297
+ tol : float, default=1e-4
1298
+ Tolerance for the early stopping. When the loss is not improving
1299
+ by at least tol for ``n_iter_no_change`` iterations (if set to a
1300
+ number), the training stops.
1301
+ Values must be in the range `[0.0, inf)`.
1302
+
1303
+ .. versionadded:: 0.20
1304
+
1305
+ ccp_alpha : non-negative float, default=0.0
1306
+ Complexity parameter used for Minimal Cost-Complexity Pruning. The
1307
+ subtree with the largest cost complexity that is smaller than
1308
+ ``ccp_alpha`` will be chosen. By default, no pruning is performed.
1309
+ Values must be in the range `[0.0, inf)`.
1310
+ See :ref:`minimal_cost_complexity_pruning` for details.
1311
+
1312
+ .. versionadded:: 0.22
1313
+
1314
+ Attributes
1315
+ ----------
1316
+ n_estimators_ : int
1317
+ The number of estimators as selected by early stopping (if
1318
+ ``n_iter_no_change`` is specified). Otherwise it is set to
1319
+ ``n_estimators``.
1320
+
1321
+ .. versionadded:: 0.20
1322
+
1323
+ n_trees_per_iteration_ : int
1324
+ The number of trees that are built at each iteration. For binary classifiers,
1325
+ this is always 1.
1326
+
1327
+ .. versionadded:: 1.4.0
1328
+
1329
+ feature_importances_ : ndarray of shape (n_features,)
1330
+ The impurity-based feature importances.
1331
+ The higher, the more important the feature.
1332
+ The importance of a feature is computed as the (normalized)
1333
+ total reduction of the criterion brought by that feature. It is also
1334
+ known as the Gini importance.
1335
+
1336
+ Warning: impurity-based feature importances can be misleading for
1337
+ high cardinality features (many unique values). See
1338
+ :func:`sklearn.inspection.permutation_importance` as an alternative.
1339
+
1340
+ oob_improvement_ : ndarray of shape (n_estimators,)
1341
+ The improvement in loss on the out-of-bag samples
1342
+ relative to the previous iteration.
1343
+ ``oob_improvement_[0]`` is the improvement in
1344
+ loss of the first stage over the ``init`` estimator.
1345
+ Only available if ``subsample < 1.0``.
1346
+
1347
+ oob_scores_ : ndarray of shape (n_estimators,)
1348
+ The full history of the loss values on the out-of-bag
1349
+ samples. Only available if `subsample < 1.0`.
1350
+
1351
+ .. versionadded:: 1.3
1352
+
1353
+ oob_score_ : float
1354
+ The last value of the loss on the out-of-bag samples. It is
1355
+ the same as `oob_scores_[-1]`. Only available if `subsample < 1.0`.
1356
+
1357
+ .. versionadded:: 1.3
1358
+
1359
+ train_score_ : ndarray of shape (n_estimators,)
1360
+ The i-th score ``train_score_[i]`` is the loss of the
1361
+ model at iteration ``i`` on the in-bag sample.
1362
+ If ``subsample == 1`` this is the loss on the training data.
1363
+
1364
+ init_ : estimator
1365
+ The estimator that provides the initial predictions. Set via the ``init``
1366
+ argument.
1367
+
1368
+ estimators_ : ndarray of DecisionTreeRegressor of \
1369
+ shape (n_estimators, ``n_trees_per_iteration_``)
1370
+ The collection of fitted sub-estimators. ``n_trees_per_iteration_`` is 1 for
1371
+ binary classification, otherwise ``n_classes``.
1372
+
1373
+ classes_ : ndarray of shape (n_classes,)
1374
+ The class labels.
1375
+
1376
+ n_features_in_ : int
1377
+ Number of features seen during :term:`fit`.
1378
+
1379
+ .. versionadded:: 0.24
1380
+
1381
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1382
+ Names of features seen during :term:`fit`. Defined only when `X`
1383
+ has feature names that are all strings.
1384
+
1385
+ .. versionadded:: 1.0
1386
+
1387
+ n_classes_ : int
1388
+ The number of classes.
1389
+
1390
+ max_features_ : int
1391
+ The inferred value of max_features.
1392
+
1393
+ See Also
1394
+ --------
1395
+ HistGradientBoostingClassifier : Histogram-based Gradient Boosting
1396
+ Classification Tree.
1397
+ sklearn.tree.DecisionTreeClassifier : A decision tree classifier.
1398
+ RandomForestClassifier : A meta-estimator that fits a number of decision
1399
+ tree classifiers on various sub-samples of the dataset and uses
1400
+ averaging to improve the predictive accuracy and control over-fitting.
1401
+ AdaBoostClassifier : A meta-estimator that begins by fitting a classifier
1402
+ on the original dataset and then fits additional copies of the
1403
+ classifier on the same dataset where the weights of incorrectly
1404
+ classified instances are adjusted such that subsequent classifiers
1405
+ focus more on difficult cases.
1406
+
1407
+ Notes
1408
+ -----
1409
+ The features are always randomly permuted at each split. Therefore,
1410
+ the best found split may vary, even with the same training data and
1411
+ ``max_features=n_features``, if the improvement of the criterion is
1412
+ identical for several splits enumerated during the search of the best
1413
+ split. To obtain a deterministic behaviour during fitting,
1414
+ ``random_state`` has to be fixed.
1415
+
1416
+ References
1417
+ ----------
1418
+ J. Friedman, Greedy Function Approximation: A Gradient Boosting
1419
+ Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
1420
+
1421
+ J. Friedman, Stochastic Gradient Boosting, 1999
1422
+
1423
+ T. Hastie, R. Tibshirani and J. Friedman.
1424
+ Elements of Statistical Learning Ed. 2, Springer, 2009.
1425
+
1426
+ Examples
1427
+ --------
1428
+ The following example shows how to fit a gradient boosting classifier with
1429
+ 100 decision stumps as weak learners.
1430
+
1431
+ >>> from sklearn.datasets import make_hastie_10_2
1432
+ >>> from sklearn.ensemble import GradientBoostingClassifier
1433
+
1434
+ >>> X, y = make_hastie_10_2(random_state=0)
1435
+ >>> X_train, X_test = X[:2000], X[2000:]
1436
+ >>> y_train, y_test = y[:2000], y[2000:]
1437
+
1438
+ >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
1439
+ ... max_depth=1, random_state=0).fit(X_train, y_train)
1440
+ >>> clf.score(X_test, y_test)
1441
+ 0.913...
1442
+ """
1443
+
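The early-stopping parameters documented above (``n_iter_no_change``, ``validation_fraction``, ``tol``) act together, and the number of stages actually kept is exposed as ``n_estimators_``. A brief configuration sketch with illustrative values:

    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier

    X, y = make_classification(n_samples=2000, random_state=0)
    clf = GradientBoostingClassifier(
        n_estimators=500,          # upper bound on boosting stages
        n_iter_no_change=10,       # stop if the validation score stalls for 10 iterations
        validation_fraction=0.1,   # held-out split used only for early stopping
        tol=1e-4,
        random_state=0,
    ).fit(X, y)
    n_used = clf.n_estimators_     # number of stages actually kept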
1444
+ _parameter_constraints: dict = {
1445
+ **BaseGradientBoosting._parameter_constraints,
1446
+ "loss": [StrOptions({"log_loss", "exponential"})],
1447
+ "init": [StrOptions({"zero"}), None, HasMethods(["fit", "predict_proba"])],
1448
+ }
1449
+
1450
+ def __init__(
1451
+ self,
1452
+ *,
1453
+ loss="log_loss",
1454
+ learning_rate=0.1,
1455
+ n_estimators=100,
1456
+ subsample=1.0,
1457
+ criterion="friedman_mse",
1458
+ min_samples_split=2,
1459
+ min_samples_leaf=1,
1460
+ min_weight_fraction_leaf=0.0,
1461
+ max_depth=3,
1462
+ min_impurity_decrease=0.0,
1463
+ init=None,
1464
+ random_state=None,
1465
+ max_features=None,
1466
+ verbose=0,
1467
+ max_leaf_nodes=None,
1468
+ warm_start=False,
1469
+ validation_fraction=0.1,
1470
+ n_iter_no_change=None,
1471
+ tol=1e-4,
1472
+ ccp_alpha=0.0,
1473
+ ):
1474
+ super().__init__(
1475
+ loss=loss,
1476
+ learning_rate=learning_rate,
1477
+ n_estimators=n_estimators,
1478
+ criterion=criterion,
1479
+ min_samples_split=min_samples_split,
1480
+ min_samples_leaf=min_samples_leaf,
1481
+ min_weight_fraction_leaf=min_weight_fraction_leaf,
1482
+ max_depth=max_depth,
1483
+ init=init,
1484
+ subsample=subsample,
1485
+ max_features=max_features,
1486
+ random_state=random_state,
1487
+ verbose=verbose,
1488
+ max_leaf_nodes=max_leaf_nodes,
1489
+ min_impurity_decrease=min_impurity_decrease,
1490
+ warm_start=warm_start,
1491
+ validation_fraction=validation_fraction,
1492
+ n_iter_no_change=n_iter_no_change,
1493
+ tol=tol,
1494
+ ccp_alpha=ccp_alpha,
1495
+ )
1496
+
1497
+ def _encode_y(self, y, sample_weight):
1498
+ # encode classes into 0 ... n_classes - 1 and sets attributes classes_
1499
+ # and n_trees_per_iteration_
1500
+ check_classification_targets(y)
1501
+
1502
+ label_encoder = LabelEncoder()
1503
+ encoded_y_int = label_encoder.fit_transform(y)
1504
+ self.classes_ = label_encoder.classes_
1505
+ n_classes = self.classes_.shape[0]
1506
+ # only 1 tree for binary classification. For multiclass classification,
1507
+ # we build 1 tree per class.
1508
+ self.n_trees_per_iteration_ = 1 if n_classes <= 2 else n_classes
1509
+ encoded_y = encoded_y_int.astype(float, copy=False)
1510
+
1511
+ # From here on, it is additional to the HGBT case.
1512
+ # expose n_classes_ attribute
1513
+ self.n_classes_ = n_classes
1514
+ if sample_weight is None:
1515
+ n_trim_classes = n_classes
1516
+ else:
1517
+ n_trim_classes = np.count_nonzero(np.bincount(encoded_y_int, sample_weight))
1518
+
1519
+ if n_trim_classes < 2:
1520
+ raise ValueError(
1521
+ "y contains %d class after sample_weight "
1522
+ "trimmed classes with zero weights, while a "
1523
+ "minimum of 2 classes are required." % n_trim_classes
1524
+ )
1525
+ return encoded_y
1526
+
1527
+ def _get_loss(self, sample_weight):
1528
+ if self.loss == "log_loss":
1529
+ if self.n_classes_ == 2:
1530
+ return HalfBinomialLoss(sample_weight=sample_weight)
1531
+ else:
1532
+ return HalfMultinomialLoss(
1533
+ sample_weight=sample_weight, n_classes=self.n_classes_
1534
+ )
1535
+ elif self.loss == "exponential":
1536
+ if self.n_classes_ > 2:
1537
+ raise ValueError(
1538
+ f"loss='{self.loss}' is only suitable for a binary classification "
1539
+ f"problem, you have n_classes={self.n_classes_}. "
1540
+ "Please use loss='log_loss' instead."
1541
+ )
1542
+ else:
1543
+ return ExponentialLoss(sample_weight=sample_weight)
1544
+
1545
+ def decision_function(self, X):
1546
+ """Compute the decision function of ``X``.
1547
+
1548
+ Parameters
1549
+ ----------
1550
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1551
+ The input samples. Internally, it will be converted to
1552
+ ``dtype=np.float32`` and if a sparse matrix is provided
1553
+ to a sparse ``csr_matrix``.
1554
+
1555
+ Returns
1556
+ -------
1557
+ score : ndarray of shape (n_samples, n_classes) or (n_samples,)
1558
+ The decision function of the input samples, which corresponds to
1559
+ the raw values predicted from the trees of the ensemble. The
1560
+ order of the classes corresponds to that in the attribute
1561
+ :term:`classes_`. Regression and binary classification produce an
1562
+ array of shape (n_samples,).
1563
+ """
1564
+ X = self._validate_data(
1565
+ X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
1566
+ )
1567
+ raw_predictions = self._raw_predict(X)
1568
+ if raw_predictions.shape[1] == 1:
1569
+ return raw_predictions.ravel()
1570
+ return raw_predictions
1571
+
1572
+ def staged_decision_function(self, X):
1573
+ """Compute decision function of ``X`` for each iteration.
1574
+
1575
+ This method allows monitoring (i.e. determining the error on a testing set)
1576
+ after each stage.
1577
+
1578
+ Parameters
1579
+ ----------
1580
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1581
+ The input samples. Internally, it will be converted to
1582
+ ``dtype=np.float32`` and if a sparse matrix is provided
1583
+ to a sparse ``csr_matrix``.
1584
+
1585
+ Yields
1586
+ ------
1587
+ score : generator of ndarray of shape (n_samples, k)
1588
+ The decision function of the input samples, which corresponds to
1589
+ the raw values predicted from the trees of the ensemble. The order
1590
+ of the classes corresponds to that in the attribute :term:`classes_`.
1591
+ Regression and binary classification are special cases with
1592
+ ``k == 1``, otherwise ``k==n_classes``.
1593
+ """
1594
+ yield from self._staged_raw_predict(X)
1595
+
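As a concrete monitoring example for ``staged_decision_function``, the sketch below scores a held-out set after every stage, converting decision values to labels the same way ``predict`` does; the sizes and the 50-stage budget are illustrative:

    import numpy as np
    from sklearn.datasets import make_hastie_10_2
    from sklearn.ensemble import GradientBoostingClassifier

    X, y = make_hastie_10_2(random_state=0)          # binary labels in {-1, +1}
    X_train, X_test, y_train, y_test = X[:2000], X[2000:], y[:2000], y[2000:]
    clf = GradientBoostingClassifier(n_estimators=50, random_state=0).fit(X_train, y_train)

    # Staged decision values have shape (n_samples, 1) for binary problems.
    test_acc = [
        np.mean(np.where(score.ravel() >= 0, clf.classes_[1], clf.classes_[0]) == y_test)
        for score in clf.staged_decision_function(X_test)
    ]
    best_stage = int(np.argmax(test_acc)) + 1        # 1-based stage index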
1596
+ def predict(self, X):
1597
+ """Predict class for X.
1598
+
1599
+ Parameters
1600
+ ----------
1601
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1602
+ The input samples. Internally, it will be converted to
1603
+ ``dtype=np.float32`` and if a sparse matrix is provided
1604
+ to a sparse ``csr_matrix``.
1605
+
1606
+ Returns
1607
+ -------
1608
+ y : ndarray of shape (n_samples,)
1609
+ The predicted values.
1610
+ """
1611
+ raw_predictions = self.decision_function(X)
1612
+ if raw_predictions.ndim == 1: # decision_function already squeezed it
1613
+ encoded_classes = (raw_predictions >= 0).astype(int)
1614
+ else:
1615
+ encoded_classes = np.argmax(raw_predictions, axis=1)
1616
+ return self.classes_[encoded_classes]
1617
+
1618
+ def staged_predict(self, X):
1619
+ """Predict class at each stage for X.
1620
+
1621
+ This method allows monitoring (i.e. determining the error on a testing set)
1622
+ after each stage.
1623
+
1624
+ Parameters
1625
+ ----------
1626
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1627
+ The input samples. Internally, it will be converted to
1628
+ ``dtype=np.float32`` and if a sparse matrix is provided
1629
+ to a sparse ``csr_matrix``.
1630
+
1631
+ Yields
1632
+ ------
1633
+ y : generator of ndarray of shape (n_samples,)
1634
+ The predicted value of the input samples.
1635
+ """
1636
+ if self.n_classes_ == 2: # n_trees_per_iteration_ = 1
1637
+ for raw_predictions in self._staged_raw_predict(X):
1638
+ encoded_classes = (raw_predictions.squeeze() >= 0).astype(int)
1639
+ yield self.classes_.take(encoded_classes, axis=0)
1640
+ else:
1641
+ for raw_predictions in self._staged_raw_predict(X):
1642
+ encoded_classes = np.argmax(raw_predictions, axis=1)
1643
+ yield self.classes_.take(encoded_classes, axis=0)
1644
+
1645
+ def predict_proba(self, X):
1646
+ """Predict class probabilities for X.
1647
+
1648
+ Parameters
1649
+ ----------
1650
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1651
+ The input samples. Internally, it will be converted to
1652
+ ``dtype=np.float32`` and if a sparse matrix is provided
1653
+ to a sparse ``csr_matrix``.
1654
+
1655
+ Returns
1656
+ -------
1657
+ p : ndarray of shape (n_samples, n_classes)
1658
+ The class probabilities of the input samples. The order of the
1659
+ classes corresponds to that in the attribute :term:`classes_`.
1660
+
1661
+ Raises
1662
+ ------
1663
+ AttributeError
1664
+ If the ``loss`` does not support probabilities.
1665
+ """
1666
+ raw_predictions = self.decision_function(X)
1667
+ return self._loss.predict_proba(raw_predictions)
1668
+
1669
+ def predict_log_proba(self, X):
1670
+ """Predict class log-probabilities for X.
1671
+
1672
+ Parameters
1673
+ ----------
1674
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1675
+ The input samples. Internally, it will be converted to
1676
+ ``dtype=np.float32`` and if a sparse matrix is provided
1677
+ to a sparse ``csr_matrix``.
1678
+
1679
+ Returns
1680
+ -------
1681
+ p : ndarray of shape (n_samples, n_classes)
1682
+ The class log-probabilities of the input samples. The order of the
1683
+ classes corresponds to that in the attribute :term:`classes_`.
1684
+
1685
+ Raises
1686
+ ------
1687
+ AttributeError
1688
+ If the ``loss`` does not support probabilities.
1689
+ """
1690
+ proba = self.predict_proba(X)
1691
+ return np.log(proba)
1692
+
1693
+ def staged_predict_proba(self, X):
1694
+ """Predict class probabilities at each stage for X.
1695
+
1696
+ This method allows monitoring (i.e. determining the error on a testing set)
1697
+ after each stage.
1698
+
1699
+ Parameters
1700
+ ----------
1701
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1702
+ The input samples. Internally, it will be converted to
1703
+ ``dtype=np.float32`` and if a sparse matrix is provided
1704
+ to a sparse ``csr_matrix``.
1705
+
1706
+ Yields
1707
+ ------
1708
+ y : generator of ndarray of shape (n_samples,)
1709
+ The predicted value of the input samples.
1710
+ """
1711
+ try:
1712
+ for raw_predictions in self._staged_raw_predict(X):
1713
+ yield self._loss.predict_proba(raw_predictions)
1714
+ except NotFittedError:
1715
+ raise
1716
+ except AttributeError as e:
1717
+ raise AttributeError(
1718
+ "loss=%r does not support predict_proba" % self.loss
1719
+ ) from e
1720
+
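``staged_predict_proba`` pairs naturally with a per-stage loss curve on held-out data, which is a simple way to pick the number of boosting stages after the fact. A sketch with illustrative sizes:

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.metrics import log_loss
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=1500, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)

    # One log-loss value per boosting stage, computed on held-out data.
    stage_losses = [log_loss(y_test, proba) for proba in clf.staged_predict_proba(X_test)]
    best_n_estimators = int(np.argmin(stage_losses)) + 1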
1721
+
1722
+ class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting):
1723
+ """Gradient Boosting for regression.
1724
+
1725
+ This estimator builds an additive model in a forward stage-wise fashion; it
1726
+ allows for the optimization of arbitrary differentiable loss functions. In
1727
+ each stage a regression tree is fit on the negative gradient of the given
1728
+ loss function.
1729
+
1730
+ :class:`sklearn.ensemble.HistGradientBoostingRegressor` is a much faster
1731
+ variant of this algorithm for intermediate datasets (`n_samples >= 10_000`).
1732
+
1733
+ Read more in the :ref:`User Guide <gradient_boosting>`.
1734
+
1735
+ Parameters
1736
+ ----------
1737
+ loss : {'squared_error', 'absolute_error', 'huber', 'quantile'}, \
1738
+ default='squared_error'
1739
+ Loss function to be optimized. 'squared_error' refers to the squared
1740
+ error for regression. 'absolute_error' refers to the absolute error of
1741
+ regression and is a robust loss function. 'huber' is a
1742
+ combination of the two. 'quantile' allows quantile regression (use
1743
+ `alpha` to specify the quantile).
1744
+
1745
+ learning_rate : float, default=0.1
1746
+ Learning rate shrinks the contribution of each tree by `learning_rate`.
1747
+ There is a trade-off between learning_rate and n_estimators.
1748
+ Values must be in the range `[0.0, inf)`.
1749
+
1750
+ n_estimators : int, default=100
1751
+ The number of boosting stages to perform. Gradient boosting
1752
+ is fairly robust to over-fitting so a large number usually
1753
+ results in better performance.
1754
+ Values must be in the range `[1, inf)`.
1755
+
1756
+ subsample : float, default=1.0
1757
+ The fraction of samples to be used for fitting the individual base
1758
+ learners. If smaller than 1.0 this results in Stochastic Gradient
1759
+ Boosting. `subsample` interacts with the parameter `n_estimators`.
1760
+ Choosing `subsample < 1.0` leads to a reduction of variance
1761
+ and an increase in bias.
1762
+ Values must be in the range `(0.0, 1.0]`.
1763
+
1764
+ criterion : {'friedman_mse', 'squared_error'}, default='friedman_mse'
1765
+ The function to measure the quality of a split. Supported criteria are
1766
+ "friedman_mse" for the mean squared error with improvement score by
1767
+ Friedman, "squared_error" for mean squared error. The default value of
1768
+ "friedman_mse" is generally the best as it can provide a better
1769
+ approximation in some cases.
1770
+
1771
+ .. versionadded:: 0.18
1772
+
1773
+ min_samples_split : int or float, default=2
1774
+ The minimum number of samples required to split an internal node:
1775
+
1776
+ - If int, values must be in the range `[2, inf)`.
1777
+ - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`
1778
+ will be `ceil(min_samples_split * n_samples)`.
1779
+
1780
+ .. versionchanged:: 0.18
1781
+ Added float values for fractions.
1782
+
1783
+ min_samples_leaf : int or float, default=1
1784
+ The minimum number of samples required to be at a leaf node.
1785
+ A split point at any depth will only be considered if it leaves at
1786
+ least ``min_samples_leaf`` training samples in each of the left and
1787
+ right branches. This may have the effect of smoothing the model,
1788
+ especially in regression.
1789
+
1790
+ - If int, values must be in the range `[1, inf)`.
1791
+ - If float, values must be in the range `(0.0, 1.0)` and `min_samples_leaf`
1792
+ will be `ceil(min_samples_leaf * n_samples)`.
1793
+
1794
+ .. versionchanged:: 0.18
1795
+ Added float values for fractions.
1796
+
1797
+ min_weight_fraction_leaf : float, default=0.0
1798
+ The minimum weighted fraction of the sum total of weights (of all
1799
+ the input samples) required to be at a leaf node. Samples have
1800
+ equal weight when sample_weight is not provided.
1801
+ Values must be in the range `[0.0, 0.5]`.
1802
+
1803
+ max_depth : int or None, default=3
1804
+ Maximum depth of the individual regression estimators. The maximum
1805
+ depth limits the number of nodes in the tree. Tune this parameter
1806
+ for best performance; the best value depends on the interaction
1807
+ of the input variables. If None, then nodes are expanded until
1808
+ all leaves are pure or until all leaves contain less than
1809
+ min_samples_split samples.
1810
+ If int, values must be in the range `[1, inf)`.
1811
+
1812
+ min_impurity_decrease : float, default=0.0
1813
+ A node will be split if this split induces a decrease of the impurity
1814
+ greater than or equal to this value.
1815
+ Values must be in the range `[0.0, inf)`.
1816
+
1817
+ The weighted impurity decrease equation is the following::
1818
+
1819
+ N_t / N * (impurity - N_t_R / N_t * right_impurity
1820
+ - N_t_L / N_t * left_impurity)
1821
+
1822
+ where ``N`` is the total number of samples, ``N_t`` is the number of
1823
+ samples at the current node, ``N_t_L`` is the number of samples in the
1824
+ left child, and ``N_t_R`` is the number of samples in the right child.
1825
+
1826
+ ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
1827
+ if ``sample_weight`` is passed.
1828
+
1829
+ .. versionadded:: 0.19
1830
+
1831
+ init : estimator or 'zero', default=None
1832
+ An estimator object that is used to compute the initial predictions.
1833
+ ``init`` has to provide :term:`fit` and :term:`predict`. If 'zero', the
1834
+ initial raw predictions are set to zero. By default a
1835
+ ``DummyEstimator`` is used, predicting either the average target value
1836
+ (for loss='squared_error'), or a quantile for the other losses.
1837
+
1838
+ random_state : int, RandomState instance or None, default=None
1839
+ Controls the random seed given to each Tree estimator at each
1840
+ boosting iteration.
1841
+ In addition, it controls the random permutation of the features at
1842
+ each split (see Notes for more details).
1843
+ It also controls the random splitting of the training data to obtain a
1844
+ validation set if `n_iter_no_change` is not None.
1845
+ Pass an int for reproducible output across multiple function calls.
1846
+ See :term:`Glossary <random_state>`.
1847
+
1848
+ max_features : {'sqrt', 'log2'}, int or float, default=None
1849
+ The number of features to consider when looking for the best split:
1850
+
1851
+ - If int, values must be in the range `[1, inf)`.
1852
+ - If float, values must be in the range `(0.0, 1.0]` and the features
1853
+ considered at each split will be `max(1, int(max_features * n_features_in_))`.
1854
+ - If "sqrt", then `max_features=sqrt(n_features)`.
1855
+ - If "log2", then `max_features=log2(n_features)`.
1856
+ - If None, then `max_features=n_features`.
1857
+
1858
+ Choosing `max_features < n_features` leads to a reduction of variance
1859
+ and an increase in bias.
1860
+
1861
+ Note: the search for a split does not stop until at least one
1862
+ valid partition of the node samples is found, even if it requires
1863
+ effectively inspecting more than ``max_features`` features.
1864
+
1865
+ alpha : float, default=0.9
1866
+ The alpha-quantile of the huber loss function and the quantile
1867
+ loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
1868
+ Values must be in the range `(0.0, 1.0)`.
1869
+
1870
+ verbose : int, default=0
1871
+ Enable verbose output. If 1 then it prints progress and performance
1872
+ once in a while (the more trees the lower the frequency). If greater
1873
+ than 1 then it prints progress and performance for every tree.
1874
+ Values must be in the range `[0, inf)`.
1875
+
1876
+ max_leaf_nodes : int, default=None
1877
+ Grow trees with ``max_leaf_nodes`` in best-first fashion.
1878
+ Best nodes are defined as relative reduction in impurity.
1879
+ Values must be in the range `[2, inf)`.
1880
+ If None, then unlimited number of leaf nodes.
1881
+
1882
+ warm_start : bool, default=False
1883
+ When set to ``True``, reuse the solution of the previous call to fit
1884
+ and add more estimators to the ensemble, otherwise, just erase the
1885
+ previous solution. See :term:`the Glossary <warm_start>`.
1886
+
1887
+ validation_fraction : float, default=0.1
1888
+ The proportion of training data to set aside as validation set for
1889
+ early stopping. Values must be in the range `(0.0, 1.0)`.
1890
+ Only used if ``n_iter_no_change`` is set to an integer.
1891
+
1892
+ .. versionadded:: 0.20
1893
+
1894
+ n_iter_no_change : int, default=None
1895
+ ``n_iter_no_change`` is used to decide if early stopping will be used
1896
+ to terminate training when validation score is not improving. By
1897
+ default it is set to None to disable early stopping. If set to a
1898
+ number, it will set aside ``validation_fraction`` size of the training
1899
+ data as validation and terminate training when validation score is not
1900
+ improving in all of the previous ``n_iter_no_change`` numbers of
1901
+ iterations.
1902
+ Values must be in the range `[1, inf)`.
1903
+ See
1904
+ :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`.
1905
+
1906
+ .. versionadded:: 0.20
1907
+
1908
+ tol : float, default=1e-4
1909
+ Tolerance for the early stopping. When the loss is not improving
1910
+ by at least tol for ``n_iter_no_change`` iterations (if set to a
1911
+ number), the training stops.
1912
+ Values must be in the range `[0.0, inf)`.
1913
+
1914
+ .. versionadded:: 0.20
1915
+
1916
+ ccp_alpha : non-negative float, default=0.0
1917
+ Complexity parameter used for Minimal Cost-Complexity Pruning. The
1918
+ subtree with the largest cost complexity that is smaller than
1919
+ ``ccp_alpha`` will be chosen. By default, no pruning is performed.
1920
+ Values must be in the range `[0.0, inf)`.
1921
+ See :ref:`minimal_cost_complexity_pruning` for details.
1922
+
1923
+ .. versionadded:: 0.22
1924
+
1925
+ Attributes
1926
+ ----------
1927
+ n_estimators_ : int
1928
+ The number of estimators as selected by early stopping (if
1929
+ ``n_iter_no_change`` is specified). Otherwise it is set to
1930
+ ``n_estimators``.
1931
+
1932
+ n_trees_per_iteration_ : int
1933
+ The number of trees that are built at each iteration. For regressors, this is
1934
+ always 1.
1935
+
1936
+ .. versionadded:: 1.4.0
1937
+
1938
+ feature_importances_ : ndarray of shape (n_features,)
1939
+ The impurity-based feature importances.
1940
+ The higher, the more important the feature.
1941
+ The importance of a feature is computed as the (normalized)
1942
+ total reduction of the criterion brought by that feature. It is also
1943
+ known as the Gini importance.
1944
+
1945
+ Warning: impurity-based feature importances can be misleading for
1946
+ high cardinality features (many unique values). See
1947
+ :func:`sklearn.inspection.permutation_importance` as an alternative.
1948
+
1949
+ oob_improvement_ : ndarray of shape (n_estimators,)
1950
+ The improvement in loss on the out-of-bag samples
1951
+ relative to the previous iteration.
1952
+ ``oob_improvement_[0]`` is the improvement in
1953
+ loss of the first stage over the ``init`` estimator.
1954
+ Only available if ``subsample < 1.0``.
1955
+
1956
+ oob_scores_ : ndarray of shape (n_estimators,)
1957
+ The full history of the loss values on the out-of-bag
1958
+ samples. Only available if `subsample < 1.0`.
1959
+
1960
+ .. versionadded:: 1.3
1961
+
1962
+ oob_score_ : float
1963
+ The last value of the loss on the out-of-bag samples. It is
1964
+ the same as `oob_scores_[-1]`. Only available if `subsample < 1.0`.
1965
+
1966
+ .. versionadded:: 1.3
1967
+
1968
+ train_score_ : ndarray of shape (n_estimators,)
1969
+ The i-th score ``train_score_[i]`` is the loss of the
1970
+ model at iteration ``i`` on the in-bag sample.
1971
+ If ``subsample == 1`` this is the loss on the training data.
1972
+
1973
+ init_ : estimator
1974
+ The estimator that provides the initial predictions. Set via the ``init``
1975
+ argument.
1976
+
1977
+ estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, 1)
1978
+ The collection of fitted sub-estimators.
1979
+
1980
+ n_features_in_ : int
1981
+ Number of features seen during :term:`fit`.
1982
+
1983
+ .. versionadded:: 0.24
1984
+
1985
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1986
+ Names of features seen during :term:`fit`. Defined only when `X`
1987
+ has feature names that are all strings.
1988
+
1989
+ .. versionadded:: 1.0
1990
+
1991
+ max_features_ : int
1992
+ The inferred value of max_features.
1993
+
1994
+ See Also
1995
+ --------
1996
+ HistGradientBoostingRegressor : Histogram-based Gradient Boosting
1997
+ Regression Tree.
1998
+ sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
1999
+ sklearn.ensemble.RandomForestRegressor : A random forest regressor.
2000
+
2001
+ Notes
2002
+ -----
2003
+ The features are always randomly permuted at each split. Therefore,
2004
+ the best found split may vary, even with the same training data and
2005
+ ``max_features=n_features``, if the improvement of the criterion is
2006
+ identical for several splits enumerated during the search of the best
2007
+ split. To obtain a deterministic behaviour during fitting,
2008
+ ``random_state`` has to be fixed.
2009
+
2010
+ References
2011
+ ----------
2012
+ J. Friedman, Greedy Function Approximation: A Gradient Boosting
2013
+ Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
2014
+
2015
+ J. Friedman, Stochastic Gradient Boosting, 1999
2016
+
2017
+ T. Hastie, R. Tibshirani and J. Friedman.
2018
+ Elements of Statistical Learning Ed. 2, Springer, 2009.
2019
+
2020
+ Examples
2021
+ --------
2022
+ >>> from sklearn.datasets import make_regression
2023
+ >>> from sklearn.ensemble import GradientBoostingRegressor
2024
+ >>> from sklearn.model_selection import train_test_split
2025
+ >>> X, y = make_regression(random_state=0)
2026
+ >>> X_train, X_test, y_train, y_test = train_test_split(
2027
+ ... X, y, random_state=0)
2028
+ >>> reg = GradientBoostingRegressor(random_state=0)
2029
+ >>> reg.fit(X_train, y_train)
2030
+ GradientBoostingRegressor(random_state=0)
2031
+ >>> reg.predict(X_test[1:2])
2032
+ array([-61...])
2033
+ >>> reg.score(X_test, y_test)
2034
+ 0.4...
2035
+ """
2036
+
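Because ``loss='quantile'`` and ``alpha`` are documented above, here is a short sketch of fitting two quantile models to obtain a rough prediction interval; the data and the 10%/90% quantiles are illustrative:

    from sklearn.datasets import make_regression
    from sklearn.ensemble import GradientBoostingRegressor

    X, y = make_regression(n_samples=500, noise=10.0, random_state=0)

    # One model per quantile; `alpha` is only used with loss='quantile' (or 'huber').
    lower = GradientBoostingRegressor(loss="quantile", alpha=0.1, random_state=0).fit(X, y)
    upper = GradientBoostingRegressor(loss="quantile", alpha=0.9, random_state=0).fit(X, y)
    interval = list(zip(lower.predict(X[:5]), upper.predict(X[:5])))  # rough 10%-90% band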
2037
+ _parameter_constraints: dict = {
2038
+ **BaseGradientBoosting._parameter_constraints,
2039
+ "loss": [StrOptions({"squared_error", "absolute_error", "huber", "quantile"})],
2040
+ "init": [StrOptions({"zero"}), None, HasMethods(["fit", "predict"])],
2041
+ "alpha": [Interval(Real, 0.0, 1.0, closed="neither")],
2042
+ }
2043
+
2044
+ def __init__(
2045
+ self,
2046
+ *,
2047
+ loss="squared_error",
2048
+ learning_rate=0.1,
2049
+ n_estimators=100,
2050
+ subsample=1.0,
2051
+ criterion="friedman_mse",
2052
+ min_samples_split=2,
2053
+ min_samples_leaf=1,
2054
+ min_weight_fraction_leaf=0.0,
2055
+ max_depth=3,
2056
+ min_impurity_decrease=0.0,
2057
+ init=None,
2058
+ random_state=None,
2059
+ max_features=None,
2060
+ alpha=0.9,
2061
+ verbose=0,
2062
+ max_leaf_nodes=None,
2063
+ warm_start=False,
2064
+ validation_fraction=0.1,
2065
+ n_iter_no_change=None,
2066
+ tol=1e-4,
2067
+ ccp_alpha=0.0,
2068
+ ):
2069
+ super().__init__(
2070
+ loss=loss,
2071
+ learning_rate=learning_rate,
2072
+ n_estimators=n_estimators,
2073
+ criterion=criterion,
2074
+ min_samples_split=min_samples_split,
2075
+ min_samples_leaf=min_samples_leaf,
2076
+ min_weight_fraction_leaf=min_weight_fraction_leaf,
2077
+ max_depth=max_depth,
2078
+ init=init,
2079
+ subsample=subsample,
2080
+ max_features=max_features,
2081
+ min_impurity_decrease=min_impurity_decrease,
2082
+ random_state=random_state,
2083
+ alpha=alpha,
2084
+ verbose=verbose,
2085
+ max_leaf_nodes=max_leaf_nodes,
2086
+ warm_start=warm_start,
2087
+ validation_fraction=validation_fraction,
2088
+ n_iter_no_change=n_iter_no_change,
2089
+ tol=tol,
2090
+ ccp_alpha=ccp_alpha,
2091
+ )
2092
+
2093
+ def _encode_y(self, y=None, sample_weight=None):
2094
+ # Just convert y to the expected dtype
2095
+ self.n_trees_per_iteration_ = 1
2096
+ y = y.astype(DOUBLE, copy=False)
2097
+ return y
2098
+
2099
+ def _get_loss(self, sample_weight):
2100
+ if self.loss in ("quantile", "huber"):
2101
+ return _LOSSES[self.loss](sample_weight=sample_weight, quantile=self.alpha)
2102
+ else:
2103
+ return _LOSSES[self.loss](sample_weight=sample_weight)
2104
+
2105
+ def predict(self, X):
2106
+ """Predict regression target for X.
2107
+
2108
+ Parameters
2109
+ ----------
2110
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2111
+ The input samples. Internally, it will be converted to
2112
+ ``dtype=np.float32`` and if a sparse matrix is provided
2113
+ to a sparse ``csr_matrix``.
2114
+
2115
+ Returns
2116
+ -------
2117
+ y : ndarray of shape (n_samples,)
2118
+ The predicted values.
2119
+ """
2120
+ X = self._validate_data(
2121
+ X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
2122
+ )
2123
+ # In regression we can directly return the raw value from the trees.
2124
+ return self._raw_predict(X).ravel()
2125
+
2126
+ def staged_predict(self, X):
2127
+ """Predict regression target at each stage for X.
2128
+
2129
+ This method allows monitoring (i.e. determine error on testing set)
2130
+ after each stage.
2131
+
2132
+ Parameters
2133
+ ----------
2134
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2135
+ The input samples. Internally, it will be converted to
2136
+ ``dtype=np.float32`` and if a sparse matrix is provided
2137
+ to a sparse ``csr_matrix``.
2138
+
2139
+ Yields
2140
+ ------
2141
+ y : generator of ndarray of shape (n_samples,)
2142
+ The predicted value of the input samples.
2143
+ """
2144
+ for raw_predictions in self._staged_raw_predict(X):
2145
+ yield raw_predictions.ravel()
2146
+
2147
+ def apply(self, X):
2148
+ """Apply trees in the ensemble to X, return leaf indices.
2149
+
2150
+ .. versionadded:: 0.17
2151
+
2152
+ Parameters
2153
+ ----------
2154
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2155
+ The input samples. Internally, its dtype will be converted to
2156
+ ``dtype=np.float32``. If a sparse matrix is provided, it will
2157
+ be converted to a sparse ``csr_matrix``.
2158
+
2159
+ Returns
2160
+ -------
2161
+ X_leaves : array-like of shape (n_samples, n_estimators)
2162
+ For each datapoint x in X and for each tree in the ensemble,
2163
+ return the index of the leaf x ends up in, for each estimator.
2164
+ """
2165
+
2166
+ leaves = super().apply(X)
2167
+ leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
2168
+ return leaves
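Unlike the classifier, the regressor reshapes the leaf indices to 2-D before returning them, as the code above shows. A quick sketch confirming the shape with illustrative sizes:

    from sklearn.datasets import make_regression
    from sklearn.ensemble import GradientBoostingRegressor

    X, y = make_regression(n_samples=200, random_state=0)
    reg = GradientBoostingRegressor(n_estimators=25, random_state=0).fit(X, y)

    leaves = reg.apply(X)
    assert leaves.shape == (200, 25)   # regressor leaves come back already 2-D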
venv/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (254 kB).
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """This module implements histogram-based gradient boosting estimators.
2
+
3
+ The implementation is a port from pygbm which is itself strongly inspired
4
+ from LightGBM.
5
+ """
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (221 kB).
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (221 kB).
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (250 kB).