applied-ai-018 committed on
Commit 8fa2e1f · verified · 1 Parent(s): 8ec04f5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__init__.py +0 -0
  4. llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/test_pls.py +646 -0
  7. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__init__.py +52 -0
  8. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_base.py +193 -0
  21. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py +458 -0
  22. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py +409 -0
  23. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py +2443 -0
  24. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_pca.py +747 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py +0 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py +983 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py +116 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py +451 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py +452 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py +566 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py +1062 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py +477 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py +987 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py +367 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py +212 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__init__.py +88 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (337 Bytes)
llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc ADDED
Binary file (29.4 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (206 Bytes)
llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc ADDED
Binary file (16.5 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/test_pls.py ADDED
@@ -0,0 +1,646 @@
import warnings

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal

from sklearn.cross_decomposition import CCA, PLSSVD, PLSCanonical, PLSRegression
from sklearn.cross_decomposition._pls import (
    _center_scale_xy,
    _get_first_singular_vectors_power_method,
    _get_first_singular_vectors_svd,
    _svd_flip_1d,
)
from sklearn.datasets import load_linnerud, make_regression
from sklearn.ensemble import VotingRegressor
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression
from sklearn.utils import check_random_state
from sklearn.utils.extmath import svd_flip


def assert_matrix_orthogonal(M):
    K = np.dot(M.T, M)
    assert_array_almost_equal(K, np.diag(np.diag(K)))


def test_pls_canonical_basics():
    # Basic checks for PLSCanonical
    d = load_linnerud()
    X = d.data
    Y = d.target

    pls = PLSCanonical(n_components=X.shape[1])
    pls.fit(X, Y)

    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)
    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)

    # Check X = TP' and Y = UQ'
    T = pls._x_scores
    P = pls.x_loadings_
    U = pls._y_scores
    Q = pls.y_loadings_
    # Need to scale first
    Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(
        X.copy(), Y.copy(), scale=True
    )
    assert_array_almost_equal(Xc, np.dot(T, P.T))
    assert_array_almost_equal(Yc, np.dot(U, Q.T))

    # Check that rotations on training data lead to scores
    Xt = pls.transform(X)
    assert_array_almost_equal(Xt, pls._x_scores)
    Xt, Yt = pls.transform(X, Y)
    assert_array_almost_equal(Xt, pls._x_scores)
    assert_array_almost_equal(Yt, pls._y_scores)

    # Check that inverse_transform works
    X_back = pls.inverse_transform(Xt)
    assert_array_almost_equal(X_back, X)
    _, Y_back = pls.inverse_transform(Xt, Yt)
    assert_array_almost_equal(Y_back, Y)


def test_sanity_check_pls_regression():
    # Sanity check for PLSRegression
    # The results were checked against the R-packages plspm, misOmics and pls

    d = load_linnerud()
    X = d.data
    Y = d.target

    pls = PLSRegression(n_components=X.shape[1])
    X_trans, _ = pls.fit_transform(X, Y)

    # FIXME: one would expect y_trans == pls.y_scores_ but this is not
    # the case.
    # xref: https://github.com/scikit-learn/scikit-learn/issues/22420
    assert_allclose(X_trans, pls.x_scores_)

    expected_x_weights = np.array(
        [
            [-0.61330704, -0.00443647, 0.78983213],
            [-0.74697144, -0.32172099, -0.58183269],
            [-0.25668686, 0.94682413, -0.19399983],
        ]
    )

    expected_x_loadings = np.array(
        [
            [-0.61470416, -0.24574278, 0.78983213],
            [-0.65625755, -0.14396183, -0.58183269],
            [-0.51733059, 1.00609417, -0.19399983],
        ]
    )

    expected_y_weights = np.array(
        [
            [+0.32456184, 0.29892183, 0.20316322],
            [+0.42439636, 0.61970543, 0.19320542],
            [-0.13143144, -0.26348971, -0.17092916],
        ]
    )

    expected_y_loadings = np.array(
        [
            [+0.32456184, 0.29892183, 0.20316322],
            [+0.42439636, 0.61970543, 0.19320542],
            [-0.13143144, -0.26348971, -0.17092916],
        ]
    )

    assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))
    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))

    # The R / Python difference in the signs should be consistent across
    # loadings, weights, etc.
    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)


def test_sanity_check_pls_regression_constant_column_Y():
    # Check behavior when the first column of Y is constant
    # The results are checked against a modified version of plsreg2
    # from the R-package plsdepot
    d = load_linnerud()
    X = d.data
    Y = d.target
    Y[:, 0] = 1
    pls = PLSRegression(n_components=X.shape[1])
    pls.fit(X, Y)

    expected_x_weights = np.array(
        [
            [-0.6273573, 0.007081799, 0.7786994],
            [-0.7493417, -0.277612681, -0.6011807],
            [-0.2119194, 0.960666981, -0.1794690],
        ]
    )

    expected_x_loadings = np.array(
        [
            [-0.6273512, -0.22464538, 0.7786994],
            [-0.6643156, -0.09871193, -0.6011807],
            [-0.5125877, 1.01407380, -0.1794690],
        ]
    )

    expected_y_loadings = np.array(
        [
            [0.0000000, 0.0000000, 0.0000000],
            [0.4357300, 0.5828479, 0.2174802],
            [-0.1353739, -0.2486423, -0.1810386],
        ]
    )

    assert_array_almost_equal(np.abs(expected_x_weights), np.abs(pls.x_weights_))
    assert_array_almost_equal(np.abs(expected_x_loadings), np.abs(pls.x_loadings_))
    # For the PLSRegression with default parameters, y_loadings == y_weights
    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_loadings))

    x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)
    x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)
    # we ignore the first full-zeros row for y
    y_loadings_sign_flip = np.sign(expected_y_loadings[1:] / pls.y_loadings_[1:])

    assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip)


def test_sanity_check_pls_canonical():
    # Sanity check for PLSCanonical
    # The results were checked against the R-package plspm

    d = load_linnerud()
    X = d.data
    Y = d.target

    pls = PLSCanonical(n_components=X.shape[1])
    pls.fit(X, Y)

    expected_x_weights = np.array(
        [
            [-0.61330704, 0.25616119, -0.74715187],
            [-0.74697144, 0.11930791, 0.65406368],
            [-0.25668686, -0.95924297, -0.11817271],
        ]
    )

    expected_x_rotations = np.array(
        [
            [-0.61330704, 0.41591889, -0.62297525],
            [-0.74697144, 0.31388326, 0.77368233],
            [-0.25668686, -0.89237972, -0.24121788],
        ]
    )

    expected_y_weights = np.array(
        [
            [+0.58989127, 0.7890047, 0.1717553],
            [+0.77134053, -0.61351791, 0.16920272],
            [-0.23887670, -0.03267062, 0.97050016],
        ]
    )

    expected_y_rotations = np.array(
        [
            [+0.58989127, 0.7168115, 0.30665872],
            [+0.77134053, -0.70791757, 0.19786539],
            [-0.23887670, -0.00343595, 0.94162826],
        ]
    )

    assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))
    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))

    x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)

    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)

    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)


def test_sanity_check_pls_canonical_random():
    # Sanity check for PLSCanonical on random data
    # The results were checked against the R-package plspm
    n = 500
    p_noise = 10
    q_noise = 5
    # 2 latents vars:
    rng = check_random_state(11)
    l1 = rng.normal(size=n)
    l2 = rng.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    X = latents + rng.normal(size=4 * n).reshape((n, 4))
    Y = latents + rng.normal(size=4 * n).reshape((n, 4))
    X = np.concatenate((X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
    Y = np.concatenate((Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)

    pls = PLSCanonical(n_components=3)
    pls.fit(X, Y)

    expected_x_weights = np.array(
        [
            [0.65803719, 0.19197924, 0.21769083],
            [0.7009113, 0.13303969, -0.15376699],
            [0.13528197, -0.68636408, 0.13856546],
            [0.16854574, -0.66788088, -0.12485304],
            [-0.03232333, -0.04189855, 0.40690153],
            [0.1148816, -0.09643158, 0.1613305],
            [0.04792138, -0.02384992, 0.17175319],
            [-0.06781, -0.01666137, -0.18556747],
            [-0.00266945, -0.00160224, 0.11893098],
            [-0.00849528, -0.07706095, 0.1570547],
            [-0.00949471, -0.02964127, 0.34657036],
            [-0.03572177, 0.0945091, 0.3414855],
            [0.05584937, -0.02028961, -0.57682568],
            [0.05744254, -0.01482333, -0.17431274],
        ]
    )

    expected_x_loadings = np.array(
        [
            [0.65649254, 0.1847647, 0.15270699],
            [0.67554234, 0.15237508, -0.09182247],
            [0.19219925, -0.67750975, 0.08673128],
            [0.2133631, -0.67034809, -0.08835483],
            [-0.03178912, -0.06668336, 0.43395268],
            [0.15684588, -0.13350241, 0.20578984],
            [0.03337736, -0.03807306, 0.09871553],
            [-0.06199844, 0.01559854, -0.1881785],
            [0.00406146, -0.00587025, 0.16413253],
            [-0.00374239, -0.05848466, 0.19140336],
            [0.00139214, -0.01033161, 0.32239136],
            [-0.05292828, 0.0953533, 0.31916881],
            [0.04031924, -0.01961045, -0.65174036],
            [0.06172484, -0.06597366, -0.1244497],
        ]
    )

    expected_y_weights = np.array(
        [
            [0.66101097, 0.18672553, 0.22826092],
            [0.69347861, 0.18463471, -0.23995597],
            [0.14462724, -0.66504085, 0.17082434],
            [0.22247955, -0.6932605, -0.09832993],
            [0.07035859, 0.00714283, 0.67810124],
            [0.07765351, -0.0105204, -0.44108074],
            [-0.00917056, 0.04322147, 0.10062478],
            [-0.01909512, 0.06182718, 0.28830475],
            [0.01756709, 0.04797666, 0.32225745],
        ]
    )

    expected_y_loadings = np.array(
        [
            [0.68568625, 0.1674376, 0.0969508],
            [0.68782064, 0.20375837, -0.1164448],
            [0.11712173, -0.68046903, 0.12001505],
            [0.17860457, -0.6798319, -0.05089681],
            [0.06265739, -0.0277703, 0.74729584],
            [0.0914178, 0.00403751, -0.5135078],
            [-0.02196918, -0.01377169, 0.09564505],
            [-0.03288952, 0.09039729, 0.31858973],
            [0.04287624, 0.05254676, 0.27836841],
        ]
    )

    assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))
    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))

    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)

    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)

    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)


def test_convergence_fail():
    # Make sure ConvergenceWarning is raised if max_iter is too small
    d = load_linnerud()
    X = d.data
    Y = d.target
    pls_nipals = PLSCanonical(n_components=X.shape[1], max_iter=2)
    with pytest.warns(ConvergenceWarning):
        pls_nipals.fit(X, Y)


@pytest.mark.parametrize("Est", (PLSSVD, PLSRegression, PLSCanonical))
def test_attibutes_shapes(Est):
    # Make sure attributes are of the correct shape depending on n_components
    d = load_linnerud()
    X = d.data
    Y = d.target
    n_components = 2
    pls = Est(n_components=n_components)
    pls.fit(X, Y)
    assert all(
        attr.shape[1] == n_components for attr in (pls.x_weights_, pls.y_weights_)
    )


@pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA))
def test_univariate_equivalence(Est):
    # Ensure 2D Y with 1 column is equivalent to 1D Y
    d = load_linnerud()
    X = d.data
    Y = d.target

    est = Est(n_components=1)
    one_d_coeff = est.fit(X, Y[:, 0]).coef_
    two_d_coeff = est.fit(X, Y[:, :1]).coef_

    assert one_d_coeff.shape == two_d_coeff.shape
    assert_array_almost_equal(one_d_coeff, two_d_coeff)


@pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA, PLSSVD))
def test_copy(Est):
    # check that the "copy" keyword works
    d = load_linnerud()
    X = d.data
    Y = d.target
    X_orig = X.copy()

    # copy=True won't modify inplace
    pls = Est(copy=True).fit(X, Y)
    assert_array_equal(X, X_orig)

    # copy=False will modify inplace
    with pytest.raises(AssertionError):
        Est(copy=False).fit(X, Y)
        assert_array_almost_equal(X, X_orig)

    if Est is PLSSVD:
        return  # PLSSVD does not support copy param in predict or transform

    X_orig = X.copy()
    with pytest.raises(AssertionError):
        pls.transform(X, Y, copy=False),
        assert_array_almost_equal(X, X_orig)

    X_orig = X.copy()
    with pytest.raises(AssertionError):
        pls.predict(X, copy=False),
        assert_array_almost_equal(X, X_orig)

    # Make sure copy=True gives same transform and predictions as predict=False
    assert_array_almost_equal(
        pls.transform(X, Y, copy=True), pls.transform(X.copy(), Y.copy(), copy=False)
    )
    assert_array_almost_equal(
        pls.predict(X, copy=True), pls.predict(X.copy(), copy=False)
    )


def _generate_test_scale_and_stability_datasets():
    """Generate dataset for test_scale_and_stability"""
    # dataset for non-regression 7818
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_targets = 5
    n_features = 10
    Q = rng.randn(n_targets, n_features)
    Y = rng.randn(n_samples, n_targets)
    X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1
    X *= 1000
    yield X, Y

    # Data set where one of the features is constraint
    X, Y = load_linnerud(return_X_y=True)
    # causes X[:, -1].std() to be zero
    X[:, -1] = 1.0
    yield X, Y

    X = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [3.0, 5.0, 4.0]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    yield X, Y

    # Seeds that provide a non-regression test for #18746, where CCA fails
    seeds = [530, 741]
    for seed in seeds:
        rng = np.random.RandomState(seed)
        X = rng.randn(4, 3)
        Y = rng.randn(4, 2)
        yield X, Y


@pytest.mark.parametrize("Est", (CCA, PLSCanonical, PLSRegression, PLSSVD))
@pytest.mark.parametrize("X, Y", _generate_test_scale_and_stability_datasets())
def test_scale_and_stability(Est, X, Y):
    """scale=True is equivalent to scale=False on centered/scaled data
    This allows to check numerical stability over platforms as well"""

    X_s, Y_s, *_ = _center_scale_xy(X, Y)

    X_score, Y_score = Est(scale=True).fit_transform(X, Y)
    X_s_score, Y_s_score = Est(scale=False).fit_transform(X_s, Y_s)

    assert_allclose(X_s_score, X_score, atol=1e-4)
    assert_allclose(Y_s_score, Y_score, atol=1e-4)


@pytest.mark.parametrize("Estimator", (PLSSVD, PLSRegression, PLSCanonical, CCA))
def test_n_components_upper_bounds(Estimator):
    """Check the validation of `n_components` upper bounds for `PLS` regressors."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    Y = rng.randn(10, 3)
    est = Estimator(n_components=10)
    err_msg = "`n_components` upper bound is .*. Got 10 instead. Reduce `n_components`."
    with pytest.raises(ValueError, match=err_msg):
        est.fit(X, Y)


@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (100, 200)])
def test_singular_value_helpers(n_samples, n_features, global_random_seed):
    # Make sure SVD and power method give approximately the same results
    X, Y = make_regression(
        n_samples, n_features, n_targets=5, random_state=global_random_seed
    )
    u1, v1, _ = _get_first_singular_vectors_power_method(X, Y, norm_y_weights=True)
    u2, v2 = _get_first_singular_vectors_svd(X, Y)

    _svd_flip_1d(u1, v1)
    _svd_flip_1d(u2, v2)

    rtol = 1e-3
    # Setting atol because some coordinates are very close to zero
    assert_allclose(u1, u2, atol=u2.max() * rtol)
    assert_allclose(v1, v2, atol=v2.max() * rtol)


def test_one_component_equivalence(global_random_seed):
    # PLSSVD, PLSRegression and PLSCanonical should all be equivalent when
    # n_components is 1
    X, Y = make_regression(100, 10, n_targets=5, random_state=global_random_seed)
    svd = PLSSVD(n_components=1).fit(X, Y).transform(X)
    reg = PLSRegression(n_components=1).fit(X, Y).transform(X)
    canonical = PLSCanonical(n_components=1).fit(X, Y).transform(X)

    rtol = 1e-3
    # Setting atol because some entries are very close to zero
    assert_allclose(svd, reg, atol=reg.max() * rtol)
    assert_allclose(svd, canonical, atol=canonical.max() * rtol)


def test_svd_flip_1d():
    # Make sure svd_flip_1d is equivalent to svd_flip
    u = np.array([1, -4, 2])
    v = np.array([1, 2, 3])

    u_expected, v_expected = svd_flip(u.reshape(-1, 1), v.reshape(1, -1))
    _svd_flip_1d(u, v)  # inplace

    assert_allclose(u, u_expected.ravel())
    assert_allclose(u, [-1, 4, -2])

    assert_allclose(v, v_expected.ravel())
    assert_allclose(v, [-1, -2, -3])


def test_loadings_converges(global_random_seed):
    """Test that CCA converges. Non-regression test for #19549."""
    X, y = make_regression(
        n_samples=200, n_features=20, n_targets=20, random_state=global_random_seed
    )

    cca = CCA(n_components=10, max_iter=500)

    with warnings.catch_warnings():
        warnings.simplefilter("error", ConvergenceWarning)

        cca.fit(X, y)

    # Loadings converges to reasonable values
    assert np.all(np.abs(cca.x_loadings_) < 1)


def test_pls_constant_y():
    """Checks warning when y is constant. Non-regression test for #19831"""
    rng = np.random.RandomState(42)
    x = rng.rand(100, 3)
    y = np.zeros(100)

    pls = PLSRegression()

    msg = "Y residual is constant at iteration"
    with pytest.warns(UserWarning, match=msg):
        pls.fit(x, y)

    assert_allclose(pls.x_rotations_, 0)


@pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA])
def test_pls_coef_shape(PLSEstimator):
    """Check the shape of `coef_` attribute.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/12410
    """
    d = load_linnerud()
    X = d.data
    Y = d.target

    pls = PLSEstimator(copy=True).fit(X, Y)

    n_targets, n_features = Y.shape[1], X.shape[1]
    assert pls.coef_.shape == (n_targets, n_features)


@pytest.mark.parametrize("scale", [True, False])
@pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA])
def test_pls_prediction(PLSEstimator, scale):
    """Check the behaviour of the prediction function."""
    d = load_linnerud()
    X = d.data
    Y = d.target

    pls = PLSEstimator(copy=True, scale=scale).fit(X, Y)
    Y_pred = pls.predict(X, copy=True)

    y_mean = Y.mean(axis=0)
    X_trans = X - X.mean(axis=0)
    if scale:
        X_trans /= X.std(axis=0, ddof=1)

    assert_allclose(pls.intercept_, y_mean)
    assert_allclose(Y_pred, X_trans @ pls.coef_.T + pls.intercept_)


@pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical])
def test_pls_feature_names_out(Klass):
    """Check `get_feature_names_out` cross_decomposition module."""
    X, Y = load_linnerud(return_X_y=True)

    est = Klass().fit(X, Y)
    names_out = est.get_feature_names_out()

    class_name_lower = Klass.__name__.lower()
    expected_names_out = np.array(
        [f"{class_name_lower}{i}" for i in range(est.x_weights_.shape[1])],
        dtype=object,
    )
    assert_array_equal(names_out, expected_names_out)


@pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical])
def test_pls_set_output(Klass):
    """Check `set_output` in cross_decomposition module."""
    pd = pytest.importorskip("pandas")
    X, Y = load_linnerud(return_X_y=True, as_frame=True)

    est = Klass().set_output(transform="pandas").fit(X, Y)
    X_trans, y_trans = est.transform(X, Y)
    assert isinstance(y_trans, np.ndarray)
    assert isinstance(X_trans, pd.DataFrame)
    assert_array_equal(X_trans.columns, est.get_feature_names_out())


def test_pls_regression_fit_1d_y():
    """Check that when fitting with 1d `y`, prediction should also be 1d.

    Non-regression test for Issue #26549.
    """
    X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])
    y = np.array([2, 6, 12, 20, 30, 42])
    expected = y.copy()

    plsr = PLSRegression().fit(X, y)
    y_pred = plsr.predict(X)
    assert y_pred.shape == expected.shape

    # Check that it works in VotingRegressor
    lr = LinearRegression().fit(X, y)
    vr = VotingRegressor([("lr", lr), ("plsr", plsr)])
    y_pred = vr.fit(X, y).predict(X)
    assert y_pred.shape == expected.shape
    assert_allclose(y_pred, expected)
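
The prediction identity exercised by test_pls_prediction above (center X, optionally scale it, then apply coef_ and intercept_) can be reproduced outside pytest. A minimal sketch, assuming a scikit-learn recent enough that PLSRegression exposes coef_ with shape (n_targets, n_features) and an intercept_ attribute, as the test itself requires:

# Minimal sketch (not part of the diff): mirrors test_pls_prediction with scale=True.
import numpy as np
from sklearn.cross_decomposition import PLSRegression
from sklearn.datasets import load_linnerud

X, Y = load_linnerud(return_X_y=True)
pls = PLSRegression(n_components=2).fit(X, Y)  # scale=True by default

# Center and scale X the same way the estimator does internally (ddof=1).
X_trans = (X - X.mean(axis=0)) / X.std(axis=0, ddof=1)
manual_pred = X_trans @ pls.coef_.T + pls.intercept_

np.testing.assert_allclose(manual_pred, pls.predict(X))
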
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__init__.py ADDED
@@ -0,0 +1,52 @@
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""


from ..utils.extmath import randomized_svd
from ._dict_learning import (
    DictionaryLearning,
    MiniBatchDictionaryLearning,
    SparseCoder,
    dict_learning,
    dict_learning_online,
    sparse_encode,
)
from ._factor_analysis import FactorAnalysis
from ._fastica import FastICA, fastica
from ._incremental_pca import IncrementalPCA
from ._kernel_pca import KernelPCA
from ._lda import LatentDirichletAllocation
from ._nmf import (
    NMF,
    MiniBatchNMF,
    non_negative_factorization,
)
from ._pca import PCA
from ._sparse_pca import MiniBatchSparsePCA, SparsePCA
from ._truncated_svd import TruncatedSVD

__all__ = [
    "DictionaryLearning",
    "FastICA",
    "IncrementalPCA",
    "KernelPCA",
    "MiniBatchDictionaryLearning",
    "MiniBatchNMF",
    "MiniBatchSparsePCA",
    "NMF",
    "PCA",
    "SparseCoder",
    "SparsePCA",
    "dict_learning",
    "dict_learning_online",
    "fastica",
    "non_negative_factorization",
    "randomized_svd",
    "sparse_encode",
    "FactorAnalysis",
    "TruncatedSVD",
    "LatentDirichletAllocation",
]
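
The module docstring above lists PCA, NMF and ICA among the available decompositions. A minimal usage sketch of the public names re-exported by this __init__.py, assuming an installed scikit-learn rather than the vendored llmeval-env copy:

# Minimal sketch (not part of the diff): three decompositions from sklearn.decomposition.
import numpy as np
from sklearn.decomposition import NMF, PCA, FastICA

rng = np.random.RandomState(0)
X = np.abs(rng.randn(100, 6))  # non-negative data so NMF applies

X_pca = PCA(n_components=2).fit_transform(X)
X_ica = FastICA(n_components=2, random_state=0).fit_transform(X)
X_nmf = NMF(n_components=2, init="nndsvda", max_iter=500).fit_transform(X)

print(X_pca.shape, X_ica.shape, X_nmf.shape)  # (100, 2) for each
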
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.32 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc ADDED
Binary file (5.76 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc ADDED
Binary file (62.1 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc ADDED
Binary file (13.5 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc ADDED
Binary file (23.1 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc ADDED
Binary file (13.7 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc ADDED
Binary file (18.4 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc ADDED
Binary file (25.9 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc ADDED
Binary file (64.5 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc ADDED
Binary file (21.2 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc ADDED
Binary file (16.6 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc ADDED
Binary file (10.8 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_base.py ADDED
@@ -0,0 +1,193 @@
"""Principal Component Analysis Base Classes"""

# Author: Alexandre Gramfort <[email protected]>
#         Olivier Grisel <[email protected]>
#         Mathieu Blondel <[email protected]>
#         Denis A. Engemann <[email protected]>
#         Kyle Kastner <[email protected]>
#
# License: BSD 3 clause

from abc import ABCMeta, abstractmethod

import numpy as np
from scipy import linalg
from scipy.sparse import issparse

from ..base import BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin
from ..utils._array_api import _add_to_diagonal, device, get_namespace
from ..utils.sparsefuncs import _implicit_column_offset
from ..utils.validation import check_is_fitted


class _BasePCA(
    ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array of shape=(n_features, n_features)
            Estimated covariance of data.
        """
        xp, _ = get_namespace(self.components_)

        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = exp_var - self.noise_variance_
        exp_var_diff = xp.where(
            exp_var > self.noise_variance_,
            exp_var_diff,
            xp.asarray(0.0, device=device(exp_var)),
        )
        cov = (components_.T * exp_var_diff) @ components_
        _add_to_diagonal(cov, self.noise_variance_, xp)
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        xp, is_array_api_compliant = get_namespace(self.components_)

        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return xp.eye(n_features) / self.noise_variance_

        if is_array_api_compliant:
            linalg_inv = xp.linalg.inv
        else:
            linalg_inv = linalg.inv

        if self.noise_variance_ == 0.0:
            return linalg_inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = exp_var - self.noise_variance_
        exp_var_diff = xp.where(
            exp_var > self.noise_variance_,
            exp_var_diff,
            xp.asarray(0.0, device=device(exp_var)),
        )
        precision = components_ @ components_.T / self.noise_variance_
        _add_to_diagonal(precision, 1.0 / exp_var_diff, xp)
        precision = components_.T @ linalg_inv(precision) @ components_
        precision /= -(self.noise_variance_**2)
        _add_to_diagonal(precision, 1.0 / self.noise_variance_, xp)
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """

    def transform(self, X):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Returns
        -------
        X_new : array-like of shape (n_samples, n_components)
            Projection of X in the first principal components, where `n_samples`
            is the number of samples and `n_components` is the number of the components.
        """
        xp, _ = get_namespace(X)

        check_is_fitted(self)

        X = self._validate_data(
            X, accept_sparse=("csr", "csc"), dtype=[xp.float64, xp.float32], reset=False
        )
        if self.mean_ is not None:
            if issparse(X):
                X = _implicit_column_offset(X, self.mean_)
            else:
                X = X - self.mean_
        X_transformed = X @ self.components_.T
        if self.whiten:
            X_transformed /= xp.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X):
        """Transform data back to its original space.

        In other words, return an input `X_original` whose transform would be X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data, where `n_samples` is the number of samples
            and `n_components` is the number of components.

        Returns
        -------
        X_original array-like of shape (n_samples, n_features)
            Original data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        xp, _ = get_namespace(X)

        if self.whiten:
            scaled_components = (
                xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_
            )
            return X @ scaled_components + self.mean_
        else:
            return X @ self.components_ + self.mean_

    @property
    def _n_features_out(self):
        """Number of transformed output features."""
        return self.components_.shape[0]
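
The get_precision docstring above states that it equals the inverse of get_covariance, computed via the matrix inversion lemma. A minimal sketch checking that relationship on PCA, which derives from _BasePCA (assumes a recent scikit-learn):

# Minimal sketch (not part of the diff): precision should invert the model covariance.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 5)

pca = PCA(n_components=3).fit(X)
cov = pca.get_covariance()        # components_.T * S**2 * components_ + sigma2 * I
precision = pca.get_precision()   # matrix-inversion-lemma route

np.testing.assert_allclose(precision, np.linalg.inv(cov), rtol=1e-6, atol=1e-8)
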
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py ADDED
@@ -0,0 +1,458 @@
1
+ """Factor Analysis.
2
+
3
+ A latent linear variable model.
4
+
5
+ FactorAnalysis is similar to probabilistic PCA implemented by PCA.score
6
+ While PCA assumes Gaussian noise with the same variance for each
7
+ feature, the FactorAnalysis model assumes different variances for
8
+ each of them.
9
+
10
+ This implementation is based on David Barber's Book,
11
+ Bayesian Reasoning and Machine Learning,
12
+ http://www.cs.ucl.ac.uk/staff/d.barber/brml,
13
+ Algorithm 21.1
14
+ """
15
+
16
+ # Author: Christian Osendorfer <[email protected]>
17
+ # Alexandre Gramfort <[email protected]>
18
+ # Denis A. Engemann <[email protected]>
19
+
20
+ # License: BSD3
21
+
22
+ import warnings
23
+ from math import log, sqrt
24
+ from numbers import Integral, Real
25
+
26
+ import numpy as np
27
+ from scipy import linalg
28
+
29
+ from ..base import (
30
+ BaseEstimator,
31
+ ClassNamePrefixFeaturesOutMixin,
32
+ TransformerMixin,
33
+ _fit_context,
34
+ )
35
+ from ..exceptions import ConvergenceWarning
36
+ from ..utils import check_random_state
37
+ from ..utils._param_validation import Interval, StrOptions
38
+ from ..utils.extmath import fast_logdet, randomized_svd, squared_norm
39
+ from ..utils.validation import check_is_fitted
40
+
41
+
42
+ class FactorAnalysis(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
43
+ """Factor Analysis (FA).
44
+
45
+ A simple linear generative model with Gaussian latent variables.
46
+
47
+ The observations are assumed to be caused by a linear transformation of
48
+ lower dimensional latent factors and added Gaussian noise.
49
+ Without loss of generality the factors are distributed according to a
50
+ Gaussian with zero mean and unit covariance. The noise is also zero mean
51
+ and has an arbitrary diagonal covariance matrix.
52
+
53
+ If we would restrict the model further, by assuming that the Gaussian
54
+ noise is even isotropic (all diagonal entries are the same) we would obtain
55
+ :class:`PCA`.
56
+
57
+ FactorAnalysis performs a maximum likelihood estimate of the so-called
58
+ `loading` matrix, the transformation of the latent variables to the
59
+ observed ones, using SVD based approach.
60
+
61
+ Read more in the :ref:`User Guide <FA>`.
62
+
63
+ .. versionadded:: 0.13
64
+
65
+ Parameters
66
+ ----------
67
+ n_components : int, default=None
68
+ Dimensionality of latent space, the number of components
69
+ of ``X`` that are obtained after ``transform``.
70
+ If None, n_components is set to the number of features.
71
+
72
+ tol : float, default=1e-2
73
+ Stopping tolerance for log-likelihood increase.
74
+
75
+ copy : bool, default=True
76
+ Whether to make a copy of X. If ``False``, the input X gets overwritten
77
+ during fitting.
78
+
79
+ max_iter : int, default=1000
80
+ Maximum number of iterations.
81
+
82
+ noise_variance_init : array-like of shape (n_features,), default=None
83
+ The initial guess of the noise variance for each feature.
84
+ If None, it defaults to np.ones(n_features).
85
+
86
+ svd_method : {'lapack', 'randomized'}, default='randomized'
87
+ Which SVD method to use. If 'lapack' use standard SVD from
88
+ scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
89
+ Defaults to 'randomized'. For most applications 'randomized' will
90
+ be sufficiently precise while providing significant speed gains.
91
+ Accuracy can also be improved by setting higher values for
92
+ `iterated_power`. If this is not sufficient, for maximum precision
93
+ you should choose 'lapack'.
94
+
95
+ iterated_power : int, default=3
96
+ Number of iterations for the power method. 3 by default. Only used
97
+ if ``svd_method`` equals 'randomized'.
98
+
99
+ rotation : {'varimax', 'quartimax'}, default=None
100
+ If not None, apply the indicated rotation. Currently, varimax and
101
+ quartimax are implemented. See
102
+ `"The varimax criterion for analytic rotation in factor analysis"
103
+ <https://link.springer.com/article/10.1007%2FBF02289233>`_
104
+ H. F. Kaiser, 1958.
105
+
106
+ .. versionadded:: 0.24
107
+
108
+ random_state : int or RandomState instance, default=0
109
+ Only used when ``svd_method`` equals 'randomized'. Pass an int for
110
+ reproducible results across multiple function calls.
111
+ See :term:`Glossary <random_state>`.
112
+
113
+ Attributes
114
+ ----------
115
+ components_ : ndarray of shape (n_components, n_features)
116
+ Components with maximum variance.
117
+
118
+ loglike_ : list of shape (n_iterations,)
119
+ The log likelihood at each iteration.
120
+
121
+ noise_variance_ : ndarray of shape (n_features,)
122
+ The estimated noise variance for each feature.
123
+
124
+ n_iter_ : int
125
+ Number of iterations run.
126
+
127
+ mean_ : ndarray of shape (n_features,)
128
+ Per-feature empirical mean, estimated from the training set.
129
+
130
+ n_features_in_ : int
131
+ Number of features seen during :term:`fit`.
132
+
133
+ .. versionadded:: 0.24
134
+
135
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
136
+ Names of features seen during :term:`fit`. Defined only when `X`
137
+ has feature names that are all strings.
138
+
139
+ .. versionadded:: 1.0
140
+
141
+ See Also
142
+ --------
143
+ PCA: Principal component analysis is also a latent linear variable model
144
+ which however assumes equal noise variance for each feature.
145
+ This extra assumption makes probabilistic PCA faster as it can be
146
+ computed in closed form.
147
+ FastICA: Independent component analysis, a latent variable model with
148
+ non-Gaussian latent variables.
149
+
150
+ References
151
+ ----------
152
+ - David Barber, Bayesian Reasoning and Machine Learning,
153
+ Algorithm 21.1.
154
+
155
+ - Christopher M. Bishop: Pattern Recognition and Machine Learning,
156
+ Chapter 12.2.4.
157
+
158
+ Examples
159
+ --------
160
+ >>> from sklearn.datasets import load_digits
161
+ >>> from sklearn.decomposition import FactorAnalysis
162
+ >>> X, _ = load_digits(return_X_y=True)
163
+ >>> transformer = FactorAnalysis(n_components=7, random_state=0)
164
+ >>> X_transformed = transformer.fit_transform(X)
165
+ >>> X_transformed.shape
166
+ (1797, 7)
167
+ """
168
+
169
+ _parameter_constraints: dict = {
170
+ "n_components": [Interval(Integral, 0, None, closed="left"), None],
171
+ "tol": [Interval(Real, 0.0, None, closed="left")],
172
+ "copy": ["boolean"],
173
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
174
+ "noise_variance_init": ["array-like", None],
175
+ "svd_method": [StrOptions({"randomized", "lapack"})],
176
+ "iterated_power": [Interval(Integral, 0, None, closed="left")],
177
+ "rotation": [StrOptions({"varimax", "quartimax"}), None],
178
+ "random_state": ["random_state"],
179
+ }
180
+
181
+ def __init__(
182
+ self,
183
+ n_components=None,
184
+ *,
185
+ tol=1e-2,
186
+ copy=True,
187
+ max_iter=1000,
188
+ noise_variance_init=None,
189
+ svd_method="randomized",
190
+ iterated_power=3,
191
+ rotation=None,
192
+ random_state=0,
193
+ ):
194
+ self.n_components = n_components
195
+ self.copy = copy
196
+ self.tol = tol
197
+ self.max_iter = max_iter
198
+ self.svd_method = svd_method
199
+
200
+ self.noise_variance_init = noise_variance_init
201
+ self.iterated_power = iterated_power
202
+ self.random_state = random_state
203
+ self.rotation = rotation
204
+
205
+ @_fit_context(prefer_skip_nested_validation=True)
206
+ def fit(self, X, y=None):
207
+ """Fit the FactorAnalysis model to X using SVD based approach.
208
+
209
+ Parameters
210
+ ----------
211
+ X : array-like of shape (n_samples, n_features)
212
+ Training data.
213
+
214
+ y : Ignored
215
+ Ignored parameter.
216
+
217
+ Returns
218
+ -------
219
+ self : object
220
+ FactorAnalysis class instance.
221
+ """
222
+ X = self._validate_data(X, copy=self.copy, dtype=np.float64)
223
+
224
+ n_samples, n_features = X.shape
225
+ n_components = self.n_components
226
+ if n_components is None:
227
+ n_components = n_features
228
+
229
+ self.mean_ = np.mean(X, axis=0)
230
+ X -= self.mean_
231
+
232
+ # some constant terms
233
+ nsqrt = sqrt(n_samples)
234
+ llconst = n_features * log(2.0 * np.pi) + n_components
235
+ var = np.var(X, axis=0)
236
+
237
+ if self.noise_variance_init is None:
238
+ psi = np.ones(n_features, dtype=X.dtype)
239
+ else:
240
+ if len(self.noise_variance_init) != n_features:
241
+ raise ValueError(
242
+ "noise_variance_init dimension does not "
243
+ "with number of features : %d != %d"
244
+ % (len(self.noise_variance_init), n_features)
245
+ )
246
+ psi = np.array(self.noise_variance_init)
247
+
248
+ loglike = []
249
+ old_ll = -np.inf
250
+ SMALL = 1e-12
251
+
252
+ # we'll modify svd outputs to return unexplained variance
253
+ # to allow for unified computation of loglikelihood
254
+ if self.svd_method == "lapack":
255
+
256
+ def my_svd(X):
257
+ _, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
258
+ return (
259
+ s[:n_components],
260
+ Vt[:n_components],
261
+ squared_norm(s[n_components:]),
262
+ )
263
+
264
+ else: # svd_method == "randomized"
265
+ random_state = check_random_state(self.random_state)
266
+
267
+ def my_svd(X):
268
+ _, s, Vt = randomized_svd(
269
+ X,
270
+ n_components,
271
+ random_state=random_state,
272
+ n_iter=self.iterated_power,
273
+ )
274
+ return s, Vt, squared_norm(X) - squared_norm(s)
275
+
276
+ for i in range(self.max_iter):
277
+ # SMALL helps numerics
278
+ sqrt_psi = np.sqrt(psi) + SMALL
279
+ s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
280
+ s **= 2
281
+ # Use 'maximum' here to avoid sqrt problems.
282
+ W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt
283
+ del Vt
284
+ W *= sqrt_psi
285
+
286
+ # loglikelihood
287
+ ll = llconst + np.sum(np.log(s))
288
+ ll += unexp_var + np.sum(np.log(psi))
289
+ ll *= -n_samples / 2.0
290
+ loglike.append(ll)
291
+ if (ll - old_ll) < self.tol:
292
+ break
293
+ old_ll = ll
294
+
295
+ psi = np.maximum(var - np.sum(W**2, axis=0), SMALL)
296
+ else:
297
+ warnings.warn(
298
+ "FactorAnalysis did not converge."
299
+ + " You might want"
300
+ + " to increase the number of iterations.",
301
+ ConvergenceWarning,
302
+ )
303
+
304
+ self.components_ = W
305
+ if self.rotation is not None:
306
+ self.components_ = self._rotate(W)
307
+ self.noise_variance_ = psi
308
+ self.loglike_ = loglike
309
+ self.n_iter_ = i + 1
310
+ return self
311
+
312
+ def transform(self, X):
313
+ """Apply dimensionality reduction to X using the model.
314
+
315
+ Compute the expected mean of the latent variables.
316
+ See Barber, 21.2.33 (or Bishop, 12.66).
317
+
318
+ Parameters
319
+ ----------
320
+ X : array-like of shape (n_samples, n_features)
321
+ Training data.
322
+
323
+ Returns
324
+ -------
325
+ X_new : ndarray of shape (n_samples, n_components)
326
+ The latent variables of X.
327
+ """
328
+ check_is_fitted(self)
329
+
330
+ X = self._validate_data(X, reset=False)
331
+ Ih = np.eye(len(self.components_))
332
+
333
+ X_transformed = X - self.mean_
334
+
335
+ Wpsi = self.components_ / self.noise_variance_
336
+ cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
337
+ tmp = np.dot(X_transformed, Wpsi.T)
338
+ X_transformed = np.dot(tmp, cov_z)
339
+
340
+ return X_transformed
341
+
342
+ def get_covariance(self):
343
+ """Compute data covariance with the FactorAnalysis model.
344
+
345
+ ``cov = components_.T * components_ + diag(noise_variance)``
346
+
347
+ Returns
348
+ -------
349
+ cov : ndarray of shape (n_features, n_features)
350
+ Estimated covariance of data.
351
+ """
352
+ check_is_fitted(self)
353
+
354
+ cov = np.dot(self.components_.T, self.components_)
355
+ cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
356
+ return cov
357
+
358
+ def get_precision(self):
359
+ """Compute data precision matrix with the FactorAnalysis model.
360
+
361
+ Returns
362
+ -------
363
+ precision : ndarray of shape (n_features, n_features)
364
+ Estimated precision of data.
365
+ """
366
+ check_is_fitted(self)
367
+
368
+ n_features = self.components_.shape[1]
369
+
370
+ # handle corner cases first
371
+ if self.n_components == 0:
372
+ return np.diag(1.0 / self.noise_variance_)
373
+ if self.n_components == n_features:
374
+ return linalg.inv(self.get_covariance())
375
+
376
+ # Get precision using matrix inversion lemma
377
+ components_ = self.components_
378
+ precision = np.dot(components_ / self.noise_variance_, components_.T)
379
+ precision.flat[:: len(precision) + 1] += 1.0
380
+ precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
381
+ precision /= self.noise_variance_[:, np.newaxis]
382
+ precision /= -self.noise_variance_[np.newaxis, :]
383
+ precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
384
+ return precision
385
+
386
+ def score_samples(self, X):
387
+ """Compute the log-likelihood of each sample.
388
+
389
+ Parameters
390
+ ----------
391
+ X : ndarray of shape (n_samples, n_features)
392
+ The data.
393
+
394
+ Returns
395
+ -------
396
+ ll : ndarray of shape (n_samples,)
397
+ Log-likelihood of each sample under the current model.
398
+ """
399
+ check_is_fitted(self)
400
+ X = self._validate_data(X, reset=False)
401
+ Xr = X - self.mean_
402
+ precision = self.get_precision()
403
+ n_features = X.shape[1]
404
+ log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
405
+ log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
406
+ return log_like
407
+
408
+ def score(self, X, y=None):
409
+ """Compute the average log-likelihood of the samples.
410
+
411
+ Parameters
412
+ ----------
413
+ X : ndarray of shape (n_samples, n_features)
414
+ The data.
415
+
416
+ y : Ignored
417
+ Ignored parameter.
418
+
419
+ Returns
420
+ -------
421
+ ll : float
422
+ Average log-likelihood of the samples under the current model.
423
+ """
424
+ return np.mean(self.score_samples(X))
425
+
426
+ def _rotate(self, components, n_components=None, tol=1e-6):
427
+ "Rotate the factor analysis solution."
428
+ # note that tol is not exposed
429
+ return _ortho_rotation(components.T, method=self.rotation, tol=tol)[
430
+ : self.n_components
431
+ ]
432
+
433
+ @property
434
+ def _n_features_out(self):
435
+ """Number of transformed output features."""
436
+ return self.components_.shape[0]
437
+
438
+
439
+ def _ortho_rotation(components, method="varimax", tol=1e-6, max_iter=100):
440
+ """Return rotated components."""
441
+ nrow, ncol = components.shape
442
+ rotation_matrix = np.eye(ncol)
443
+ var = 0
444
+
445
+ for _ in range(max_iter):
446
+ comp_rot = np.dot(components, rotation_matrix)
447
+ if method == "varimax":
448
+ tmp = comp_rot * np.transpose((comp_rot**2).sum(axis=0) / nrow)
449
+ elif method == "quartimax":
450
+ tmp = 0
451
+ u, s, v = np.linalg.svd(np.dot(components.T, comp_rot**3 - tmp))
452
+ rotation_matrix = np.dot(u, v)
453
+ var_new = np.sum(s)
454
+ if var != 0 and var_new < var * (1 + tol):
455
+ break
456
+ var = var_new
457
+
458
+ return np.dot(components, rotation_matrix).T
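The ``get_precision`` method above only inverts a small ``n_components x n_components`` matrix thanks to the matrix inversion (Woodbury) lemma. A minimal NumPy sketch of that identity, with toy variable names standing in for ``components_`` and ``noise_variance_`` (illustrative only, not part of the module):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
k, p = 2, 6                          # n_components, n_features (toy sizes)
W = rng.normal(size=(k, p))          # stands in for components_
psi = rng.uniform(0.5, 1.5, size=p)  # stands in for noise_variance_

# Covariance as in get_covariance(): W.T @ W + diag(psi)
cov = W.T @ W + np.diag(psi)

# Precision via the matrix inversion lemma, as in get_precision():
# only a (k, k) matrix is inverted instead of the full (p, p) covariance.
inner = W @ (W / psi).T + np.eye(k)                 # I + W @ diag(1/psi) @ W.T
precision = np.diag(1.0 / psi) - (W / psi).T @ linalg.inv(inner) @ (W / psi)

assert np.allclose(precision, linalg.inv(cov))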
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py ADDED
@@ -0,0 +1,409 @@
1
+ """Incremental Principal Components Analysis."""
2
+
3
+ # Author: Kyle Kastner <[email protected]>
4
+ # Giorgio Patrini
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral
8
+
9
+ import numpy as np
10
+ from scipy import linalg, sparse
11
+
12
+ from ..base import _fit_context
13
+ from ..utils import gen_batches
14
+ from ..utils._param_validation import Interval
15
+ from ..utils.extmath import _incremental_mean_and_var, svd_flip
16
+ from ._base import _BasePCA
17
+
18
+
19
+ class IncrementalPCA(_BasePCA):
20
+ """Incremental principal components analysis (IPCA).
21
+
22
+ Linear dimensionality reduction using Singular Value Decomposition of
23
+ the data, keeping only the most significant singular vectors to
24
+ project the data to a lower dimensional space. The input data is centered
25
+ but not scaled for each feature before applying the SVD.
26
+
27
+ Depending on the size of the input data, this algorithm can be much more
28
+ memory efficient than a PCA, and allows sparse input.
29
+
30
+ This algorithm has constant memory complexity, on the order
31
+ of ``batch_size * n_features``, enabling use of np.memmap files without
32
+ loading the entire file into memory. For sparse matrices, the input
33
+ is converted to dense in batches (in order to be able to subtract the
34
+ mean) which avoids storing the entire dense matrix at any one time.
35
+
36
+ The computational overhead of each SVD is
37
+ ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
38
+ remain in memory at a time. There will be ``n_samples / batch_size`` SVD
39
+ computations to get the principal components, versus 1 large SVD of
40
+ complexity ``O(n_samples * n_features ** 2)`` for PCA.
41
+
42
+ For a usage example, see
43
+ :ref:`sphx_glr_auto_examples_decomposition_plot_incremental_pca.py`.
44
+
45
+ Read more in the :ref:`User Guide <IncrementalPCA>`.
46
+
47
+ .. versionadded:: 0.16
48
+
49
+ Parameters
50
+ ----------
51
+ n_components : int, default=None
52
+ Number of components to keep. If ``n_components`` is ``None``,
53
+ then ``n_components`` is set to ``min(n_samples, n_features)``.
54
+
55
+ whiten : bool, default=False
56
+ When True (False by default) the transformed output is divided by the
57
+ per-component standard deviation (``sqrt(explained_variance_)``) to ensure
58
+ uncorrelated outputs with unit component-wise variances.
59
+
60
+ Whitening will remove some information from the transformed signal
61
+ (the relative variance scales of the components) but can sometimes
62
+ improve the predictive accuracy of the downstream estimators by
63
+ making data respect some hard-wired assumptions.
64
+
65
+ copy : bool, default=True
66
+ If False, X will be overwritten. ``copy=False`` can be used to
67
+ save memory but is unsafe for general use.
68
+
69
+ batch_size : int, default=None
70
+ The number of samples to use for each batch. Only used when calling
71
+ ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
72
+ is inferred from the data and set to ``5 * n_features``, to provide a
73
+ balance between approximation accuracy and memory consumption.
74
+
75
+ Attributes
76
+ ----------
77
+ components_ : ndarray of shape (n_components, n_features)
78
+ Principal axes in feature space, representing the directions of
79
+ maximum variance in the data. Equivalently, the right singular
80
+ vectors of the centered input data, parallel to its eigenvectors.
81
+ The components are sorted by decreasing ``explained_variance_``.
82
+
83
+ explained_variance_ : ndarray of shape (n_components,)
84
+ Variance explained by each of the selected components.
85
+
86
+ explained_variance_ratio_ : ndarray of shape (n_components,)
87
+ Percentage of variance explained by each of the selected components.
88
+ If all components are stored, the sum of explained variances is equal
89
+ to 1.0.
90
+
91
+ singular_values_ : ndarray of shape (n_components,)
92
+ The singular values corresponding to each of the selected components.
93
+ The singular values are equal to the 2-norms of the ``n_components``
94
+ variables in the lower-dimensional space.
95
+
96
+ mean_ : ndarray of shape (n_features,)
97
+ Per-feature empirical mean, aggregate over calls to ``partial_fit``.
98
+
99
+ var_ : ndarray of shape (n_features,)
100
+ Per-feature empirical variance, aggregate over calls to
101
+ ``partial_fit``.
102
+
103
+ noise_variance_ : float
104
+ The estimated noise covariance following the Probabilistic PCA model
105
+ from Tipping and Bishop 1999. See "Pattern Recognition and
106
+ Machine Learning" by C. Bishop, 12.2.1 p. 574 or
107
+ http://www.miketipping.com/papers/met-mppca.pdf.
108
+
109
+ n_components_ : int
110
+ The estimated number of components. Relevant when
111
+ ``n_components=None``.
112
+
113
+ n_samples_seen_ : int
114
+ The number of samples processed by the estimator. Will be reset on
115
+ new calls to fit, but increments across ``partial_fit`` calls.
116
+
117
+ batch_size_ : int
118
+ Inferred batch size from ``batch_size``.
119
+
120
+ n_features_in_ : int
121
+ Number of features seen during :term:`fit`.
122
+
123
+ .. versionadded:: 0.24
124
+
125
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
126
+ Names of features seen during :term:`fit`. Defined only when `X`
127
+ has feature names that are all strings.
128
+
129
+ .. versionadded:: 1.0
130
+
131
+ See Also
132
+ --------
133
+ PCA : Principal component analysis (PCA).
134
+ KernelPCA : Kernel Principal component analysis (KPCA).
135
+ SparsePCA : Sparse Principal Components Analysis (SparsePCA).
136
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
137
+
138
+ Notes
139
+ -----
140
+ Implements the incremental PCA model from:
141
+ *D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
142
+ Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
143
+ pp. 125-141, May 2008.*
144
+ See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
145
+
146
+ This model is an extension of the Sequential Karhunen-Loeve Transform from:
147
+ :doi:`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
148
+ its Application to Images, IEEE Transactions on Image Processing, Volume 9,
149
+ Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>`
150
+
151
+ We have specifically abstained from an optimization used by authors of both
152
+ papers, a QR decomposition used in specific situations to reduce the
153
+ algorithmic complexity of the SVD. The source for this technique is
154
+ *Matrix Computations, Third Edition, G. Golub and C. Van Loan, Chapter 5,
155
+ section 5.4.4, pp 252-253.*. This technique has been omitted because it is
156
+ advantageous only when decomposing a matrix with ``n_samples`` (rows)
157
+ >= 5/3 * ``n_features`` (columns), and hurts the readability of the
158
+ implemented algorithm. This would be a good opportunity for future
159
+ optimization, if it is deemed necessary.
160
+
161
+ References
162
+ ----------
163
+ D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
164
+ Tracking, International Journal of Computer Vision, Volume 77,
165
+ Issue 1-3, pp. 125-141, May 2008.
166
+
167
+ G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
168
+ Section 5.4.4, pp. 252-253.
169
+
170
+ Examples
171
+ --------
172
+ >>> from sklearn.datasets import load_digits
173
+ >>> from sklearn.decomposition import IncrementalPCA
174
+ >>> from scipy import sparse
175
+ >>> X, _ = load_digits(return_X_y=True)
176
+ >>> transformer = IncrementalPCA(n_components=7, batch_size=200)
177
+ >>> # either partially fit on smaller batches of data
178
+ >>> transformer.partial_fit(X[:100, :])
179
+ IncrementalPCA(batch_size=200, n_components=7)
180
+ >>> # or let the fit function itself divide the data into batches
181
+ >>> X_sparse = sparse.csr_matrix(X)
182
+ >>> X_transformed = transformer.fit_transform(X_sparse)
183
+ >>> X_transformed.shape
184
+ (1797, 7)
185
+ """
186
+
187
+ _parameter_constraints: dict = {
188
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
189
+ "whiten": ["boolean"],
190
+ "copy": ["boolean"],
191
+ "batch_size": [Interval(Integral, 1, None, closed="left"), None],
192
+ }
193
+
194
+ def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None):
195
+ self.n_components = n_components
196
+ self.whiten = whiten
197
+ self.copy = copy
198
+ self.batch_size = batch_size
199
+
200
+ @_fit_context(prefer_skip_nested_validation=True)
201
+ def fit(self, X, y=None):
202
+ """Fit the model with X, using minibatches of size batch_size.
203
+
204
+ Parameters
205
+ ----------
206
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
207
+ Training data, where `n_samples` is the number of samples and
208
+ `n_features` is the number of features.
209
+
210
+ y : Ignored
211
+ Not used, present for API consistency by convention.
212
+
213
+ Returns
214
+ -------
215
+ self : object
216
+ Returns the instance itself.
217
+ """
218
+ self.components_ = None
219
+ self.n_samples_seen_ = 0
220
+ self.mean_ = 0.0
221
+ self.var_ = 0.0
222
+ self.singular_values_ = None
223
+ self.explained_variance_ = None
224
+ self.explained_variance_ratio_ = None
225
+ self.noise_variance_ = None
226
+
227
+ X = self._validate_data(
228
+ X,
229
+ accept_sparse=["csr", "csc", "lil"],
230
+ copy=self.copy,
231
+ dtype=[np.float64, np.float32],
232
+ )
233
+ n_samples, n_features = X.shape
234
+
235
+ if self.batch_size is None:
236
+ self.batch_size_ = 5 * n_features
237
+ else:
238
+ self.batch_size_ = self.batch_size
239
+
240
+ for batch in gen_batches(
241
+ n_samples, self.batch_size_, min_batch_size=self.n_components or 0
242
+ ):
243
+ X_batch = X[batch]
244
+ if sparse.issparse(X_batch):
245
+ X_batch = X_batch.toarray()
246
+ self.partial_fit(X_batch, check_input=False)
247
+
248
+ return self
249
+
250
+ @_fit_context(prefer_skip_nested_validation=True)
251
+ def partial_fit(self, X, y=None, check_input=True):
252
+ """Incremental fit with X. All of X is processed as a single batch.
253
+
254
+ Parameters
255
+ ----------
256
+ X : array-like of shape (n_samples, n_features)
257
+ Training data, where `n_samples` is the number of samples and
258
+ `n_features` is the number of features.
259
+
260
+ y : Ignored
261
+ Not used, present for API consistency by convention.
262
+
263
+ check_input : bool, default=True
264
+ Run check_array on X.
265
+
266
+ Returns
267
+ -------
268
+ self : object
269
+ Returns the instance itself.
270
+ """
271
+ first_pass = not hasattr(self, "components_")
272
+
273
+ if check_input:
274
+ if sparse.issparse(X):
275
+ raise TypeError(
276
+ "IncrementalPCA.partial_fit does not support "
277
+ "sparse input. Either convert data to dense "
278
+ "or use IncrementalPCA.fit to do so in batches."
279
+ )
280
+ X = self._validate_data(
281
+ X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass
282
+ )
283
+ n_samples, n_features = X.shape
284
+ if first_pass:
285
+ self.components_ = None
286
+
287
+ if self.n_components is None:
288
+ if self.components_ is None:
289
+ self.n_components_ = min(n_samples, n_features)
290
+ else:
291
+ self.n_components_ = self.components_.shape[0]
292
+ elif not self.n_components <= n_features:
293
+ raise ValueError(
294
+ "n_components=%r invalid for n_features=%d, need "
295
+ "more rows than columns for IncrementalPCA "
296
+ "processing" % (self.n_components, n_features)
297
+ )
298
+ elif not self.n_components <= n_samples:
299
+ raise ValueError(
300
+ "n_components=%r must be less or equal to "
301
+ "the batch number of samples "
302
+ "%d." % (self.n_components, n_samples)
303
+ )
304
+ else:
305
+ self.n_components_ = self.n_components
306
+
307
+ if (self.components_ is not None) and (
308
+ self.components_.shape[0] != self.n_components_
309
+ ):
310
+ raise ValueError(
311
+ "Number of components has changed from %i "
312
+ "to %i between calls to partial_fit! Try "
313
+ "setting n_components to a fixed value."
314
+ % (self.components_.shape[0], self.n_components_)
315
+ )
316
+
317
+ # This is the first partial_fit
318
+ if not hasattr(self, "n_samples_seen_"):
319
+ self.n_samples_seen_ = 0
320
+ self.mean_ = 0.0
321
+ self.var_ = 0.0
322
+
323
+ # Update stats - they are 0 if this is the first step
324
+ col_mean, col_var, n_total_samples = _incremental_mean_and_var(
325
+ X,
326
+ last_mean=self.mean_,
327
+ last_variance=self.var_,
328
+ last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
329
+ )
330
+ n_total_samples = n_total_samples[0]
331
+
332
+ # Whitening
333
+ if self.n_samples_seen_ == 0:
334
+ # If it is the first step, simply whiten X
335
+ X -= col_mean
336
+ else:
337
+ col_batch_mean = np.mean(X, axis=0)
338
+ X -= col_batch_mean
339
+ # Build matrix of combined previous basis and new data
340
+ mean_correction = np.sqrt(
341
+ (self.n_samples_seen_ / n_total_samples) * n_samples
342
+ ) * (self.mean_ - col_batch_mean)
343
+ X = np.vstack(
344
+ (
345
+ self.singular_values_.reshape((-1, 1)) * self.components_,
346
+ X,
347
+ mean_correction,
348
+ )
349
+ )
350
+
351
+ U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
352
+ U, Vt = svd_flip(U, Vt, u_based_decision=False)
353
+ explained_variance = S**2 / (n_total_samples - 1)
354
+ explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples)
355
+
356
+ self.n_samples_seen_ = n_total_samples
357
+ self.components_ = Vt[: self.n_components_]
358
+ self.singular_values_ = S[: self.n_components_]
359
+ self.mean_ = col_mean
360
+ self.var_ = col_var
361
+ self.explained_variance_ = explained_variance[: self.n_components_]
362
+ self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
363
+ # we already checked `self.n_components <= n_samples` above
364
+ if self.n_components_ not in (n_samples, n_features):
365
+ self.noise_variance_ = explained_variance[self.n_components_ :].mean()
366
+ else:
367
+ self.noise_variance_ = 0.0
368
+ return self
369
+
370
+ def transform(self, X):
371
+ """Apply dimensionality reduction to X.
372
+
373
+ X is projected on the first principal components previously extracted
374
+ from a training set, using minibatches of size batch_size if X is
375
+ sparse.
376
+
377
+ Parameters
378
+ ----------
379
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
380
+ New data, where `n_samples` is the number of samples
381
+ and `n_features` is the number of features.
382
+
383
+ Returns
384
+ -------
385
+ X_new : ndarray of shape (n_samples, n_components)
386
+ Projection of X in the first principal components.
387
+
388
+ Examples
389
+ --------
390
+
391
+ >>> import numpy as np
392
+ >>> from sklearn.decomposition import IncrementalPCA
393
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
394
+ ... [1, 1], [2, 1], [3, 2]])
395
+ >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
396
+ >>> ipca.fit(X)
397
+ IncrementalPCA(batch_size=3, n_components=2)
398
+ >>> ipca.transform(X) # doctest: +SKIP
399
+ """
400
+ if sparse.issparse(X):
401
+ n_samples = X.shape[0]
402
+ output = []
403
+ for batch in gen_batches(
404
+ n_samples, self.batch_size_, min_batch_size=self.n_components or 0
405
+ ):
406
+ output.append(super().transform(X[batch].toarray()))
407
+ return np.vstack(output)
408
+ else:
409
+ return super().transform(X)
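As a usage illustration of the class above (toy data, not taken from the library's own examples): feeding the data to ``partial_fit`` batch by batch keeps only one batch in memory at a time, which is the scenario ``IncrementalPCA`` is designed for.

import numpy as np
from sklearn.decomposition import IncrementalPCA
from sklearn.utils import gen_batches

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 20))      # could equally be an np.memmap

ipca = IncrementalPCA(n_components=5)
for batch in gen_batches(X.shape[0], 100):
    ipca.partial_fit(X[batch])       # incremental mean/variance + small SVD per batch

X_reduced = ipca.transform(X)
print(X_reduced.shape)                         # (1000, 5)
print(ipca.explained_variance_ratio_.sum())    # fraction of variance retained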
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py ADDED
@@ -0,0 +1,2443 @@
1
+ """ Non-negative matrix factorization.
2
+ """
3
+ # Author: Vlad Niculae
4
+ # Lars Buitinck
5
+ # Mathieu Blondel <[email protected]>
6
+ # Tom Dupre la Tour
7
+ # License: BSD 3 clause
8
+
9
+ import itertools
10
+ import time
11
+ import warnings
12
+ from abc import ABC
13
+ from math import sqrt
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ import scipy.sparse as sp
18
+ from scipy import linalg
19
+
20
+ from .._config import config_context
21
+ from ..base import (
22
+ BaseEstimator,
23
+ ClassNamePrefixFeaturesOutMixin,
24
+ TransformerMixin,
25
+ _fit_context,
26
+ )
27
+ from ..exceptions import ConvergenceWarning
28
+ from ..utils import check_array, check_random_state, gen_batches, metadata_routing
29
+ from ..utils._param_validation import (
30
+ Hidden,
31
+ Interval,
32
+ StrOptions,
33
+ validate_params,
34
+ )
35
+ from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
36
+ from ..utils.validation import (
37
+ check_is_fitted,
38
+ check_non_negative,
39
+ )
40
+ from ._cdnmf_fast import _update_cdnmf_fast
41
+
42
+ EPSILON = np.finfo(np.float32).eps
43
+
44
+
45
+ def norm(x):
46
+ """Dot product-based Euclidean norm implementation.
47
+
48
+ See: http://fa.bianp.net/blog/2011/computing-the-vector-norm/
49
+
50
+ Parameters
51
+ ----------
52
+ x : array-like
53
+ Vector for which to compute the norm.
54
+ """
55
+ return sqrt(squared_norm(x))
56
+
57
+
58
+ def trace_dot(X, Y):
59
+ """Trace of np.dot(X, Y.T).
60
+
61
+ Parameters
62
+ ----------
63
+ X : array-like
64
+ First matrix.
65
+ Y : array-like
66
+ Second matrix.
67
+ """
68
+ return np.dot(X.ravel(), Y.ravel())
69
+
70
+
71
+ def _check_init(A, shape, whom):
72
+ A = check_array(A)
73
+ if shape[0] != "auto" and A.shape[0] != shape[0]:
74
+ raise ValueError(
75
+ f"Array with wrong first dimension passed to {whom}. Expected {shape[0]}, "
76
+ f"but got {A.shape[0]}."
77
+ )
78
+ if shape[1] != "auto" and A.shape[1] != shape[1]:
79
+ raise ValueError(
80
+ f"Array with wrong second dimension passed to {whom}. Expected {shape[1]}, "
81
+ f"but got {A.shape[1]}."
82
+ )
83
+ check_non_negative(A, whom)
84
+ if np.max(A) == 0:
85
+ raise ValueError(f"Array passed to {whom} is full of zeros.")
86
+
87
+
88
+ def _beta_divergence(X, W, H, beta, square_root=False):
89
+ """Compute the beta-divergence of X and dot(W, H).
90
+
91
+ Parameters
92
+ ----------
93
+ X : float or array-like of shape (n_samples, n_features)
94
+
95
+ W : float or array-like of shape (n_samples, n_components)
96
+
97
+ H : float or array-like of shape (n_components, n_features)
98
+
99
+ beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}
100
+ Parameter of the beta-divergence.
101
+ If beta == 2, this is half the Frobenius *squared* norm.
102
+ If beta == 1, this is the generalized Kullback-Leibler divergence.
103
+ If beta == 0, this is the Itakura-Saito divergence.
104
+ Else, this is the general beta-divergence.
105
+
106
+ square_root : bool, default=False
107
+ If True, return np.sqrt(2 * res)
108
+ For beta == 2, it corresponds to the Frobenius norm.
109
+
110
+ Returns
111
+ -------
112
+ res : float
113
+ Beta divergence of X and np.dot(W, H).
114
+ """
115
+ beta = _beta_loss_to_float(beta)
116
+
117
+ # The method can be called with scalars
118
+ if not sp.issparse(X):
119
+ X = np.atleast_2d(X)
120
+ W = np.atleast_2d(W)
121
+ H = np.atleast_2d(H)
122
+
123
+ # Frobenius norm
124
+ if beta == 2:
125
+ # Avoid the creation of the dense np.dot(W, H) if X is sparse.
126
+ if sp.issparse(X):
127
+ norm_X = np.dot(X.data, X.data)
128
+ norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
129
+ cross_prod = trace_dot((X @ H.T), W)
130
+ res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
131
+ else:
132
+ res = squared_norm(X - np.dot(W, H)) / 2.0
133
+
134
+ if square_root:
135
+ return np.sqrt(res * 2)
136
+ else:
137
+ return res
138
+
139
+ if sp.issparse(X):
140
+ # compute np.dot(W, H) only where X is nonzero
141
+ WH_data = _special_sparse_dot(W, H, X).data
142
+ X_data = X.data
143
+ else:
144
+ WH = np.dot(W, H)
145
+ WH_data = WH.ravel()
146
+ X_data = X.ravel()
147
+
148
+ # do not affect the zeros: here 0 ** (-1) = 0 and not infinity
149
+ indices = X_data > EPSILON
150
+ WH_data = WH_data[indices]
151
+ X_data = X_data[indices]
152
+
153
+ # used to avoid division by zero
154
+ WH_data[WH_data < EPSILON] = EPSILON
155
+
156
+ # generalized Kullback-Leibler divergence
157
+ if beta == 1:
158
+ # fast and memory efficient computation of np.sum(np.dot(W, H))
159
+ sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
160
+ # computes np.sum(X * log(X / WH)) only where X is nonzero
161
+ div = X_data / WH_data
162
+ res = np.dot(X_data, np.log(div))
163
+ # add full np.sum(np.dot(W, H)) - np.sum(X)
164
+ res += sum_WH - X_data.sum()
165
+
166
+ # Itakura-Saito divergence
167
+ elif beta == 0:
168
+ div = X_data / WH_data
169
+ res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
170
+
171
+ # beta-divergence, beta not in (0, 1, 2)
172
+ else:
173
+ if sp.issparse(X):
174
+ # slow loop, but memory efficient computation of :
175
+ # np.sum(np.dot(W, H) ** beta)
176
+ sum_WH_beta = 0
177
+ for i in range(X.shape[1]):
178
+ sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
179
+
180
+ else:
181
+ sum_WH_beta = np.sum(WH**beta)
182
+
183
+ sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
184
+ res = (X_data**beta).sum() - beta * sum_X_WH
185
+ res += sum_WH_beta * (beta - 1)
186
+ res /= beta * (beta - 1)
187
+
188
+ if square_root:
189
+ res = max(res, 0) # avoid negative number due to rounding errors
190
+ return np.sqrt(2 * res)
191
+ else:
192
+ return res
193
+
194
+
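A quick plain-NumPy check of the Frobenius special case documented above: for ``beta == 2`` the divergence is half the squared Frobenius norm of ``X - WH``, and ``square_root=True`` recovers the plain Frobenius norm (toy matrices, illustrative only):

import numpy as np

rng = np.random.default_rng(0)
X = np.abs(rng.normal(size=(4, 5)))
W = np.abs(rng.normal(size=(4, 3)))
H = np.abs(rng.normal(size=(3, 5)))

res = 0.5 * np.sum((X - W @ H) ** 2)   # value returned for beta=2, square_root=False
frob = np.sqrt(2 * res)                # value returned for square_root=True
assert np.isclose(frob, np.linalg.norm(X - W @ H, "fro"))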
195
+ def _special_sparse_dot(W, H, X):
196
+ """Computes np.dot(W, H), only where X is non zero."""
197
+ if sp.issparse(X):
198
+ ii, jj = X.nonzero()
199
+ n_vals = ii.shape[0]
200
+ dot_vals = np.empty(n_vals)
201
+ n_components = W.shape[1]
202
+
203
+ batch_size = max(n_components, n_vals // n_components)
204
+ for start in range(0, n_vals, batch_size):
205
+ batch = slice(start, start + batch_size)
206
+ dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(
207
+ axis=1
208
+ )
209
+
210
+ WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
211
+ return WH.tocsr()
212
+ else:
213
+ return np.dot(W, H)
214
+
215
+
216
+ def _beta_loss_to_float(beta_loss):
217
+ """Convert string beta_loss to float."""
218
+ beta_loss_map = {"frobenius": 2, "kullback-leibler": 1, "itakura-saito": 0}
219
+ if isinstance(beta_loss, str):
220
+ beta_loss = beta_loss_map[beta_loss]
221
+ return beta_loss
222
+
223
+
224
+ def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None):
225
+ """Algorithms for NMF initialization.
226
+
227
+ Computes an initial guess for the non-negative
228
+ rank k matrix approximation for X: X = WH.
229
+
230
+ Parameters
231
+ ----------
232
+ X : array-like of shape (n_samples, n_features)
233
+ The data matrix to be decomposed.
234
+
235
+ n_components : int
236
+ The number of components desired in the approximation.
237
+
238
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None
239
+ Method used to initialize the procedure.
240
+ Valid options:
241
+
242
+ - None: 'nndsvda' if n_components <= min(n_samples, n_features),
243
+ otherwise 'random'.
244
+
245
+ - 'random': non-negative random matrices, scaled with:
246
+ sqrt(X.mean() / n_components)
247
+
248
+ - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
249
+ initialization (better for sparseness)
250
+
251
+ - 'nndsvda': NNDSVD with zeros filled with the average of X
252
+ (better when sparsity is not desired)
253
+
254
+ - 'nndsvdar': NNDSVD with zeros filled with small random values
255
+ (generally faster, less accurate alternative to NNDSVDa
256
+ for when sparsity is not desired)
257
+
258
+ - 'custom': use custom matrices W and H
259
+
260
+ .. versionchanged:: 1.1
261
+ When `init=None` and `n_components` is less than or equal to
262
+ `min(n_samples, n_features)`, the default is `nndsvda` instead of `nndsvd`.
263
+
264
+ eps : float, default=1e-6
265
+ Truncate all values less than this in the output to zero.
266
+
267
+ random_state : int, RandomState instance or None, default=None
268
+ Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for
269
+ reproducible results across multiple function calls.
270
+ See :term:`Glossary <random_state>`.
271
+
272
+ Returns
273
+ -------
274
+ W : array-like of shape (n_samples, n_components)
275
+ Initial guesses for solving X ~= WH.
276
+
277
+ H : array-like of shape (n_components, n_features)
278
+ Initial guesses for solving X ~= WH.
279
+
280
+ References
281
+ ----------
282
+ C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
283
+ nonnegative matrix factorization - Pattern Recognition, 2008
284
+ http://tinyurl.com/nndsvd
285
+ """
286
+ check_non_negative(X, "NMF initialization")
287
+ n_samples, n_features = X.shape
288
+
289
+ if (
290
+ init is not None
291
+ and init != "random"
292
+ and n_components > min(n_samples, n_features)
293
+ ):
294
+ raise ValueError(
295
+ "init = '{}' can only be used when "
296
+ "n_components <= min(n_samples, n_features)".format(init)
297
+ )
298
+
299
+ if init is None:
300
+ if n_components <= min(n_samples, n_features):
301
+ init = "nndsvda"
302
+ else:
303
+ init = "random"
304
+
305
+ # Random initialization
306
+ if init == "random":
307
+ avg = np.sqrt(X.mean() / n_components)
308
+ rng = check_random_state(random_state)
309
+ H = avg * rng.standard_normal(size=(n_components, n_features)).astype(
310
+ X.dtype, copy=False
311
+ )
312
+ W = avg * rng.standard_normal(size=(n_samples, n_components)).astype(
313
+ X.dtype, copy=False
314
+ )
315
+ np.abs(H, out=H)
316
+ np.abs(W, out=W)
317
+ return W, H
318
+
319
+ # NNDSVD initialization
320
+ U, S, V = randomized_svd(X, n_components, random_state=random_state)
321
+ W = np.zeros_like(U)
322
+ H = np.zeros_like(V)
323
+
324
+ # The leading singular triplet is non-negative
325
+ # so it can be used as is for initialization.
326
+ W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
327
+ H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
328
+
329
+ for j in range(1, n_components):
330
+ x, y = U[:, j], V[j, :]
331
+
332
+ # extract positive and negative parts of column vectors
333
+ x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
334
+ x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
335
+
336
+ # and their norms
337
+ x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
338
+ x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
339
+
340
+ m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
341
+
342
+ # choose update
343
+ if m_p > m_n:
344
+ u = x_p / x_p_nrm
345
+ v = y_p / y_p_nrm
346
+ sigma = m_p
347
+ else:
348
+ u = x_n / x_n_nrm
349
+ v = y_n / y_n_nrm
350
+ sigma = m_n
351
+
352
+ lbd = np.sqrt(S[j] * sigma)
353
+ W[:, j] = lbd * u
354
+ H[j, :] = lbd * v
355
+
356
+ W[W < eps] = 0
357
+ H[H < eps] = 0
358
+
359
+ if init == "nndsvd":
360
+ pass
361
+ elif init == "nndsvda":
362
+ avg = X.mean()
363
+ W[W == 0] = avg
364
+ H[H == 0] = avg
365
+ elif init == "nndsvdar":
366
+ rng = check_random_state(random_state)
367
+ avg = X.mean()
368
+ W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100)
369
+ H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100)
370
+ else:
371
+ raise ValueError(
372
+ "Invalid init parameter: got %r instead of one of %r"
373
+ % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar"))
374
+ )
375
+
376
+ return W, H
377
+
378
+
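The loop above processes one singular pair at a time; a small standalone sketch of that per-pair step, with made-up numbers, only to illustrate the positive/negative split used by NNDSVD:

import numpy as np

x = np.array([0.6, -0.3, 0.2])   # j-th left singular vector (toy values)
y = np.array([-0.1, 0.7, -0.4])  # j-th right singular vector
s = 2.0                          # corresponding singular value

x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)                   # positive parts
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))   # negative parts
m_p = np.linalg.norm(x_p) * np.linalg.norm(y_p)
m_n = np.linalg.norm(x_n) * np.linalg.norm(y_n)

# keep whichever signed part carries more energy, renormalized
if m_p > m_n:
    u, v, sigma = x_p / np.linalg.norm(x_p), y_p / np.linalg.norm(y_p), m_p
else:
    u, v, sigma = x_n / np.linalg.norm(x_n), y_n / np.linalg.norm(y_n), m_n

w_col = np.sqrt(s * sigma) * u   # non-negative column of W
h_row = np.sqrt(s * sigma) * v   # non-negative row of H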
379
+ def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state):
380
+ """Helper function for _fit_coordinate_descent.
381
+
382
+ Update W to minimize the objective function, iterating once over all
383
+ coordinates. By symmetry, to update H, one can call
384
+ _update_coordinate_descent(X.T, Ht, W, ...).
385
+
386
+ """
387
+ n_components = Ht.shape[1]
388
+
389
+ HHt = np.dot(Ht.T, Ht)
390
+ XHt = safe_sparse_dot(X, Ht)
391
+
392
+ # L2 regularization corresponds to increase of the diagonal of HHt
393
+ if l2_reg != 0.0:
394
+ # adds l2_reg only on the diagonal
395
+ HHt.flat[:: n_components + 1] += l2_reg
396
+ # L1 regularization corresponds to decrease of each element of XHt
397
+ if l1_reg != 0.0:
398
+ XHt -= l1_reg
399
+
400
+ if shuffle:
401
+ permutation = random_state.permutation(n_components)
402
+ else:
403
+ permutation = np.arange(n_components)
404
+ # The following seems to be required on 64-bit Windows w/ Python 3.5.
405
+ permutation = np.asarray(permutation, dtype=np.intp)
406
+ return _update_cdnmf_fast(W, HHt, XHt, permutation)
407
+
408
+
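For reference, the single-coordinate step that ``_update_cdnmf_fast`` applies to every entry of ``W`` amounts to a projected Newton step on the Frobenius objective; a plain-NumPy sketch for one entry (toy data, not the Cython implementation):

import numpy as np

rng = np.random.default_rng(0)
X = np.abs(rng.normal(size=(6, 8)))
W = np.abs(rng.normal(size=(6, 3)))
Ht = np.abs(rng.normal(size=(8, 3)))

HHt = Ht.T @ Ht          # Gram matrix, computed once per sweep
XHt = X @ Ht

i, t = 2, 1
grad = W[i] @ HHt[:, t] - XHt[i, t]              # d/dW[i, t] of 0.5 * ||X - W @ Ht.T||**2
W[i, t] = max(0.0, W[i, t] - grad / HHt[t, t])   # Newton step, clipped at zero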
409
+ def _fit_coordinate_descent(
410
+ X,
411
+ W,
412
+ H,
413
+ tol=1e-4,
414
+ max_iter=200,
415
+ l1_reg_W=0,
416
+ l1_reg_H=0,
417
+ l2_reg_W=0,
418
+ l2_reg_H=0,
419
+ update_H=True,
420
+ verbose=0,
421
+ shuffle=False,
422
+ random_state=None,
423
+ ):
424
+ """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
425
+
426
+ The objective function is minimized with an alternating minimization of W
427
+ and H. Each minimization is done with a cyclic (up to a permutation of the
428
+ features) Coordinate Descent.
429
+
430
+ Parameters
431
+ ----------
432
+ X : array-like of shape (n_samples, n_features)
433
+ Constant matrix.
434
+
435
+ W : array-like of shape (n_samples, n_components)
436
+ Initial guess for the solution.
437
+
438
+ H : array-like of shape (n_components, n_features)
439
+ Initial guess for the solution.
440
+
441
+ tol : float, default=1e-4
442
+ Tolerance of the stopping condition.
443
+
444
+ max_iter : int, default=200
445
+ Maximum number of iterations before timing out.
446
+
447
+ l1_reg_W : float, default=0.
448
+ L1 regularization parameter for W.
449
+
450
+ l1_reg_H : float, default=0.
451
+ L1 regularization parameter for H.
452
+
453
+ l2_reg_W : float, default=0.
454
+ L2 regularization parameter for W.
455
+
456
+ l2_reg_H : float, default=0.
457
+ L2 regularization parameter for H.
458
+
459
+ update_H : bool, default=True
460
+ If True, both W and H will be estimated from initial guesses.
461
+ If False, only W will be estimated.
462
+
463
+ verbose : int, default=0
464
+ The verbosity level.
465
+
466
+ shuffle : bool, default=False
467
+ If true, randomize the order of coordinates in the CD solver.
468
+
469
+ random_state : int, RandomState instance or None, default=None
470
+ Used to randomize the coordinates in the CD solver, when
471
+ ``shuffle`` is set to ``True``. Pass an int for reproducible
472
+ results across multiple function calls.
473
+ See :term:`Glossary <random_state>`.
474
+
475
+ Returns
476
+ -------
477
+ W : ndarray of shape (n_samples, n_components)
478
+ Solution to the non-negative least squares problem.
479
+
480
+ H : ndarray of shape (n_components, n_features)
481
+ Solution to the non-negative least squares problem.
482
+
483
+ n_iter : int
484
+ The number of iterations done by the algorithm.
485
+
486
+ References
487
+ ----------
488
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
489
+ factorizations" <10.1587/transfun.E92.A.708>`
490
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
491
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
492
+ """
493
+ # so W and Ht are both in C order in memory
494
+ Ht = check_array(H.T, order="C")
495
+ X = check_array(X, accept_sparse="csr")
496
+
497
+ rng = check_random_state(random_state)
498
+
499
+ for n_iter in range(1, max_iter + 1):
500
+ violation = 0.0
501
+
502
+ # Update W
503
+ violation += _update_coordinate_descent(
504
+ X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng
505
+ )
506
+ # Update H
507
+ if update_H:
508
+ violation += _update_coordinate_descent(
509
+ X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng
510
+ )
511
+
512
+ if n_iter == 1:
513
+ violation_init = violation
514
+
515
+ if violation_init == 0:
516
+ break
517
+
518
+ if verbose:
519
+ print("violation:", violation / violation_init)
520
+
521
+ if violation / violation_init <= tol:
522
+ if verbose:
523
+ print("Converged at iteration", n_iter + 1)
524
+ break
525
+
526
+ return W, Ht.T, n_iter
527
+
528
+
529
+ def _multiplicative_update_w(
530
+ X,
531
+ W,
532
+ H,
533
+ beta_loss,
534
+ l1_reg_W,
535
+ l2_reg_W,
536
+ gamma,
537
+ H_sum=None,
538
+ HHt=None,
539
+ XHt=None,
540
+ update_H=True,
541
+ ):
542
+ """Update W in Multiplicative Update NMF."""
543
+ if beta_loss == 2:
544
+ # Numerator
545
+ if XHt is None:
546
+ XHt = safe_sparse_dot(X, H.T)
547
+ if update_H:
548
+ # avoid a copy of XHt, which will be re-computed (update_H=True)
549
+ numerator = XHt
550
+ else:
551
+ # preserve the XHt, which is not re-computed (update_H=False)
552
+ numerator = XHt.copy()
553
+
554
+ # Denominator
555
+ if HHt is None:
556
+ HHt = np.dot(H, H.T)
557
+ denominator = np.dot(W, HHt)
558
+
559
+ else:
560
+ # Numerator
561
+ # if X is sparse, compute WH only where X is non zero
562
+ WH_safe_X = _special_sparse_dot(W, H, X)
563
+ if sp.issparse(X):
564
+ WH_safe_X_data = WH_safe_X.data
565
+ X_data = X.data
566
+ else:
567
+ WH_safe_X_data = WH_safe_X
568
+ X_data = X
569
+ # copy used in the Denominator
570
+ WH = WH_safe_X.copy()
571
+ if beta_loss - 1.0 < 0:
572
+ WH[WH < EPSILON] = EPSILON
573
+
574
+ # to avoid taking a negative power of zero
575
+ if beta_loss - 2.0 < 0:
576
+ WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
577
+
578
+ if beta_loss == 1:
579
+ np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
580
+ elif beta_loss == 0:
581
+ # speeds up computation time
582
+ # refer to https://github.com/numpy/numpy/issues/9363
583
+ WH_safe_X_data **= -1
584
+ WH_safe_X_data **= 2
585
+ # element-wise multiplication
586
+ WH_safe_X_data *= X_data
587
+ else:
588
+ WH_safe_X_data **= beta_loss - 2
589
+ # element-wise multiplication
590
+ WH_safe_X_data *= X_data
591
+
592
+ # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
593
+ numerator = safe_sparse_dot(WH_safe_X, H.T)
594
+
595
+ # Denominator
596
+ if beta_loss == 1:
597
+ if H_sum is None:
598
+ H_sum = np.sum(H, axis=1) # shape(n_components, )
599
+ denominator = H_sum[np.newaxis, :]
600
+
601
+ else:
602
+ # computation of WHHt = dot(dot(W, H) ** (beta_loss - 1), H.T)
603
+ if sp.issparse(X):
604
+ # memory efficient computation
605
+ # (compute row by row, avoiding the dense matrix WH)
606
+ WHHt = np.empty(W.shape)
607
+ for i in range(X.shape[0]):
608
+ WHi = np.dot(W[i, :], H)
609
+ if beta_loss - 1 < 0:
610
+ WHi[WHi < EPSILON] = EPSILON
611
+ WHi **= beta_loss - 1
612
+ WHHt[i, :] = np.dot(WHi, H.T)
613
+ else:
614
+ WH **= beta_loss - 1
615
+ WHHt = np.dot(WH, H.T)
616
+ denominator = WHHt
617
+
618
+ # Add L1 and L2 regularization
619
+ if l1_reg_W > 0:
620
+ denominator += l1_reg_W
621
+ if l2_reg_W > 0:
622
+ denominator = denominator + l2_reg_W * W
623
+ denominator[denominator == 0] = EPSILON
624
+
625
+ numerator /= denominator
626
+ delta_W = numerator
627
+
628
+ # gamma is in ]0, 1]
629
+ if gamma != 1:
630
+ delta_W **= gamma
631
+
632
+ W *= delta_W
633
+
634
+ return W, H_sum, HHt, XHt
635
+
636
+
637
+ def _multiplicative_update_h(
638
+ X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None
639
+ ):
640
+ """update H in Multiplicative Update NMF."""
641
+ if beta_loss == 2:
642
+ numerator = safe_sparse_dot(W.T, X)
643
+ denominator = np.linalg.multi_dot([W.T, W, H])
644
+
645
+ else:
646
+ # Numerator
647
+ WH_safe_X = _special_sparse_dot(W, H, X)
648
+ if sp.issparse(X):
649
+ WH_safe_X_data = WH_safe_X.data
650
+ X_data = X.data
651
+ else:
652
+ WH_safe_X_data = WH_safe_X
653
+ X_data = X
654
+ # copy used in the Denominator
655
+ WH = WH_safe_X.copy()
656
+ if beta_loss - 1.0 < 0:
657
+ WH[WH < EPSILON] = EPSILON
658
+
659
+ # to avoid division by zero
660
+ if beta_loss - 2.0 < 0:
661
+ WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
662
+
663
+ if beta_loss == 1:
664
+ np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
665
+ elif beta_loss == 0:
666
+ # speeds up computation time
667
+ # refer to /numpy/numpy/issues/9363
668
+ WH_safe_X_data **= -1
669
+ WH_safe_X_data **= 2
670
+ # element-wise multiplication
671
+ WH_safe_X_data *= X_data
672
+ else:
673
+ WH_safe_X_data **= beta_loss - 2
674
+ # element-wise multiplication
675
+ WH_safe_X_data *= X_data
676
+
677
+ # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
678
+ numerator = safe_sparse_dot(W.T, WH_safe_X)
679
+
680
+ # Denominator
681
+ if beta_loss == 1:
682
+ W_sum = np.sum(W, axis=0) # shape(n_components, )
683
+ W_sum[W_sum == 0] = 1.0
684
+ denominator = W_sum[:, np.newaxis]
685
+
686
+ # beta_loss not in (1, 2)
687
+ else:
688
+ # computation of WtWH = dot(W.T, dot(W, H) ** (beta_loss - 1))
689
+ if sp.issparse(X):
690
+ # memory efficient computation
691
+ # (compute column by column, avoiding the dense matrix WH)
692
+ WtWH = np.empty(H.shape)
693
+ for i in range(X.shape[1]):
694
+ WHi = np.dot(W, H[:, i])
695
+ if beta_loss - 1 < 0:
696
+ WHi[WHi < EPSILON] = EPSILON
697
+ WHi **= beta_loss - 1
698
+ WtWH[:, i] = np.dot(W.T, WHi)
699
+ else:
700
+ WH **= beta_loss - 1
701
+ WtWH = np.dot(W.T, WH)
702
+ denominator = WtWH
703
+
704
+ # Add L1 and L2 regularization
705
+ if l1_reg_H > 0:
706
+ denominator += l1_reg_H
707
+ if l2_reg_H > 0:
708
+ denominator = denominator + l2_reg_H * H
709
+ denominator[denominator == 0] = EPSILON
710
+
711
+ if A is not None and B is not None:
712
+ # Updates for the online nmf
713
+ if gamma != 1:
714
+ H **= 1 / gamma
715
+ numerator *= H
716
+ A *= rho
717
+ B *= rho
718
+ A += numerator
719
+ B += denominator
720
+ H = A / B
721
+
722
+ if gamma != 1:
723
+ H **= gamma
724
+ else:
725
+ delta_H = numerator
726
+ delta_H /= denominator
727
+ if gamma != 1:
728
+ delta_H **= gamma
729
+ H *= delta_H
730
+
731
+ return H
732
+
733
+
734
+ def _fit_multiplicative_update(
735
+ X,
736
+ W,
737
+ H,
738
+ beta_loss="frobenius",
739
+ max_iter=200,
740
+ tol=1e-4,
741
+ l1_reg_W=0,
742
+ l1_reg_H=0,
743
+ l2_reg_W=0,
744
+ l2_reg_H=0,
745
+ update_H=True,
746
+ verbose=0,
747
+ ):
748
+ """Compute Non-negative Matrix Factorization with Multiplicative Update.
749
+
750
+ The objective function is _beta_divergence(X, WH) and is minimized with an
751
+ alternating minimization of W and H. Each minimization is done with a
752
+ Multiplicative Update.
753
+
754
+ Parameters
755
+ ----------
756
+ X : array-like of shape (n_samples, n_features)
757
+ Constant input matrix.
758
+
759
+ W : array-like of shape (n_samples, n_components)
760
+ Initial guess for the solution.
761
+
762
+ H : array-like of shape (n_components, n_features)
763
+ Initial guess for the solution.
764
+
765
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
766
+ 'itakura-saito'}, default='frobenius'
767
+ String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
768
+ Beta divergence to be minimized, measuring the distance between X
769
+ and the dot product WH. Note that values different from 'frobenius'
770
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
771
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
772
+ matrix X cannot contain zeros.
773
+
774
+ max_iter : int, default=200
775
+ Number of iterations.
776
+
777
+ tol : float, default=1e-4
778
+ Tolerance of the stopping condition.
779
+
780
+ l1_reg_W : float, default=0.
781
+ L1 regularization parameter for W.
782
+
783
+ l1_reg_H : float, default=0.
784
+ L1 regularization parameter for H.
785
+
786
+ l2_reg_W : float, default=0.
787
+ L2 regularization parameter for W.
788
+
789
+ l2_reg_H : float, default=0.
790
+ L2 regularization parameter for H.
791
+
792
+ update_H : bool, default=True
793
+ If True, both W and H will be estimated from initial guesses.
794
+ If False, only W will be estimated.
795
+
796
+ verbose : int, default=0
797
+ The verbosity level.
798
+
799
+ Returns
800
+ -------
801
+ W : ndarray of shape (n_samples, n_components)
802
+ Solution to the non-negative least squares problem.
803
+
804
+ H : ndarray of shape (n_components, n_features)
805
+ Solution to the non-negative least squares problem.
806
+
807
+ n_iter : int
808
+ The number of iterations done by the algorithm.
809
+
810
+ References
811
+ ----------
812
+ Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix
813
+ Factorization. Adv. Neural Inform. Process. Syst.. 13.
814
+ Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
815
+ factorization with the beta-divergence. Neural Computation, 23(9).
816
+ """
817
+ start_time = time.time()
818
+
819
+ beta_loss = _beta_loss_to_float(beta_loss)
820
+
821
+ # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
822
+ if beta_loss < 1:
823
+ gamma = 1.0 / (2.0 - beta_loss)
824
+ elif beta_loss > 2:
825
+ gamma = 1.0 / (beta_loss - 1.0)
826
+ else:
827
+ gamma = 1.0
828
+
829
+ # used for the convergence criterion
830
+ error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
831
+ previous_error = error_at_init
832
+
833
+ H_sum, HHt, XHt = None, None, None
834
+ for n_iter in range(1, max_iter + 1):
835
+ # update W
836
+ # H_sum, HHt and XHt are saved and reused if not update_H
837
+ W, H_sum, HHt, XHt = _multiplicative_update_w(
838
+ X,
839
+ W,
840
+ H,
841
+ beta_loss=beta_loss,
842
+ l1_reg_W=l1_reg_W,
843
+ l2_reg_W=l2_reg_W,
844
+ gamma=gamma,
845
+ H_sum=H_sum,
846
+ HHt=HHt,
847
+ XHt=XHt,
848
+ update_H=update_H,
849
+ )
850
+
851
+ # necessary for stability with beta_loss < 1
852
+ if beta_loss < 1:
853
+ W[W < np.finfo(np.float64).eps] = 0.0
854
+
855
+ # update H (only at fit or fit_transform)
856
+ if update_H:
857
+ H = _multiplicative_update_h(
858
+ X,
859
+ W,
860
+ H,
861
+ beta_loss=beta_loss,
862
+ l1_reg_H=l1_reg_H,
863
+ l2_reg_H=l2_reg_H,
864
+ gamma=gamma,
865
+ )
866
+
867
+ # These values will be recomputed since H changed
868
+ H_sum, HHt, XHt = None, None, None
869
+
870
+ # necessary for stability with beta_loss < 1
871
+ if beta_loss <= 1:
872
+ H[H < np.finfo(np.float64).eps] = 0.0
873
+
874
+ # test convergence criterion every 10 iterations
875
+ if tol > 0 and n_iter % 10 == 0:
876
+ error = _beta_divergence(X, W, H, beta_loss, square_root=True)
877
+
878
+ if verbose:
879
+ iter_time = time.time()
880
+ print(
881
+ "Epoch %02d reached after %.3f seconds, error: %f"
882
+ % (n_iter, iter_time - start_time, error)
883
+ )
884
+
885
+ if (previous_error - error) / error_at_init < tol:
886
+ break
887
+ previous_error = error
888
+
889
+ # do not print if we have already printed in the convergence test
890
+ if verbose and (tol == 0 or n_iter % 10 != 0):
891
+ end_time = time.time()
892
+ print(
893
+ "Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)
894
+ )
895
+
896
+ return W, H, n_iter
897
+
898
+
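For ``beta_loss=2`` and no regularization, the updates above reduce to the classical Lee-Seung multiplicative rules; a compact plain-NumPy sketch of those rules on toy data (illustrative, not the library code path):

import numpy as np

rng = np.random.default_rng(0)
X = np.abs(rng.normal(size=(20, 12)))
W = np.abs(rng.normal(size=(20, 4)))
H = np.abs(rng.normal(size=(4, 12)))

eps = np.finfo(np.float64).eps
err_start = np.linalg.norm(X - W @ H)
for _ in range(50):
    W *= (X @ H.T) / np.maximum(W @ (H @ H.T), eps)
    H *= (W.T @ X) / np.maximum((W.T @ W) @ H, eps)
err_end = np.linalg.norm(X - W @ H)
assert err_end <= err_start   # the Frobenius error does not increase under these updates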
899
+ @validate_params(
900
+ {
901
+ "X": ["array-like", "sparse matrix"],
902
+ "W": ["array-like", None],
903
+ "H": ["array-like", None],
904
+ "update_H": ["boolean"],
905
+ },
906
+ prefer_skip_nested_validation=False,
907
+ )
908
+ def non_negative_factorization(
909
+ X,
910
+ W=None,
911
+ H=None,
912
+ n_components="warn",
913
+ *,
914
+ init=None,
915
+ update_H=True,
916
+ solver="cd",
917
+ beta_loss="frobenius",
918
+ tol=1e-4,
919
+ max_iter=200,
920
+ alpha_W=0.0,
921
+ alpha_H="same",
922
+ l1_ratio=0.0,
923
+ random_state=None,
924
+ verbose=0,
925
+ shuffle=False,
926
+ ):
927
+ """Compute Non-negative Matrix Factorization (NMF).
928
+
929
+ Find two non-negative matrices (W, H) whose product approximates the non-
930
+ negative matrix X. This factorization can be used for example for
931
+ dimensionality reduction, source separation or topic extraction.
932
+
933
+ The objective function is:
934
+
935
+ .. math::
936
+
937
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
938
+
939
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
940
+
941
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
942
+
943
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
944
+
945
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
946
+
947
+ Where:
948
+
949
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
950
+
951
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
952
+
953
+ The generic norm :math:`||X - WH||_{loss}^2` may represent
954
+ the Frobenius norm or another supported beta-divergence loss.
955
+ The choice between options is controlled by the `beta_loss` parameter.
956
+
957
+ The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
958
+ `H` to keep their impact balanced with respect to one another and to the data fit
959
+ term as independent as possible of the size `n_samples` of the training set.
960
+
961
+ The objective function is minimized with an alternating minimization of W
962
+ and H. If H is given and update_H=False, it solves for W only.
963
+
964
+ Note that the transformed data is named W and the components matrix is named H. In
965
+ the NMF literature, the naming convention is usually the opposite since the data
966
+ matrix X is transposed.
967
+
968
+ Parameters
969
+ ----------
970
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
971
+ Constant matrix.
972
+
973
+ W : array-like of shape (n_samples, n_components), default=None
974
+ If `init='custom'`, it is used as initial guess for the solution.
975
+ If `update_H=False`, it is initialised as an array of zeros, unless
976
+ `solver='mu'`, then it is filled with values calculated by
977
+ `np.sqrt(X.mean() / self._n_components)`.
978
+ If `None`, uses the initialisation method specified in `init`.
979
+
980
+ H : array-like of shape (n_components, n_features), default=None
981
+ If `init='custom'`, it is used as initial guess for the solution.
982
+ If `update_H=False`, it is used as a constant, to solve for W only.
983
+ If `None`, uses the initialisation method specified in `init`.
984
+
985
+ n_components : int or {'auto'} or None, default=None
986
+ Number of components, if n_components is not set all features
987
+ are kept.
988
+ If `n_components='auto'`, the number of components is automatically inferred
989
+ from `W` or `H` shapes.
990
+
991
+ .. versionchanged:: 1.4
992
+ Added `'auto'` value.
993
+
994
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
995
+ Method used to initialize the procedure.
996
+
997
+ Valid options:
998
+
999
+ - None: 'nndsvda' if n_components <= min(n_samples, n_features), otherwise 'random'.
1000
+ - 'random': non-negative random matrices, scaled with:
1001
+ `sqrt(X.mean() / n_components)`
1002
+ - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
1003
+ initialization (better for sparseness)
1004
+ - 'nndsvda': NNDSVD with zeros filled with the average of X
1005
+ (better when sparsity is not desired)
1006
+ - 'nndsvdar': NNDSVD with zeros filled with small random values
1007
+ (generally faster, less accurate alternative to NNDSVDa
1008
+ for when sparsity is not desired)
1009
+ - 'custom': If `update_H=True`, use custom matrices W and H which must both
1010
+ be provided. If `update_H=False`, then only custom matrix H is used.
1011
+
1012
+ .. versionchanged:: 0.23
1013
+ The default value of `init` changed from 'random' to None in 0.23.
1014
+
1015
+ .. versionchanged:: 1.1
1016
+ When `init=None` and `n_components` is less than or equal to
1017
+ `min(n_samples, n_features)`, the default is `nndsvda` instead of `nndsvd`.
1018
+
1019
+ update_H : bool, default=True
1020
+ If True, both W and H will be estimated from initial guesses.
1021
+ If False, only W will be estimated.
1022
+
1023
+ solver : {'cd', 'mu'}, default='cd'
1024
+ Numerical solver to use:
1025
+
1026
+ - 'cd' is a Coordinate Descent solver that uses Fast Hierarchical
1027
+ Alternating Least Squares (Fast HALS).
1028
+ - 'mu' is a Multiplicative Update solver.
1029
+
1030
+ .. versionadded:: 0.17
1031
+ Coordinate Descent solver.
1032
+
1033
+ .. versionadded:: 0.19
1034
+ Multiplicative Update solver.
1035
+
1036
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1037
+ 'itakura-saito'}, default='frobenius'
1038
+ Beta divergence to be minimized, measuring the distance between X
1039
+ and the dot product WH. Note that values different from 'frobenius'
1040
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1041
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
1042
+ matrix X cannot contain zeros. Used only in 'mu' solver.
1043
+
1044
+ .. versionadded:: 0.19
1045
+
1046
+ tol : float, default=1e-4
1047
+ Tolerance of the stopping condition.
1048
+
1049
+ max_iter : int, default=200
1050
+ Maximum number of iterations before timing out.
1051
+
1052
+ alpha_W : float, default=0.0
1053
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1054
+ (default) to have no regularization on `W`.
1055
+
1056
+ .. versionadded:: 1.0
1057
+
1058
+ alpha_H : float or "same", default="same"
1059
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1060
+ have no regularization on `H`. If "same" (default), it takes the same value as
1061
+ `alpha_W`.
1062
+
1063
+ .. versionadded:: 1.0
1064
+
1065
+ l1_ratio : float, default=0.0
1066
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1067
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1068
+ (aka Frobenius Norm).
1069
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1070
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1071
+
1072
+ random_state : int, RandomState instance or None, default=None
1073
+ Used for NMF initialisation (when ``init`` == 'nndsvdar' or
1074
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1075
+ results across multiple function calls.
1076
+ See :term:`Glossary <random_state>`.
1077
+
1078
+ verbose : int, default=0
1079
+ The verbosity level.
1080
+
1081
+ shuffle : bool, default=False
1082
+ If true, randomize the order of coordinates in the CD solver.
1083
+
1084
+ Returns
1085
+ -------
1086
+ W : ndarray of shape (n_samples, n_components)
1087
+ Solution to the non-negative least squares problem.
1088
+
1089
+ H : ndarray of shape (n_components, n_features)
1090
+ Solution to the non-negative least squares problem.
1091
+
1092
+ n_iter : int
1093
+ Actual number of iterations.
1094
+
1095
+ References
1096
+ ----------
1097
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1098
+ factorizations" <10.1587/transfun.E92.A.708>`
1099
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1100
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1101
+
1102
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1103
+ beta-divergence" <10.1162/NECO_a_00168>`
1104
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1105
+
1106
+ Examples
1107
+ --------
1108
+ >>> import numpy as np
1109
+ >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
1110
+ >>> from sklearn.decomposition import non_negative_factorization
1111
+ >>> W, H, n_iter = non_negative_factorization(
1112
+ ... X, n_components=2, init='random', random_state=0)
1113
+ """
1114
+ est = NMF(
1115
+ n_components=n_components,
1116
+ init=init,
1117
+ solver=solver,
1118
+ beta_loss=beta_loss,
1119
+ tol=tol,
1120
+ max_iter=max_iter,
1121
+ random_state=random_state,
1122
+ alpha_W=alpha_W,
1123
+ alpha_H=alpha_H,
1124
+ l1_ratio=l1_ratio,
1125
+ verbose=verbose,
1126
+ shuffle=shuffle,
1127
+ )
1128
+ est._validate_params()
1129
+
1130
+ X = check_array(X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32])
1131
+
1132
+ with config_context(assume_finite=True):
1133
+ W, H, n_iter = est._fit_transform(X, W=W, H=H, update_H=update_H)
1134
+
1135
+ return W, H, n_iter
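A minimal usage sketch of the `update_H=False` path documented above; the toy data and shapes are made up for illustration:

import numpy as np
from sklearn.decomposition import non_negative_factorization

X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
# Learn a dictionary H on the training data.
W, H, n_iter = non_negative_factorization(
    X, n_components=2, init='random', random_state=0)
# Solve only for W on new data, keeping H fixed (update_H=False).
X_new = np.array([[1.0, 0.9], [2.5, 1.1]])  # made-up new samples
W_new, _, _ = non_negative_factorization(
    X_new, H=H, n_components=2, update_H=False, random_state=0)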
1136
+
1137
+
1138
+ class _BaseNMF(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, ABC):
1139
+ """Base class for NMF and MiniBatchNMF."""
1140
+
1141
+ # This prevents ``set_split_inverse_transform`` from being generated for the
+ # non-standard ``W`` arg on ``inverse_transform``.
1143
+ # TODO: remove when W is removed in v1.5 for inverse_transform
1144
+ __metadata_request__inverse_transform = {"W": metadata_routing.UNUSED}
1145
+
1146
+ _parameter_constraints: dict = {
1147
+ "n_components": [
1148
+ Interval(Integral, 1, None, closed="left"),
1149
+ None,
1150
+ StrOptions({"auto"}),
1151
+ Hidden(StrOptions({"warn"})),
1152
+ ],
1153
+ "init": [
1154
+ StrOptions({"random", "nndsvd", "nndsvda", "nndsvdar", "custom"}),
1155
+ None,
1156
+ ],
1157
+ "beta_loss": [
1158
+ StrOptions({"frobenius", "kullback-leibler", "itakura-saito"}),
1159
+ Real,
1160
+ ],
1161
+ "tol": [Interval(Real, 0, None, closed="left")],
1162
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
1163
+ "random_state": ["random_state"],
1164
+ "alpha_W": [Interval(Real, 0, None, closed="left")],
1165
+ "alpha_H": [Interval(Real, 0, None, closed="left"), StrOptions({"same"})],
1166
+ "l1_ratio": [Interval(Real, 0, 1, closed="both")],
1167
+ "verbose": ["verbose"],
1168
+ }
1169
+
1170
+ def __init__(
1171
+ self,
1172
+ n_components="warn",
1173
+ *,
1174
+ init=None,
1175
+ beta_loss="frobenius",
1176
+ tol=1e-4,
1177
+ max_iter=200,
1178
+ random_state=None,
1179
+ alpha_W=0.0,
1180
+ alpha_H="same",
1181
+ l1_ratio=0.0,
1182
+ verbose=0,
1183
+ ):
1184
+ self.n_components = n_components
1185
+ self.init = init
1186
+ self.beta_loss = beta_loss
1187
+ self.tol = tol
1188
+ self.max_iter = max_iter
1189
+ self.random_state = random_state
1190
+ self.alpha_W = alpha_W
1191
+ self.alpha_H = alpha_H
1192
+ self.l1_ratio = l1_ratio
1193
+ self.verbose = verbose
1194
+
1195
+ def _check_params(self, X):
1196
+ # n_components
1197
+ self._n_components = self.n_components
1198
+ if self.n_components == "warn":
1199
+ warnings.warn(
1200
+ (
1201
+ "The default value of `n_components` will change from `None` to"
1202
+ " `'auto'` in 1.6. Set the value of `n_components` to `None`"
1203
+ " explicitly to suppress the warning."
1204
+ ),
1205
+ FutureWarning,
1206
+ )
1207
+ self._n_components = None # Keeping the old default value
1208
+ if self._n_components is None:
1209
+ self._n_components = X.shape[1]
1210
+
1211
+ # beta_loss
1212
+ self._beta_loss = _beta_loss_to_float(self.beta_loss)
1213
+
1214
+ def _check_w_h(self, X, W, H, update_H):
1215
+ """Check W and H, or initialize them."""
1216
+ n_samples, n_features = X.shape
1217
+
1218
+ if self.init == "custom" and update_H:
1219
+ _check_init(H, (self._n_components, n_features), "NMF (input H)")
1220
+ _check_init(W, (n_samples, self._n_components), "NMF (input W)")
1221
+ if self._n_components == "auto":
1222
+ self._n_components = H.shape[0]
1223
+
1224
+ if H.dtype != X.dtype or W.dtype != X.dtype:
1225
+ raise TypeError(
1226
+ "H and W should have the same dtype as X. Got "
1227
+ "H.dtype = {} and W.dtype = {}.".format(H.dtype, W.dtype)
1228
+ )
1229
+
1230
+ elif not update_H:
1231
+ if W is not None:
1232
+ warnings.warn(
1233
+ "When update_H=False, the provided initial W is not used.",
1234
+ RuntimeWarning,
1235
+ )
1236
+
1237
+ _check_init(H, (self._n_components, n_features), "NMF (input H)")
1238
+ if self._n_components == "auto":
1239
+ self._n_components = H.shape[0]
1240
+
1241
+ if H.dtype != X.dtype:
1242
+ raise TypeError(
1243
+ "H should have the same dtype as X. Got H.dtype = {}.".format(
1244
+ H.dtype
1245
+ )
1246
+ )
1247
+
1248
+ # 'mu' solver should not be initialized by zeros
1249
+ if self.solver == "mu":
1250
+ avg = np.sqrt(X.mean() / self._n_components)
1251
+ W = np.full((n_samples, self._n_components), avg, dtype=X.dtype)
1252
+ else:
1253
+ W = np.zeros((n_samples, self._n_components), dtype=X.dtype)
1254
+
1255
+ else:
1256
+ if W is not None or H is not None:
1257
+ warnings.warn(
1258
+ (
1259
+ "When init!='custom', provided W or H are ignored. Set "
+ "init='custom' to use them as initialization."
1261
+ ),
1262
+ RuntimeWarning,
1263
+ )
1264
+
1265
+ if self._n_components == "auto":
1266
+ self._n_components = X.shape[1]
1267
+
1268
+ W, H = _initialize_nmf(
1269
+ X, self._n_components, init=self.init, random_state=self.random_state
1270
+ )
1271
+
1272
+ return W, H
1273
+
1274
+ def _compute_regularization(self, X):
1275
+ """Compute scaled regularization terms."""
1276
+ n_samples, n_features = X.shape
1277
+ alpha_W = self.alpha_W
1278
+ alpha_H = self.alpha_W if self.alpha_H == "same" else self.alpha_H
1279
+
1280
+ l1_reg_W = n_features * alpha_W * self.l1_ratio
1281
+ l1_reg_H = n_samples * alpha_H * self.l1_ratio
1282
+ l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio)
1283
+ l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio)
1284
+
1285
+ return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
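A small worked sketch of the scaling performed by `_compute_regularization`, using made-up values for the hyperparameters:

# With X.shape == (6, 2), alpha_W = alpha_H = 0.1 and l1_ratio = 0.5 (assumed values):
n_samples, n_features = 6, 2
alpha_W = alpha_H = 0.1
l1_ratio = 0.5
l1_reg_W = n_features * alpha_W * l1_ratio          # 0.1
l1_reg_H = n_samples * alpha_H * l1_ratio           # 0.3
l2_reg_W = n_features * alpha_W * (1.0 - l1_ratio)  # 0.1
l2_reg_H = n_samples * alpha_H * (1.0 - l1_ratio)   # 0.3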
1286
+
1287
+ def fit(self, X, y=None, **params):
1288
+ """Learn a NMF model for the data X.
1289
+
1290
+ Parameters
1291
+ ----------
1292
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1293
+ Training vector, where `n_samples` is the number of samples
1294
+ and `n_features` is the number of features.
1295
+
1296
+ y : Ignored
1297
+ Not used, present for API consistency by convention.
1298
+
1299
+ **params : kwargs
1300
+ Parameters (keyword arguments) and values passed to
1301
+ the fit_transform instance.
1302
+
1303
+ Returns
1304
+ -------
1305
+ self : object
1306
+ Returns the instance itself.
1307
+ """
1308
+ # param validation is done in fit_transform
1309
+
1310
+ self.fit_transform(X, **params)
1311
+ return self
1312
+
1313
+ def inverse_transform(self, Xt=None, W=None):
1314
+ """Transform data back to its original space.
1315
+
1316
+ .. versionadded:: 0.18
1317
+
1318
+ Parameters
1319
+ ----------
1320
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_components)
1321
+ Transformed data matrix.
1322
+
1323
+ W : deprecated
1324
+ Use `Xt` instead.
1325
+
1326
+ .. deprecated:: 1.3
1327
+
1328
+ Returns
1329
+ -------
1330
+ X : ndarray of shape (n_samples, n_features)
1331
+ Returns a data matrix of the original shape.
1332
+ """
1333
+ if Xt is None and W is None:
1334
+ raise TypeError("Missing required positional argument: Xt")
1335
+
1336
+ if W is not None and Xt is not None:
1337
+ raise ValueError("Please provide only `Xt`, and not `W`.")
1338
+
1339
+ if W is not None:
1340
+ warnings.warn(
1341
+ (
1342
+ "Input argument `W` was renamed to `Xt` in v1.3 and will be removed"
1343
+ " in v1.5."
1344
+ ),
1345
+ FutureWarning,
1346
+ )
1347
+ Xt = W
1348
+
1349
+ check_is_fitted(self)
1350
+ return Xt @ self.components_
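A minimal round-trip sketch for `inverse_transform`, reusing the toy matrix from the class docstring example (`max_iter` is only raised to avoid a convergence warning):

import numpy as np
from sklearn.decomposition import NMF

X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
model = NMF(n_components=2, init='random', random_state=0, max_iter=500)
W = model.fit_transform(X)
X_approx = model.inverse_transform(W)  # equivalent to W @ model.components_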
1351
+
1352
+ @property
1353
+ def _n_features_out(self):
1354
+ """Number of transformed output features."""
1355
+ return self.components_.shape[0]
1356
+
1357
+ def _more_tags(self):
1358
+ return {
1359
+ "requires_positive_X": True,
1360
+ "preserves_dtype": [np.float64, np.float32],
1361
+ }
1362
+
1363
+
1364
+ class NMF(_BaseNMF):
1365
+ """Non-Negative Matrix Factorization (NMF).
1366
+
1367
+ Find two non-negative matrices, i.e. matrices with all non-negative elements, (W, H)
1368
+ whose product approximates the non-negative matrix X. This factorization can be used
1369
+ for example for dimensionality reduction, source separation or topic extraction.
1370
+
1371
+ The objective function is:
1372
+
1373
+ .. math::
1374
+
1375
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
1376
+
1377
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
1378
+
1379
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
1380
+
1381
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
1382
+
1383
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
1384
+
1385
+ Where:
1386
+
1387
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
1388
+
1389
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
1390
+
1391
+ The generic norm :math:`||X - WH||_{loss}` may represent
1392
+ the Frobenius norm or another supported beta-divergence loss.
1393
+ The choice between options is controlled by the `beta_loss` parameter.
1394
+
1395
+ The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
1396
+ `H` to keep their impact balanced with respect to one another and to the data fit
1397
+ term as independent as possible of the size `n_samples` of the training set.
1398
+
1399
+ The objective function is minimized with an alternating minimization of W
1400
+ and H.
1401
+
1402
+ Note that the transformed data is named W and the components matrix is named H. In
1403
+ the NMF literature, the naming convention is usually the opposite since the data
1404
+ matrix X is transposed.
1405
+
1406
+ Read more in the :ref:`User Guide <NMF>`.
1407
+
1408
+ Parameters
1409
+ ----------
1410
+ n_components : int or {'auto'} or None, default=None
1411
+ Number of components, if n_components is not set all features
1412
+ are kept.
1413
+ If `n_components='auto'`, the number of components is automatically inferred
1414
+ from W or H shapes.
1415
+
1416
+ .. versionchanged:: 1.4
1417
+ Added `'auto'` value.
1418
+
1419
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
1420
+ Method used to initialize the procedure.
1421
+ Valid options:
1422
+
1423
+ - `None`: 'nndsvda' if n_components <= min(n_samples, n_features),
1424
+ otherwise random.
1425
+
1426
+ - `'random'`: non-negative random matrices, scaled with:
1427
+ `sqrt(X.mean() / n_components)`
1428
+
1429
+ - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)
1430
+ initialization (better for sparseness)
1431
+
1432
+ - `'nndsvda'`: NNDSVD with zeros filled with the average of X
1433
+ (better when sparsity is not desired)
1434
+
1435
+ - `'nndsvdar'` NNDSVD with zeros filled with small random values
1436
+ (generally faster, less accurate alternative to NNDSVDa
1437
+ for when sparsity is not desired)
1438
+
1439
+ - `'custom'`: Use custom matrices `W` and `H` which must both be provided.
1440
+
1441
+ .. versionchanged:: 1.1
1442
+ When `init=None` and n_components is less than n_samples and n_features
1443
+ defaults to `nndsvda` instead of `nndsvd`.
1444
+
1445
+ solver : {'cd', 'mu'}, default='cd'
1446
+ Numerical solver to use:
1447
+
1448
+ - 'cd' is a Coordinate Descent solver.
1449
+ - 'mu' is a Multiplicative Update solver.
1450
+
1451
+ .. versionadded:: 0.17
1452
+ Coordinate Descent solver.
1453
+
1454
+ .. versionadded:: 0.19
1455
+ Multiplicative Update solver.
1456
+
1457
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1458
+ 'itakura-saito'}, default='frobenius'
1459
+ Beta divergence to be minimized, measuring the distance between X
1460
+ and the dot product WH. Note that values different from 'frobenius'
1461
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1462
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
1463
+ matrix X cannot contain zeros. Used only in 'mu' solver.
1464
+
1465
+ .. versionadded:: 0.19
1466
+
1467
+ tol : float, default=1e-4
1468
+ Tolerance of the stopping condition.
1469
+
1470
+ max_iter : int, default=200
1471
+ Maximum number of iterations before timing out.
1472
+
1473
+ random_state : int, RandomState instance or None, default=None
1474
+ Used for initialisation (when ``init`` == 'nndsvdar' or
1475
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1476
+ results across multiple function calls.
1477
+ See :term:`Glossary <random_state>`.
1478
+
1479
+ alpha_W : float, default=0.0
1480
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1481
+ (default) to have no regularization on `W`.
1482
+
1483
+ .. versionadded:: 1.0
1484
+
1485
+ alpha_H : float or "same", default="same"
1486
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1487
+ have no regularization on `H`. If "same" (default), it takes the same value as
1488
+ `alpha_W`.
1489
+
1490
+ .. versionadded:: 1.0
1491
+
1492
+ l1_ratio : float, default=0.0
1493
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1494
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1495
+ (aka Frobenius Norm).
1496
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1497
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1498
+
1499
+ .. versionadded:: 0.17
1500
+ Regularization parameter *l1_ratio* used in the Coordinate Descent
1501
+ solver.
1502
+
1503
+ verbose : int, default=0
1504
+ Whether to be verbose.
1505
+
1506
+ shuffle : bool, default=False
1507
+ If true, randomize the order of coordinates in the CD solver.
1508
+
1509
+ .. versionadded:: 0.17
1510
+ *shuffle* parameter used in the Coordinate Descent solver.
1511
+
1512
+ Attributes
1513
+ ----------
1514
+ components_ : ndarray of shape (n_components, n_features)
1515
+ Factorization matrix, sometimes called 'dictionary'.
1516
+
1517
+ n_components_ : int
1518
+ The number of components. It is same as the `n_components` parameter
1519
+ if it was given. Otherwise, it will be same as the number of
1520
+ features.
1521
+
1522
+ reconstruction_err_ : float
1523
+ Frobenius norm of the matrix difference, or beta-divergence, between
1524
+ the training data ``X`` and the reconstructed data ``WH`` from
1525
+ the fitted model.
1526
+
1527
+ n_iter_ : int
1528
+ Actual number of iterations.
1529
+
1530
+ n_features_in_ : int
1531
+ Number of features seen during :term:`fit`.
1532
+
1533
+ .. versionadded:: 0.24
1534
+
1535
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1536
+ Names of features seen during :term:`fit`. Defined only when `X`
1537
+ has feature names that are all strings.
1538
+
1539
+ .. versionadded:: 1.0
1540
+
1541
+ See Also
1542
+ --------
1543
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1544
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1545
+ PCA : Principal component analysis.
1546
+ SparseCoder : Find a sparse representation of data from a fixed,
1547
+ precomputed dictionary.
1548
+ SparsePCA : Sparse Principal Components Analysis.
1549
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
1550
+
1551
+ References
1552
+ ----------
1553
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1554
+ factorizations" <10.1587/transfun.E92.A.708>`
1555
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1556
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1557
+
1558
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1559
+ beta-divergence" <10.1162/NECO_a_00168>`
1560
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1561
+
1562
+ Examples
1563
+ --------
1564
+ >>> import numpy as np
1565
+ >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
1566
+ >>> from sklearn.decomposition import NMF
1567
+ >>> model = NMF(n_components=2, init='random', random_state=0)
1568
+ >>> W = model.fit_transform(X)
1569
+ >>> H = model.components_
1570
+ """
1571
+
1572
+ _parameter_constraints: dict = {
1573
+ **_BaseNMF._parameter_constraints,
1574
+ "solver": [StrOptions({"mu", "cd"})],
1575
+ "shuffle": ["boolean"],
1576
+ }
1577
+
1578
+ def __init__(
1579
+ self,
1580
+ n_components="warn",
1581
+ *,
1582
+ init=None,
1583
+ solver="cd",
1584
+ beta_loss="frobenius",
1585
+ tol=1e-4,
1586
+ max_iter=200,
1587
+ random_state=None,
1588
+ alpha_W=0.0,
1589
+ alpha_H="same",
1590
+ l1_ratio=0.0,
1591
+ verbose=0,
1592
+ shuffle=False,
1593
+ ):
1594
+ super().__init__(
1595
+ n_components=n_components,
1596
+ init=init,
1597
+ beta_loss=beta_loss,
1598
+ tol=tol,
1599
+ max_iter=max_iter,
1600
+ random_state=random_state,
1601
+ alpha_W=alpha_W,
1602
+ alpha_H=alpha_H,
1603
+ l1_ratio=l1_ratio,
1604
+ verbose=verbose,
1605
+ )
1606
+
1607
+ self.solver = solver
1608
+ self.shuffle = shuffle
1609
+
1610
+ def _check_params(self, X):
1611
+ super()._check_params(X)
1612
+
1613
+ # solver
1614
+ if self.solver != "mu" and self.beta_loss not in (2, "frobenius"):
1615
+ # 'mu' is the only solver that handles other beta losses than 'frobenius'
1616
+ raise ValueError(
1617
+ f"Invalid beta_loss parameter: solver {self.solver!r} does not handle "
1618
+ f"beta_loss = {self.beta_loss!r}"
1619
+ )
1620
+ if self.solver == "mu" and self.init == "nndsvd":
1621
+ warnings.warn(
1622
+ (
1623
+ "The multiplicative update ('mu') solver cannot update "
1624
+ "zeros present in the initialization, and so leads to "
1625
+ "poorer results when used jointly with init='nndsvd'. "
1626
+ "You may try init='nndsvda' or init='nndsvdar' instead."
1627
+ ),
1628
+ UserWarning,
1629
+ )
1630
+
1631
+ return self
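A minimal sketch of the constraint checked above: beta losses other than 'frobenius' require `solver='mu'`. The toy data, `init` and `max_iter` values are assumptions:

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.RandomState(0)
X = np.abs(rng.standard_normal((20, 5)))  # made-up non-negative data
kl_model = NMF(n_components=3, solver='mu', beta_loss='kullback-leibler',
               init='nndsvda', max_iter=1000, random_state=0)
W = kl_model.fit_transform(X)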
1632
+
1633
+ @_fit_context(prefer_skip_nested_validation=True)
1634
+ def fit_transform(self, X, y=None, W=None, H=None):
1635
+ """Learn a NMF model for the data X and returns the transformed data.
1636
+
1637
+ This is more efficient than calling fit followed by transform.
1638
+
1639
+ Parameters
1640
+ ----------
1641
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1642
+ Training vector, where `n_samples` is the number of samples
1643
+ and `n_features` is the number of features.
1644
+
1645
+ y : Ignored
1646
+ Not used, present for API consistency by convention.
1647
+
1648
+ W : array-like of shape (n_samples, n_components), default=None
1649
+ If `init='custom'`, it is used as initial guess for the solution.
1650
+ If `None`, uses the initialisation method specified in `init`.
1651
+
1652
+ H : array-like of shape (n_components, n_features), default=None
1653
+ If `init='custom'`, it is used as initial guess for the solution.
1654
+ If `None`, uses the initialisation method specified in `init`.
1655
+
1656
+ Returns
1657
+ -------
1658
+ W : ndarray of shape (n_samples, n_components)
1659
+ Transformed data.
1660
+ """
1661
+ X = self._validate_data(
1662
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
1663
+ )
1664
+
1665
+ with config_context(assume_finite=True):
1666
+ W, H, n_iter = self._fit_transform(X, W=W, H=H)
1667
+
1668
+ self.reconstruction_err_ = _beta_divergence(
1669
+ X, W, H, self._beta_loss, square_root=True
1670
+ )
1671
+
1672
+ self.n_components_ = H.shape[0]
1673
+ self.components_ = H
1674
+ self.n_iter_ = n_iter
1675
+
1676
+ return W
1677
+
1678
+ def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
1679
+ """Learn a NMF model for the data X and returns the transformed data.
1680
+
1681
+ Parameters
1682
+ ----------
1683
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1684
+ Data matrix to be decomposed
1685
+
1686
+ y : Ignored
1687
+
1688
+ W : array-like of shape (n_samples, n_components), default=None
1689
+ If `init='custom'`, it is used as initial guess for the solution.
1690
+ If `update_H=False`, it is initialised as an array of zeros, unless
1691
+ `solver='mu'`, then it is filled with values calculated by
1692
+ `np.sqrt(X.mean() / self._n_components)`.
1693
+ If `None`, uses the initialisation method specified in `init`.
1694
+
1695
+ H : array-like of shape (n_components, n_features), default=None
1696
+ If `init='custom'`, it is used as initial guess for the solution.
1697
+ If `update_H=False`, it is used as a constant, to solve for W only.
1698
+ If `None`, uses the initialisation method specified in `init`.
1699
+
1700
+ update_H : bool, default=True
1701
+ If True, both W and H will be estimated from initial guesses,
1702
+ this corresponds to a call to the 'fit_transform' method.
1703
+ If False, only W will be estimated, this corresponds to a call
1704
+ to the 'transform' method.
1705
+
1706
+ Returns
1707
+ -------
1708
+ W : ndarray of shape (n_samples, n_components)
1709
+ Transformed data.
1710
+
1711
+ H : ndarray of shape (n_components, n_features)
1712
+ Factorization matrix, sometimes called 'dictionary'.
1713
+
1714
+ n_iter_ : int
1715
+ Actual number of iterations.
1716
+ """
1717
+ check_non_negative(X, "NMF (input X)")
1718
+
1719
+ # check parameters
1720
+ self._check_params(X)
1721
+
1722
+ if X.min() == 0 and self._beta_loss <= 0:
1723
+ raise ValueError(
1724
+ "When beta_loss <= 0 and X contains zeros, "
1725
+ "the solver may diverge. Please add small values "
1726
+ "to X, or use a positive beta_loss."
1727
+ )
1728
+
1729
+ # initialize or check W and H
1730
+ W, H = self._check_w_h(X, W, H, update_H)
1731
+
1732
+ # scale the regularization terms
1733
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
1734
+
1735
+ if self.solver == "cd":
1736
+ W, H, n_iter = _fit_coordinate_descent(
1737
+ X,
1738
+ W,
1739
+ H,
1740
+ self.tol,
1741
+ self.max_iter,
1742
+ l1_reg_W,
1743
+ l1_reg_H,
1744
+ l2_reg_W,
1745
+ l2_reg_H,
1746
+ update_H=update_H,
1747
+ verbose=self.verbose,
1748
+ shuffle=self.shuffle,
1749
+ random_state=self.random_state,
1750
+ )
1751
+ elif self.solver == "mu":
1752
+ W, H, n_iter, *_ = _fit_multiplicative_update(
1753
+ X,
1754
+ W,
1755
+ H,
1756
+ self._beta_loss,
1757
+ self.max_iter,
1758
+ self.tol,
1759
+ l1_reg_W,
1760
+ l1_reg_H,
1761
+ l2_reg_W,
1762
+ l2_reg_H,
1763
+ update_H,
1764
+ self.verbose,
1765
+ )
1766
+ else:
1767
+ raise ValueError("Invalid solver parameter '%s'." % self.solver)
1768
+
1769
+ if n_iter == self.max_iter and self.tol > 0:
1770
+ warnings.warn(
1771
+ "Maximum number of iterations %d reached. Increase "
1772
+ "it to improve convergence."
1773
+ % self.max_iter,
1774
+ ConvergenceWarning,
1775
+ )
1776
+
1777
+ return W, H, n_iter
1778
+
1779
+ def transform(self, X):
1780
+ """Transform the data X according to the fitted NMF model.
1781
+
1782
+ Parameters
1783
+ ----------
1784
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1785
+ Training vector, where `n_samples` is the number of samples
1786
+ and `n_features` is the number of features.
1787
+
1788
+ Returns
1789
+ -------
1790
+ W : ndarray of shape (n_samples, n_components)
1791
+ Transformed data.
1792
+ """
1793
+ check_is_fitted(self)
1794
+ X = self._validate_data(
1795
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False
1796
+ )
1797
+
1798
+ with config_context(assume_finite=True):
1799
+ W, *_ = self._fit_transform(X, H=self.components_, update_H=False)
1800
+
1801
+ return W
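A minimal sketch, assuming the toy matrix from the docstring example, showing that for the default Frobenius loss `reconstruction_err_` agrees with the Frobenius norm of `X - W @ H`:

import numpy as np
from sklearn.decomposition import NMF

X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
model = NMF(n_components=2, init='random', random_state=0, max_iter=1000)
W = model.fit_transform(X)
H = model.components_
frob_err = np.linalg.norm(X - W @ H)  # should match model.reconstruction_err_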
1802
+
1803
+
1804
+ class MiniBatchNMF(_BaseNMF):
1805
+ """Mini-Batch Non-Negative Matrix Factorization (NMF).
1806
+
1807
+ .. versionadded:: 1.1
1808
+
1809
+ Find two non-negative matrices, i.e. matrices with all non-negative elements,
1810
+ (`W`, `H`) whose product approximates the non-negative matrix `X`. This
1811
+ factorization can be used for example for dimensionality reduction, source
1812
+ separation or topic extraction.
1813
+
1814
+ The objective function is:
1815
+
1816
+ .. math::
1817
+
1818
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
1819
+
1820
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
1821
+
1822
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
1823
+
1824
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
1825
+
1826
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
1827
+
1828
+ Where:
1829
+
1830
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
1831
+
1832
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
1833
+
1834
+ The generic norm :math:`||X - WH||_{loss}^2` may represent
1835
+ the Frobenius norm or another supported beta-divergence loss.
1836
+ The choice between options is controlled by the `beta_loss` parameter.
1837
+
1838
+ The objective function is minimized with an alternating minimization of `W`
1839
+ and `H`.
1840
+
1841
+ Note that the transformed data is named `W` and the components matrix is
1842
+ named `H`. In the NMF literature, the naming convention is usually the opposite
1843
+ since the data matrix `X` is transposed.
1844
+
1845
+ Read more in the :ref:`User Guide <MiniBatchNMF>`.
1846
+
1847
+ Parameters
1848
+ ----------
1849
+ n_components : int or {'auto'} or None, default=None
1850
+ Number of components, if `n_components` is not set all features
1851
+ are kept.
1852
+ If `n_components='auto'`, the number of components is automatically inferred
1853
+ from W or H shapes.
1854
+
1855
+ .. versionchanged:: 1.4
1856
+ Added `'auto'` value.
1857
+
1858
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
1859
+ Method used to initialize the procedure.
1860
+ Valid options:
1861
+
1862
+ - `None`: 'nndsvda' if `n_components <= min(n_samples, n_features)`,
1863
+ otherwise random.
1864
+
1865
+ - `'random'`: non-negative random matrices, scaled with:
1866
+ `sqrt(X.mean() / n_components)`
1867
+
1868
+ - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)
1869
+ initialization (better for sparseness).
1870
+
1871
+ - `'nndsvda'`: NNDSVD with zeros filled with the average of X
1872
+ (better when sparsity is not desired).
1873
+
1874
+ - `'nndsvdar'` NNDSVD with zeros filled with small random values
1875
+ (generally faster, less accurate alternative to NNDSVDa
1876
+ for when sparsity is not desired).
1877
+
1878
+ - `'custom'`: Use custom matrices `W` and `H` which must both be provided.
1879
+
1880
+ batch_size : int, default=1024
1881
+ Number of samples in each mini-batch. Large batch sizes
1882
+ give better long-term convergence at the cost of a slower start.
1883
+
1884
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1885
+ 'itakura-saito'}, default='frobenius'
1886
+ Beta divergence to be minimized, measuring the distance between `X`
1887
+ and the dot product `WH`. Note that values different from 'frobenius'
1888
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1889
+ fits. Note that for `beta_loss <= 0` (or 'itakura-saito'), the input
1890
+ matrix `X` cannot contain zeros.
1891
+
1892
+ tol : float, default=1e-4
1893
+ Control early stopping based on the norm of the differences in `H`
1894
+ between 2 steps. To disable early stopping based on changes in `H`, set
1895
+ `tol` to 0.0.
1896
+
1897
+ max_no_improvement : int, default=10
1898
+ Control early stopping based on the consecutive number of mini batches
+ that do not yield an improvement on the smoothed cost function.
1900
+ To disable convergence detection based on cost function, set
1901
+ `max_no_improvement` to None.
1902
+
1903
+ max_iter : int, default=200
1904
+ Maximum number of iterations over the complete dataset before
1905
+ timing out.
1906
+
1907
+ alpha_W : float, default=0.0
1908
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1909
+ (default) to have no regularization on `W`.
1910
+
1911
+ alpha_H : float or "same", default="same"
1912
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1913
+ have no regularization on `H`. If "same" (default), it takes the same value as
1914
+ `alpha_W`.
1915
+
1916
+ l1_ratio : float, default=0.0
1917
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1918
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1919
+ (aka Frobenius Norm).
1920
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1921
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1922
+
1923
+ forget_factor : float, default=0.7
1924
+ Amount of rescaling of past information. Its value could be 1 with
1925
+ finite datasets. Choosing values < 1 is recommended with online
1926
+ learning as more recent batches will weight more than past batches.
1927
+
1928
+ fresh_restarts : bool, default=False
1929
+ Whether to completely solve for W at each step. Doing fresh restarts will likely
+ lead to a better solution for the same number of iterations but it is much slower.
1931
+
1932
+ fresh_restarts_max_iter : int, default=30
1933
+ Maximum number of iterations when solving for W at each step. Only used when
1934
+ doing fresh restarts. These iterations may be stopped early based on a small
1935
+ change of W controlled by `tol`.
1936
+
1937
+ transform_max_iter : int, default=None
1938
+ Maximum number of iterations when solving for W at transform time.
1939
+ If None, it defaults to `max_iter`.
1940
+
1941
+ random_state : int, RandomState instance or None, default=None
1942
+ Used for initialisation (when ``init`` == 'nndsvdar' or
1943
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1944
+ results across multiple function calls.
1945
+ See :term:`Glossary <random_state>`.
1946
+
1947
+ verbose : bool, default=False
1948
+ Whether to be verbose.
1949
+
1950
+ Attributes
1951
+ ----------
1952
+ components_ : ndarray of shape (n_components, n_features)
1953
+ Factorization matrix, sometimes called 'dictionary'.
1954
+
1955
+ n_components_ : int
1956
+ The number of components. It is same as the `n_components` parameter
1957
+ if it was given. Otherwise, it will be same as the number of
1958
+ features.
1959
+
1960
+ reconstruction_err_ : float
1961
+ Frobenius norm of the matrix difference, or beta-divergence, between
1962
+ the training data `X` and the reconstructed data `WH` from
1963
+ the fitted model.
1964
+
1965
+ n_iter_ : int
1966
+ Actual number of started iterations over the whole dataset.
1967
+
1968
+ n_steps_ : int
1969
+ Number of mini-batches processed.
1970
+
1971
+ n_features_in_ : int
1972
+ Number of features seen during :term:`fit`.
1973
+
1974
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1975
+ Names of features seen during :term:`fit`. Defined only when `X`
1976
+ has feature names that are all strings.
1977
+
1978
+ See Also
1979
+ --------
1980
+ NMF : Non-negative matrix factorization.
1981
+ MiniBatchDictionaryLearning : Finds a dictionary that can best be used to represent
1982
+ data using a sparse code.
1983
+
1984
+ References
1985
+ ----------
1986
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1987
+ factorizations" <10.1587/transfun.E92.A.708>`
1988
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1989
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1990
+
1991
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1992
+ beta-divergence" <10.1162/NECO_a_00168>`
1993
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1994
+
1995
+ .. [3] :doi:`"Online algorithms for nonnegative matrix factorization with the
1996
+ Itakura-Saito divergence" <10.1109/ASPAA.2011.6082314>`
1997
+ Lefevre, A., Bach, F., Fevotte, C. (2011). WASPA.
1998
+
1999
+ Examples
2000
+ --------
2001
+ >>> import numpy as np
2002
+ >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
2003
+ >>> from sklearn.decomposition import MiniBatchNMF
2004
+ >>> model = MiniBatchNMF(n_components=2, init='random', random_state=0)
2005
+ >>> W = model.fit_transform(X)
2006
+ >>> H = model.components_
2007
+ """
2008
+
2009
+ _parameter_constraints: dict = {
2010
+ **_BaseNMF._parameter_constraints,
2011
+ "max_no_improvement": [Interval(Integral, 1, None, closed="left"), None],
2012
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
2013
+ "forget_factor": [Interval(Real, 0, 1, closed="both")],
2014
+ "fresh_restarts": ["boolean"],
2015
+ "fresh_restarts_max_iter": [Interval(Integral, 1, None, closed="left")],
2016
+ "transform_max_iter": [Interval(Integral, 1, None, closed="left"), None],
2017
+ }
2018
+
2019
+ def __init__(
2020
+ self,
2021
+ n_components="warn",
2022
+ *,
2023
+ init=None,
2024
+ batch_size=1024,
2025
+ beta_loss="frobenius",
2026
+ tol=1e-4,
2027
+ max_no_improvement=10,
2028
+ max_iter=200,
2029
+ alpha_W=0.0,
2030
+ alpha_H="same",
2031
+ l1_ratio=0.0,
2032
+ forget_factor=0.7,
2033
+ fresh_restarts=False,
2034
+ fresh_restarts_max_iter=30,
2035
+ transform_max_iter=None,
2036
+ random_state=None,
2037
+ verbose=0,
2038
+ ):
2039
+ super().__init__(
2040
+ n_components=n_components,
2041
+ init=init,
2042
+ beta_loss=beta_loss,
2043
+ tol=tol,
2044
+ max_iter=max_iter,
2045
+ random_state=random_state,
2046
+ alpha_W=alpha_W,
2047
+ alpha_H=alpha_H,
2048
+ l1_ratio=l1_ratio,
2049
+ verbose=verbose,
2050
+ )
2051
+
2052
+ self.max_no_improvement = max_no_improvement
2053
+ self.batch_size = batch_size
2054
+ self.forget_factor = forget_factor
2055
+ self.fresh_restarts = fresh_restarts
2056
+ self.fresh_restarts_max_iter = fresh_restarts_max_iter
2057
+ self.transform_max_iter = transform_max_iter
2058
+
2059
+ def _check_params(self, X):
2060
+ super()._check_params(X)
2061
+
2062
+ # batch_size
2063
+ self._batch_size = min(self.batch_size, X.shape[0])
2064
+
2065
+ # forget_factor
2066
+ self._rho = self.forget_factor ** (self._batch_size / X.shape[0])
2067
+
2068
+ # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
2069
+ if self._beta_loss < 1:
2070
+ self._gamma = 1.0 / (2.0 - self._beta_loss)
2071
+ elif self._beta_loss > 2:
2072
+ self._gamma = 1.0 / (self._beta_loss - 1.0)
2073
+ else:
2074
+ self._gamma = 1.0
2075
+
2076
+ # transform_max_iter
2077
+ self._transform_max_iter = (
2078
+ self.max_iter
2079
+ if self.transform_max_iter is None
2080
+ else self.transform_max_iter
2081
+ )
2082
+
2083
+ return self
2084
+
2085
+ def _solve_W(self, X, H, max_iter):
2086
+ """Minimize the objective function w.r.t W.
2087
+
2088
+ Update W with H being fixed, until convergence. This is the heart
2089
+ of `transform` but it's also used during `fit` when doing fresh restarts.
2090
+ """
2091
+ avg = np.sqrt(X.mean() / self._n_components)
2092
+ W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
2093
+ W_buffer = W.copy()
2094
+
2095
+ # Get scaled regularization terms. Done for each minibatch to take into account
2096
+ # variable sizes of minibatches.
2097
+ l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X)
2098
+
2099
+ for _ in range(max_iter):
2100
+ W, *_ = _multiplicative_update_w(
2101
+ X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
2102
+ )
2103
+
2104
+ W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
2105
+ if self.tol > 0 and W_diff <= self.tol:
2106
+ break
2107
+
2108
+ W_buffer[:] = W
2109
+
2110
+ return W
2111
+
2112
+ def _minibatch_step(self, X, W, H, update_H):
2113
+ """Perform the update of W and H for one minibatch."""
2114
+ batch_size = X.shape[0]
2115
+
2116
+ # get scaled regularization terms. Done for each minibatch to take into account
2117
+ # variable sizes of minibatches.
2118
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
2119
+
2120
+ # update W
2121
+ if self.fresh_restarts or W is None:
2122
+ W = self._solve_W(X, H, self.fresh_restarts_max_iter)
2123
+ else:
2124
+ W, *_ = _multiplicative_update_w(
2125
+ X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
2126
+ )
2127
+
2128
+ # necessary for stability with beta_loss < 1
2129
+ if self._beta_loss < 1:
2130
+ W[W < np.finfo(np.float64).eps] = 0.0
2131
+
2132
+ batch_cost = (
2133
+ _beta_divergence(X, W, H, self._beta_loss)
2134
+ + l1_reg_W * W.sum()
2135
+ + l1_reg_H * H.sum()
2136
+ + l2_reg_W * (W**2).sum()
2137
+ + l2_reg_H * (H**2).sum()
2138
+ ) / batch_size
2139
+
2140
+ # update H (only at fit or fit_transform)
2141
+ if update_H:
2142
+ H[:] = _multiplicative_update_h(
2143
+ X,
2144
+ W,
2145
+ H,
2146
+ beta_loss=self._beta_loss,
2147
+ l1_reg_H=l1_reg_H,
2148
+ l2_reg_H=l2_reg_H,
2149
+ gamma=self._gamma,
2150
+ A=self._components_numerator,
2151
+ B=self._components_denominator,
2152
+ rho=self._rho,
2153
+ )
2154
+
2155
+ # necessary for stability with beta_loss < 1
2156
+ if self._beta_loss <= 1:
2157
+ H[H < np.finfo(np.float64).eps] = 0.0
2158
+
2159
+ return batch_cost
2160
+
2161
+ def _minibatch_convergence(
2162
+ self, X, batch_cost, H, H_buffer, n_samples, step, n_steps
2163
+ ):
2164
+ """Helper function to encapsulate the early stopping logic"""
2165
+ batch_size = X.shape[0]
2166
+
2167
+ # counts steps starting from 1 for user friendly verbose mode.
2168
+ step = step + 1
2169
+
2170
+ # Ignore first iteration because H is not updated yet.
2171
+ if step == 1:
2172
+ if self.verbose:
2173
+ print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
2174
+ return False
2175
+
2176
+ # Compute an Exponentially Weighted Average of the cost function to
2177
+ # monitor the convergence while discarding minibatch-local stochastic
2178
+ # variability: https://en.wikipedia.org/wiki/Moving_average
2179
+ if self._ewa_cost is None:
2180
+ self._ewa_cost = batch_cost
2181
+ else:
2182
+ alpha = batch_size / (n_samples + 1)
2183
+ alpha = min(alpha, 1)
2184
+ self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
2185
+
2186
+ # Log progress to be able to monitor convergence
2187
+ if self.verbose:
2188
+ print(
2189
+ f"Minibatch step {step}/{n_steps}: mean batch cost: "
2190
+ f"{batch_cost}, ewa cost: {self._ewa_cost}"
2191
+ )
2192
+
2193
+ # Early stopping based on change of H
2194
+ H_diff = linalg.norm(H - H_buffer) / linalg.norm(H)
2195
+ if self.tol > 0 and H_diff <= self.tol:
2196
+ if self.verbose:
2197
+ print(f"Converged (small H change) at step {step}/{n_steps}")
2198
+ return True
2199
+
2200
+ # Early stopping heuristic due to lack of improvement on smoothed
2201
+ # cost function
2202
+ if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
2203
+ self._no_improvement = 0
2204
+ self._ewa_cost_min = self._ewa_cost
2205
+ else:
2206
+ self._no_improvement += 1
2207
+
2208
+ if (
2209
+ self.max_no_improvement is not None
2210
+ and self._no_improvement >= self.max_no_improvement
2211
+ ):
2212
+ if self.verbose:
2213
+ print(
2214
+ "Converged (lack of improvement in objective function) "
2215
+ f"at step {step}/{n_steps}"
2216
+ )
2217
+ return True
2218
+
2219
+ return False
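A small sketch of the exponentially weighted average used above, with made-up batch costs, to show how minibatch noise is smoothed before the convergence checks:

batch_size, n_samples = 100, 1000  # assumed sizes
alpha = min(batch_size / (n_samples + 1), 1)
ewa_cost = None
for batch_cost in [5.0, 4.0, 4.5, 3.8, 3.9]:  # made-up per-batch costs
    if ewa_cost is None:
        ewa_cost = batch_cost
    else:
        ewa_cost = ewa_cost * (1 - alpha) + batch_cost * alpha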
2220
+
2221
+ @_fit_context(prefer_skip_nested_validation=True)
2222
+ def fit_transform(self, X, y=None, W=None, H=None):
2223
+ """Learn a NMF model for the data X and returns the transformed data.
2224
+
2225
+ This is more efficient than calling fit followed by transform.
2226
+
2227
+ Parameters
2228
+ ----------
2229
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2230
+ Data matrix to be decomposed.
2231
+
2232
+ y : Ignored
2233
+ Not used, present here for API consistency by convention.
2234
+
2235
+ W : array-like of shape (n_samples, n_components), default=None
2236
+ If `init='custom'`, it is used as initial guess for the solution.
2237
+ If `None`, uses the initialisation method specified in `init`.
2238
+
2239
+ H : array-like of shape (n_components, n_features), default=None
2240
+ If `init='custom'`, it is used as initial guess for the solution.
2241
+ If `None`, uses the initialisation method specified in `init`.
2242
+
2243
+ Returns
2244
+ -------
2245
+ W : ndarray of shape (n_samples, n_components)
2246
+ Transformed data.
2247
+ """
2248
+ X = self._validate_data(
2249
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
2250
+ )
2251
+
2252
+ with config_context(assume_finite=True):
2253
+ W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H)
2254
+
2255
+ self.reconstruction_err_ = _beta_divergence(
2256
+ X, W, H, self._beta_loss, square_root=True
2257
+ )
2258
+
2259
+ self.n_components_ = H.shape[0]
2260
+ self.components_ = H
2261
+ self.n_iter_ = n_iter
2262
+ self.n_steps_ = n_steps
2263
+
2264
+ return W
2265
+
2266
+ def _fit_transform(self, X, W=None, H=None, update_H=True):
2267
+ """Learn a NMF model for the data X and returns the transformed data.
2268
+
2269
+ Parameters
2270
+ ----------
2271
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
2272
+ Data matrix to be decomposed.
2273
+
2274
+ W : array-like of shape (n_samples, n_components), default=None
2275
+ If `init='custom'`, it is used as initial guess for the solution.
2276
+ If `update_H=False`, it is initialised as an array of zeros, unless
2277
+ `solver='mu'`, then it is filled with values calculated by
2278
+ `np.sqrt(X.mean() / self._n_components)`.
2279
+ If `None`, uses the initialisation method specified in `init`.
2280
+
2281
+ H : array-like of shape (n_components, n_features), default=None
2282
+ If `init='custom'`, it is used as initial guess for the solution.
2283
+ If `update_H=False`, it is used as a constant, to solve for W only.
2284
+ If `None`, uses the initialisation method specified in `init`.
2285
+
2286
+ update_H : bool, default=True
2287
+ If True, both W and H will be estimated from initial guesses,
2288
+ this corresponds to a call to the `fit_transform` method.
2289
+ If False, only W will be estimated, this corresponds to a call
2290
+ to the `transform` method.
2291
+
2292
+ Returns
2293
+ -------
2294
+ W : ndarray of shape (n_samples, n_components)
2295
+ Transformed data.
2296
+
2297
+ H : ndarray of shape (n_components, n_features)
2298
+ Factorization matrix, sometimes called 'dictionary'.
2299
+
2300
+ n_iter : int
2301
+ Actual number of started iterations over the whole dataset.
2302
+
2303
+ n_steps : int
2304
+ Number of mini-batches processed.
2305
+ """
2306
+ check_non_negative(X, "MiniBatchNMF (input X)")
2307
+ self._check_params(X)
2308
+
2309
+ if X.min() == 0 and self._beta_loss <= 0:
2310
+ raise ValueError(
2311
+ "When beta_loss <= 0 and X contains zeros, "
2312
+ "the solver may diverge. Please add small values "
2313
+ "to X, or use a positive beta_loss."
2314
+ )
2315
+
2316
+ n_samples = X.shape[0]
2317
+
2318
+ # initialize or check W and H
2319
+ W, H = self._check_w_h(X, W, H, update_H)
2320
+ H_buffer = H.copy()
2321
+
2322
+ # Initialize auxiliary matrices
2323
+ self._components_numerator = H.copy()
2324
+ self._components_denominator = np.ones(H.shape, dtype=H.dtype)
2325
+
2326
+ # Attributes to monitor the convergence
2327
+ self._ewa_cost = None
2328
+ self._ewa_cost_min = None
2329
+ self._no_improvement = 0
2330
+
2331
+ batches = gen_batches(n_samples, self._batch_size)
2332
+ batches = itertools.cycle(batches)
2333
+ n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
2334
+ n_steps = self.max_iter * n_steps_per_iter
2335
+
2336
+ for i, batch in zip(range(n_steps), batches):
2337
+ batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H)
2338
+
2339
+ if update_H and self._minibatch_convergence(
2340
+ X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps
2341
+ ):
2342
+ break
2343
+
2344
+ H_buffer[:] = H
2345
+
2346
+ if self.fresh_restarts:
2347
+ W = self._solve_W(X, H, self._transform_max_iter)
2348
+
2349
+ n_steps = i + 1
2350
+ n_iter = int(np.ceil(n_steps / n_steps_per_iter))
2351
+
2352
+ if n_iter == self.max_iter and self.tol > 0:
2353
+ warnings.warn(
2354
+ (
2355
+ f"Maximum number of iterations {self.max_iter} reached. "
2356
+ "Increase it to improve convergence."
2357
+ ),
2358
+ ConvergenceWarning,
2359
+ )
2360
+
2361
+ return W, H, n_iter, n_steps
2362
+
2363
+ def transform(self, X):
2364
+ """Transform the data X according to the fitted MiniBatchNMF model.
2365
+
2366
+ Parameters
2367
+ ----------
2368
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2369
+ Data matrix to be transformed by the model.
2370
+
2371
+ Returns
2372
+ -------
2373
+ W : ndarray of shape (n_samples, n_components)
2374
+ Transformed data.
2375
+ """
2376
+ check_is_fitted(self)
2377
+ X = self._validate_data(
2378
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False
2379
+ )
2380
+
2381
+ W = self._solve_W(X, self.components_, self._transform_max_iter)
2382
+
2383
+ return W
2384
+
2385
+ @_fit_context(prefer_skip_nested_validation=True)
2386
+ def partial_fit(self, X, y=None, W=None, H=None):
2387
+ """Update the model using the data in `X` as a mini-batch.
2388
+
2389
+ This method is expected to be called several times consecutively
2390
+ on different chunks of a dataset so as to implement out-of-core
2391
+ or online learning.
2392
+
2393
+ This is especially useful when the whole dataset is too big to fit in
2394
+ memory at once (see :ref:`scaling_strategies`).
2395
+
2396
+ Parameters
2397
+ ----------
2398
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2399
+ Data matrix to be decomposed.
2400
+
2401
+ y : Ignored
2402
+ Not used, present here for API consistency by convention.
2403
+
2404
+ W : array-like of shape (n_samples, n_components), default=None
2405
+ If `init='custom'`, it is used as initial guess for the solution.
2406
+ Only used for the first call to `partial_fit`.
2407
+
2408
+ H : array-like of shape (n_components, n_features), default=None
2409
+ If `init='custom'`, it is used as initial guess for the solution.
2410
+ Only used for the first call to `partial_fit`.
2411
+
2412
+ Returns
2413
+ -------
2414
+ self
2415
+ Returns the instance itself.
2416
+ """
2417
+ has_components = hasattr(self, "components_")
2418
+
2419
+ X = self._validate_data(
2420
+ X,
2421
+ accept_sparse=("csr", "csc"),
2422
+ dtype=[np.float64, np.float32],
2423
+ reset=not has_components,
2424
+ )
2425
+
2426
+ if not has_components:
2427
+ # This instance has not been fitted yet (fit or partial_fit)
2428
+ self._check_params(X)
2429
+ _, H = self._check_w_h(X, W=W, H=H, update_H=True)
2430
+
2431
+ self._components_numerator = H.copy()
2432
+ self._components_denominator = np.ones(H.shape, dtype=H.dtype)
2433
+ self.n_steps_ = 0
2434
+ else:
2435
+ H = self.components_
2436
+
2437
+ self._minibatch_step(X, None, H, update_H=True)
2438
+
2439
+ self.n_components_ = H.shape[0]
2440
+ self.components_ = H
2441
+ self.n_steps_ += 1
2442
+
2443
+ return self
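A minimal out-of-core sketch using `partial_fit` on successive chunks, as described in its docstring; the chunking scheme and toy data are assumptions:

import numpy as np
from sklearn.decomposition import MiniBatchNMF

rng = np.random.RandomState(0)
X = np.abs(rng.standard_normal((1000, 20)))  # made-up non-negative data
mbnmf = MiniBatchNMF(n_components=5, random_state=0)
for chunk in np.array_split(X, 10):  # stand-in for reading chunks from disk
    mbnmf.partial_fit(chunk)
W = mbnmf.transform(X)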
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_pca.py ADDED
@@ -0,0 +1,747 @@
1
+ """ Principal Component Analysis.
2
+ """
3
+
4
+ # Author: Alexandre Gramfort <[email protected]>
5
+ # Olivier Grisel <[email protected]>
6
+ # Mathieu Blondel <[email protected]>
7
+ # Denis A. Engemann <[email protected]>
8
+ # Michael Eickenberg <[email protected]>
9
+ # Giorgio Patrini <[email protected]>
10
+ #
11
+ # License: BSD 3 clause
12
+
13
+ from math import log, sqrt
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ from scipy import linalg
18
+ from scipy.sparse import issparse
19
+ from scipy.sparse.linalg import svds
20
+ from scipy.special import gammaln
21
+
22
+ from ..base import _fit_context
23
+ from ..utils import check_random_state
24
+ from ..utils._arpack import _init_arpack_v0
25
+ from ..utils._array_api import _convert_to_numpy, get_namespace
26
+ from ..utils._param_validation import Interval, RealNotInt, StrOptions
27
+ from ..utils.extmath import fast_logdet, randomized_svd, stable_cumsum, svd_flip
28
+ from ..utils.sparsefuncs import _implicit_column_offset, mean_variance_axis
29
+ from ..utils.validation import check_is_fitted
30
+ from ._base import _BasePCA
31
+
32
+
33
+ def _assess_dimension(spectrum, rank, n_samples):
34
+ """Compute the log-likelihood of a rank ``rank`` dataset.
35
+
36
+ The dataset is assumed to be embedded in Gaussian noise of shape (n,
+ dimf) having spectrum ``spectrum``. This implements the method of
+ T. P. Minka.
39
+
40
+ Parameters
41
+ ----------
42
+ spectrum : ndarray of shape (n_features,)
43
+ Data spectrum.
44
+ rank : int
45
+ Tested rank value. It should be strictly lower than n_features,
46
+ otherwise the method isn't specified (division by zero in equation
47
+ (31) from the paper).
48
+ n_samples : int
49
+ Number of samples.
50
+
51
+ Returns
52
+ -------
53
+ ll : float
54
+ The log-likelihood.
55
+
56
+ References
57
+ ----------
58
+ This implements the method of `Thomas P. Minka:
59
+ Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
60
+ <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
61
+ """
62
+ xp, _ = get_namespace(spectrum)
63
+
64
+ n_features = spectrum.shape[0]
65
+ if not 1 <= rank < n_features:
66
+ raise ValueError("the tested rank should be in [1, n_features - 1]")
67
+
68
+ eps = 1e-15
69
+
70
+ if spectrum[rank - 1] < eps:
71
+ # When the tested rank is associated with a small eigenvalue, there's
72
+ # no point in computing the log-likelihood: it's going to be very
73
+ # small and won't be the max anyway. Also, it can lead to numerical
74
+ # issues below when computing pa, in particular in log((spectrum[i] -
75
+ # spectrum[j]) because this will take the log of something very small.
76
+ return -xp.inf
77
+
78
+ pu = -rank * log(2.0)
79
+ for i in range(1, rank + 1):
80
+ pu += (
81
+ gammaln((n_features - i + 1) / 2.0)
82
+ - log(xp.pi) * (n_features - i + 1) / 2.0
83
+ )
84
+
85
+ pl = xp.sum(xp.log(spectrum[:rank]))
86
+ pl = -pl * n_samples / 2.0
87
+
88
+ v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank))
89
+ pv = -log(v) * n_samples * (n_features - rank) / 2.0
90
+
91
+ m = n_features * rank - rank * (rank + 1.0) / 2.0
92
+ pp = log(2.0 * xp.pi) * (m + rank) / 2.0
93
+
94
+ pa = 0.0
95
+ spectrum_ = xp.asarray(spectrum, copy=True)
96
+ spectrum_[rank:n_features] = v
97
+ for i in range(rank):
98
+ for j in range(i + 1, spectrum.shape[0]):
99
+ pa += log(
100
+ (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])
101
+ ) + log(n_samples)
102
+
103
+ ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0
104
+
105
+ return ll
106
+
107
+
108
+ def _infer_dimension(spectrum, n_samples):
109
+ """Infers the dimension of a dataset with a given spectrum.
110
+
111
+ The returned value will be in [1, n_features - 1].
112
+ """
113
+ xp, _ = get_namespace(spectrum)
114
+
115
+ ll = xp.empty_like(spectrum)
116
+ ll[0] = -xp.inf # we don't want to return n_components = 0
117
+ for rank in range(1, spectrum.shape[0]):
118
+ ll[rank] = _assess_dimension(spectrum, rank, n_samples)
119
+ return xp.argmax(ll)
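A minimal sketch of how these helpers are exercised through the public API via `PCA(n_components='mle')`; the low-rank toy data and noise level are assumptions:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
low_rank = rng.standard_normal((200, 3)) @ rng.standard_normal((3, 10))
X = low_rank + 0.01 * rng.standard_normal((200, 10))  # rank ~3 plus small noise
pca = PCA(n_components='mle', svd_solver='full').fit(X)
n_kept = pca.n_components_  # expected to be near the true rank when noise is small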
120
+
121
+
122
+ class PCA(_BasePCA):
123
+ """Principal component analysis (PCA).
124
+
125
+ Linear dimensionality reduction using Singular Value Decomposition of the
126
+ data to project it to a lower dimensional space. The input data is centered
127
+ but not scaled for each feature before applying the SVD.
128
+
129
+ It uses the LAPACK implementation of the full SVD or a randomized truncated
130
+ SVD by the method of Halko et al. 2009, depending on the shape of the input
131
+ data and the number of components to extract.
132
+
133
+ It can also use the scipy.sparse.linalg ARPACK implementation of the
134
+ truncated SVD.
135
+
136
+ Notice that this class does not support sparse input. See
137
+ :class:`TruncatedSVD` for an alternative with sparse data.
138
+
139
+ For a usage example, see
140
+ :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py`
141
+
142
+ Read more in the :ref:`User Guide <PCA>`.
143
+
144
+ Parameters
145
+ ----------
146
+ n_components : int, float or 'mle', default=None
147
+ Number of components to keep.
148
+ If n_components is not set all components are kept::
149
+
150
+ n_components == min(n_samples, n_features)
151
+
152
+ If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
153
+ MLE is used to guess the dimension. Use of ``n_components == 'mle'``
154
+ will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
155
+
156
+ If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
157
+ number of components such that the amount of variance that needs to be
158
+ explained is greater than the percentage specified by n_components.
159
+
160
+ If ``svd_solver == 'arpack'``, the number of components must be
161
+ strictly less than the minimum of n_features and n_samples.
162
+
163
+ Hence, the None case results in::
164
+
165
+ n_components == min(n_samples, n_features) - 1
166
+
167
+ copy : bool, default=True
168
+ If False, data passed to fit are overwritten and running
169
+ fit(X).transform(X) will not yield the expected results,
170
+ use fit_transform(X) instead.
171
+
172
+ whiten : bool, default=False
173
+ When True (False by default) the `components_` vectors are multiplied
174
+ by the square root of n_samples and then divided by the singular values
175
+ to ensure uncorrelated outputs with unit component-wise variances.
176
+
177
+ Whitening will remove some information from the transformed signal
178
+ (the relative variance scales of the components) but can sometime
179
+ improve the predictive accuracy of the downstream estimators by
180
+ making their data respect some hard-wired assumptions.
181
+
182
+ svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto'
183
+ If auto :
184
+ The solver is selected by a default policy based on `X.shape` and
185
+ `n_components`: if the input data is larger than 500x500 and the
186
+ number of components to extract is lower than 80% of the smallest
187
+ dimension of the data, then the more efficient 'randomized'
188
+ method is enabled. Otherwise the exact full SVD is computed and
189
+ optionally truncated afterwards.
190
+ If full :
191
+ run exact full SVD calling the standard LAPACK solver via
192
+ `scipy.linalg.svd` and select the components by postprocessing
193
+ If arpack :
194
+ run SVD truncated to n_components calling ARPACK solver via
195
+ `scipy.sparse.linalg.svds`. It requires strictly
196
+ 0 < n_components < min(X.shape)
197
+ If randomized :
198
+ run randomized SVD by the method of Halko et al.
199
+
200
+ .. versionadded:: 0.18.0
201
+
202
+ tol : float, default=0.0
203
+ Tolerance for singular values computed by svd_solver == 'arpack'.
204
+ Must be of range [0.0, infinity).
205
+
206
+ .. versionadded:: 0.18.0
207
+
208
+ iterated_power : int or 'auto', default='auto'
209
+ Number of iterations for the power method computed by
210
+ svd_solver == 'randomized'.
211
+ Must be of range [0, infinity).
212
+
213
+ .. versionadded:: 0.18.0
214
+
215
+ n_oversamples : int, default=10
216
+ This parameter is only relevant when `svd_solver="randomized"`.
217
+ It corresponds to the additional number of random vectors to sample the
218
+ range of `X` so as to ensure proper conditioning. See
219
+ :func:`~sklearn.utils.extmath.randomized_svd` for more details.
220
+
221
+ .. versionadded:: 1.1
222
+
223
+ power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
224
+ Power iteration normalizer for randomized SVD solver.
225
+ Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
226
+ for more details.
227
+
228
+ .. versionadded:: 1.1
229
+
230
+ random_state : int, RandomState instance or None, default=None
231
+ Used when the 'arpack' or 'randomized' solvers are used. Pass an int
232
+ for reproducible results across multiple function calls.
233
+ See :term:`Glossary <random_state>`.
234
+
235
+ .. versionadded:: 0.18.0
236
+
237
+ Attributes
238
+ ----------
239
+ components_ : ndarray of shape (n_components, n_features)
240
+ Principal axes in feature space, representing the directions of
241
+ maximum variance in the data. Equivalently, the right singular
242
+ vectors of the centered input data, parallel to its eigenvectors.
243
+ The components are sorted by decreasing ``explained_variance_``.
244
+
245
+ explained_variance_ : ndarray of shape (n_components,)
246
+ The amount of variance explained by each of the selected components.
247
+ The variance estimation uses `n_samples - 1` degrees of freedom.
248
+
249
+ Equal to n_components largest eigenvalues
250
+ of the covariance matrix of X.
251
+
252
+ .. versionadded:: 0.18
253
+
254
+ explained_variance_ratio_ : ndarray of shape (n_components,)
255
+ Percentage of variance explained by each of the selected components.
256
+
257
+ If ``n_components`` is not set then all components are stored and the
258
+ sum of the ratios is equal to 1.0.
259
+
260
+ singular_values_ : ndarray of shape (n_components,)
261
+ The singular values corresponding to each of the selected components.
262
+ The singular values are equal to the 2-norms of the ``n_components``
263
+ variables in the lower-dimensional space.
264
+
265
+ .. versionadded:: 0.19
266
+
267
+ mean_ : ndarray of shape (n_features,)
268
+ Per-feature empirical mean, estimated from the training set.
269
+
270
+ Equal to `X.mean(axis=0)`.
271
+
272
+ n_components_ : int
273
+ The estimated number of components. When n_components is set
274
+ to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
275
+ number is estimated from input data. Otherwise it equals the parameter
276
+ n_components, or the lesser value of n_features and n_samples
277
+ if n_components is None.
278
+
279
+ n_samples_ : int
280
+ Number of samples in the training data.
281
+
282
+ noise_variance_ : float
283
+ The estimated noise covariance following the Probabilistic PCA model
284
+ from Tipping and Bishop 1999. See "Pattern Recognition and
285
+ Machine Learning" by C. Bishop, 12.2.1 p. 574 or
286
+ http://www.miketipping.com/papers/met-mppca.pdf. It is required to
287
+ compute the estimated data covariance and score samples.
288
+
289
+ Equal to the average of (min(n_features, n_samples) - n_components)
290
+ smallest eigenvalues of the covariance matrix of X.
291
+
292
+ n_features_in_ : int
293
+ Number of features seen during :term:`fit`.
294
+
295
+ .. versionadded:: 0.24
296
+
297
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
298
+ Names of features seen during :term:`fit`. Defined only when `X`
299
+ has feature names that are all strings.
300
+
301
+ .. versionadded:: 1.0
302
+
303
+ See Also
304
+ --------
305
+ KernelPCA : Kernel Principal Component Analysis.
306
+ SparsePCA : Sparse Principal Component Analysis.
307
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
308
+ IncrementalPCA : Incremental Principal Component Analysis.
309
+
310
+ References
311
+ ----------
312
+ For n_components == 'mle', this class uses the method from:
313
+ `Minka, T. P.. "Automatic choice of dimensionality for PCA".
314
+ In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
315
+
316
+ Implements the probabilistic PCA model from:
317
+ `Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
318
+ component analysis". Journal of the Royal Statistical Society:
319
+ Series B (Statistical Methodology), 61(3), 611-622.
320
+ <http://www.miketipping.com/papers/met-mppca.pdf>`_
321
+ via the score and score_samples methods.
322
+
323
+ For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
324
+
325
+ For svd_solver == 'randomized', see:
326
+ :doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
327
+ "Finding structure with randomness: Probabilistic algorithms for
328
+ constructing approximate matrix decompositions".
329
+ SIAM review, 53(2), 217-288.
330
+ <10.1137/090771806>`
331
+ and also
332
+ :doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
333
+ "A randomized algorithm for the decomposition of matrices".
334
+ Applied and Computational Harmonic Analysis, 30(1), 47-68.
335
+ <10.1016/j.acha.2010.02.003>`
336
+
337
+ Examples
338
+ --------
339
+ >>> import numpy as np
340
+ >>> from sklearn.decomposition import PCA
341
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
342
+ >>> pca = PCA(n_components=2)
343
+ >>> pca.fit(X)
344
+ PCA(n_components=2)
345
+ >>> print(pca.explained_variance_ratio_)
346
+ [0.9924... 0.0075...]
347
+ >>> print(pca.singular_values_)
348
+ [6.30061... 0.54980...]
349
+
350
+ >>> pca = PCA(n_components=2, svd_solver='full')
351
+ >>> pca.fit(X)
352
+ PCA(n_components=2, svd_solver='full')
353
+ >>> print(pca.explained_variance_ratio_)
354
+ [0.9924... 0.00755...]
355
+ >>> print(pca.singular_values_)
356
+ [6.30061... 0.54980...]
357
+
358
+ >>> pca = PCA(n_components=1, svd_solver='arpack')
359
+ >>> pca.fit(X)
360
+ PCA(n_components=1, svd_solver='arpack')
361
+ >>> print(pca.explained_variance_ratio_)
362
+ [0.99244...]
363
+ >>> print(pca.singular_values_)
364
+ [6.30061...]
365
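+
+ As an additional illustrative sketch (an editor's addition, not part of
+ the upstream example set): with ``whiten=True`` the transformed data
+ has unit component-wise sample variance (``ddof=1``), provided the
+ retained singular values are non-zero.
+
+ >>> pca = PCA(n_components=2, whiten=True).fit(X)
+ >>> X_whitened = pca.transform(X)
+ >>> print(np.allclose(X_whitened.std(axis=0, ddof=1), 1.0))
+ True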
+ """
366
+
367
+ _parameter_constraints: dict = {
368
+ "n_components": [
369
+ Interval(Integral, 0, None, closed="left"),
370
+ Interval(RealNotInt, 0, 1, closed="neither"),
371
+ StrOptions({"mle"}),
372
+ None,
373
+ ],
374
+ "copy": ["boolean"],
375
+ "whiten": ["boolean"],
376
+ "svd_solver": [StrOptions({"auto", "full", "arpack", "randomized"})],
377
+ "tol": [Interval(Real, 0, None, closed="left")],
378
+ "iterated_power": [
379
+ StrOptions({"auto"}),
380
+ Interval(Integral, 0, None, closed="left"),
381
+ ],
382
+ "n_oversamples": [Interval(Integral, 1, None, closed="left")],
383
+ "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
384
+ "random_state": ["random_state"],
385
+ }
386
+
387
+ def __init__(
388
+ self,
389
+ n_components=None,
390
+ *,
391
+ copy=True,
392
+ whiten=False,
393
+ svd_solver="auto",
394
+ tol=0.0,
395
+ iterated_power="auto",
396
+ n_oversamples=10,
397
+ power_iteration_normalizer="auto",
398
+ random_state=None,
399
+ ):
400
+ self.n_components = n_components
401
+ self.copy = copy
402
+ self.whiten = whiten
403
+ self.svd_solver = svd_solver
404
+ self.tol = tol
405
+ self.iterated_power = iterated_power
406
+ self.n_oversamples = n_oversamples
407
+ self.power_iteration_normalizer = power_iteration_normalizer
408
+ self.random_state = random_state
409
+
410
+ @_fit_context(prefer_skip_nested_validation=True)
411
+ def fit(self, X, y=None):
412
+ """Fit the model with X.
413
+
414
+ Parameters
415
+ ----------
416
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
417
+ Training data, where `n_samples` is the number of samples
418
+ and `n_features` is the number of features.
419
+
420
+ y : Ignored
421
+ Ignored.
422
+
423
+ Returns
424
+ -------
425
+ self : object
426
+ Returns the instance itself.
427
+ """
428
+ self._fit(X)
429
+ return self
430
+
431
+ @_fit_context(prefer_skip_nested_validation=True)
432
+ def fit_transform(self, X, y=None):
433
+ """Fit the model with X and apply the dimensionality reduction on X.
434
+
435
+ Parameters
436
+ ----------
437
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
438
+ Training data, where `n_samples` is the number of samples
439
+ and `n_features` is the number of features.
440
+
441
+ y : Ignored
442
+ Ignored.
443
+
444
+ Returns
445
+ -------
446
+ X_new : ndarray of shape (n_samples, n_components)
447
+ Transformed values.
448
+
449
+ Notes
450
+ -----
451
+ This method returns a Fortran-ordered array. To convert it to a
452
+ C-ordered array, use 'np.ascontiguousarray'.
453
+ """
454
+ U, S, Vt = self._fit(X)
455
+ U = U[:, : self.n_components_]
456
+
457
+ if self.whiten:
458
+ # X_new = X * V / S * sqrt(n_samples - 1) = U * sqrt(n_samples - 1)
459
+ U *= sqrt(X.shape[0] - 1)
460
+ else:
461
+ # X_new = X * V = U * S * Vt * V = U * S
462
+ U *= S[: self.n_components_]
463
+
464
+ return U
465
+
466
+ def _fit(self, X):
467
+ """Dispatch to the right submethod depending on the chosen solver."""
468
+ xp, is_array_api_compliant = get_namespace(X)
469
+
470
+ # Raise an error for sparse input and unsupported svd_solver
471
+ if issparse(X) and self.svd_solver != "arpack":
472
+ raise TypeError(
473
+ 'PCA only supports sparse inputs with the "arpack" solver, while '
474
+ f'"{self.svd_solver}" was passed. See TruncatedSVD for a possible'
475
+ " alternative."
476
+ )
477
+ # Raise an error for non-Numpy input and arpack solver.
478
+ if self.svd_solver == "arpack" and is_array_api_compliant:
479
+ raise ValueError(
480
+ "PCA with svd_solver='arpack' is not supported for Array API inputs."
481
+ )
482
+
483
+ X = self._validate_data(
484
+ X,
485
+ dtype=[xp.float64, xp.float32],
486
+ accept_sparse=("csr", "csc"),
487
+ ensure_2d=True,
488
+ copy=self.copy,
489
+ )
490
+
491
+ # Handle n_components==None
492
+ if self.n_components is None:
493
+ if self.svd_solver != "arpack":
494
+ n_components = min(X.shape)
495
+ else:
496
+ n_components = min(X.shape) - 1
497
+ else:
498
+ n_components = self.n_components
499
+
500
+ # Handle svd_solver
501
+ self._fit_svd_solver = self.svd_solver
502
+ if self._fit_svd_solver == "auto":
503
+ # Small problem or n_components == 'mle', just call full PCA
504
+ if max(X.shape) <= 500 or n_components == "mle":
505
+ self._fit_svd_solver = "full"
506
+ elif 1 <= n_components < 0.8 * min(X.shape):
507
+ self._fit_svd_solver = "randomized"
508
+ # This is also the case of n_components in (0,1)
509
+ else:
510
+ self._fit_svd_solver = "full"
511
+
512
+ # Call different fits for either full or truncated SVD
513
+ if self._fit_svd_solver == "full":
514
+ return self._fit_full(X, n_components)
515
+ elif self._fit_svd_solver in ["arpack", "randomized"]:
516
+ return self._fit_truncated(X, n_components, self._fit_svd_solver)
517
+
518
+ def _fit_full(self, X, n_components):
519
+ """Fit the model by computing full SVD on X."""
520
+ xp, is_array_api_compliant = get_namespace(X)
521
+
522
+ n_samples, n_features = X.shape
523
+
524
+ if n_components == "mle":
525
+ if n_samples < n_features:
526
+ raise ValueError(
527
+ "n_components='mle' is only supported if n_samples >= n_features"
528
+ )
529
+ elif not 0 <= n_components <= min(n_samples, n_features):
530
+ raise ValueError(
531
+ "n_components=%r must be between 0 and "
532
+ "min(n_samples, n_features)=%r with "
533
+ "svd_solver='full'" % (n_components, min(n_samples, n_features))
534
+ )
535
+
536
+ # Center data
537
+ self.mean_ = xp.mean(X, axis=0)
538
+ X -= self.mean_
539
+
540
+ if not is_array_api_compliant:
541
+ # Use scipy.linalg with NumPy/SciPy inputs for the sake of not
542
+ # introducing unanticipated behavior changes. In the long run we
543
+ # could instead decide to always use xp.linalg.svd for all inputs,
544
+ # but that would make this code rely on numpy's SVD instead of
545
+ # scipy's. It's not 100% clear whether they use the same LAPACK
546
+ # solver by default though (assuming both are built against the
547
+ # same BLAS).
548
+ U, S, Vt = linalg.svd(X, full_matrices=False)
549
+ else:
550
+ U, S, Vt = xp.linalg.svd(X, full_matrices=False)
551
+ # flip eigenvectors' sign to enforce deterministic output
552
+ U, Vt = svd_flip(U, Vt)
553
+
554
+ components_ = Vt
555
+
556
+ # Get variance explained by singular values
557
+ explained_variance_ = (S**2) / (n_samples - 1)
558
+ total_var = xp.sum(explained_variance_)
559
+ explained_variance_ratio_ = explained_variance_ / total_var
560
+ singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
561
+
562
+ # Postprocess the number of components required
563
+ if n_components == "mle":
564
+ n_components = _infer_dimension(explained_variance_, n_samples)
565
+ elif 0 < n_components < 1.0:
566
+ # number of components for which the cumulative explained
567
+ # variance percentage exceeds the desired threshold.
568
+ # side='right' ensures that the selected number of components
569
+ # always explains strictly more variance than the n_components
570
+ # float passed. More discussion in issue: #15669
571
+ if is_array_api_compliant:
572
+ # Convert to numpy as xp.cumsum and xp.searchsorted are not
573
+ # part of the Array API standard yet:
574
+ #
575
+ # https://github.com/data-apis/array-api/issues/597
576
+ # https://github.com/data-apis/array-api/issues/688
577
+ #
578
+ # Furthermore, it's not always safe to call them for namespaces
579
+ # that already implement them: for instance as
580
+ # cupy.searchsorted does not accept a float as second argument.
581
+ explained_variance_ratio_np = _convert_to_numpy(
582
+ explained_variance_ratio_, xp=xp
583
+ )
584
+ else:
585
+ explained_variance_ratio_np = explained_variance_ratio_
586
+ ratio_cumsum = stable_cumsum(explained_variance_ratio_np)
587
+ n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1
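+ # Illustrative example (editor's note): with explained variance
+ # ratios [0.7, 0.2, 0.1] and n_components=0.85, the cumulative sum is
+ # [0.7, 0.9, 1.0]; searchsorted(..., 0.85, side='right') returns 1,
+ # so 1 + 1 = 2 components are kept, explaining 0.9 > 0.85.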
588
+
589
+ # Compute noise covariance using Probabilistic PCA model
590
+ # The sigma2 maximum likelihood (cf. eq. 12.46)
591
+ if n_components < min(n_features, n_samples):
592
+ self.noise_variance_ = xp.mean(explained_variance_[n_components:])
593
+ else:
594
+ self.noise_variance_ = 0.0
595
+
596
+ self.n_samples_ = n_samples
597
+ self.components_ = components_[:n_components, :]
598
+ self.n_components_ = n_components
599
+ self.explained_variance_ = explained_variance_[:n_components]
600
+ self.explained_variance_ratio_ = explained_variance_ratio_[:n_components]
601
+ self.singular_values_ = singular_values_[:n_components]
602
+
603
+ return U, S, Vt
604
+
605
+ def _fit_truncated(self, X, n_components, svd_solver):
606
+ """Fit the model by computing truncated SVD (by ARPACK or randomized)
607
+ on X.
608
+ """
609
+ xp, _ = get_namespace(X)
610
+
611
+ n_samples, n_features = X.shape
612
+
613
+ if isinstance(n_components, str):
614
+ raise ValueError(
615
+ "n_components=%r cannot be a string with svd_solver='%s'"
616
+ % (n_components, svd_solver)
617
+ )
618
+ elif not 1 <= n_components <= min(n_samples, n_features):
619
+ raise ValueError(
620
+ "n_components=%r must be between 1 and "
621
+ "min(n_samples, n_features)=%r with "
622
+ "svd_solver='%s'"
623
+ % (n_components, min(n_samples, n_features), svd_solver)
624
+ )
625
+ elif svd_solver == "arpack" and n_components == min(n_samples, n_features):
626
+ raise ValueError(
627
+ "n_components=%r must be strictly less than "
628
+ "min(n_samples, n_features)=%r with "
629
+ "svd_solver='%s'"
630
+ % (n_components, min(n_samples, n_features), svd_solver)
631
+ )
632
+
633
+ random_state = check_random_state(self.random_state)
634
+
635
+ # Center data
636
+ total_var = None
637
+ if issparse(X):
638
+ self.mean_, var = mean_variance_axis(X, axis=0)
639
+ total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1
640
+ X = _implicit_column_offset(X, self.mean_)
641
+ else:
642
+ self.mean_ = xp.mean(X, axis=0)
643
+ X -= self.mean_
644
+
645
+ if svd_solver == "arpack":
646
+ v0 = _init_arpack_v0(min(X.shape), random_state)
647
+ U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0)
648
+ # svds doesn't abide by scipy.linalg.svd/randomized_svd
649
+ # conventions, so reverse its outputs.
650
+ S = S[::-1]
651
+ # flip eigenvectors' sign to enforce deterministic output
652
+ U, Vt = svd_flip(U[:, ::-1], Vt[::-1])
653
+
654
+ elif svd_solver == "randomized":
655
+ # sign flipping is done inside
656
+ U, S, Vt = randomized_svd(
657
+ X,
658
+ n_components=n_components,
659
+ n_oversamples=self.n_oversamples,
660
+ n_iter=self.iterated_power,
661
+ power_iteration_normalizer=self.power_iteration_normalizer,
662
+ flip_sign=True,
663
+ random_state=random_state,
664
+ )
665
+
666
+ self.n_samples_ = n_samples
667
+ self.components_ = Vt
668
+ self.n_components_ = n_components
669
+
670
+ # Get variance explained by singular values
671
+ self.explained_variance_ = (S**2) / (n_samples - 1)
672
+
673
+ # Workaround in-place variance calculation since at the time numpy
674
+ # did not have a way to calculate variance in-place.
675
+ #
676
+ # TODO: update this code to either:
677
+ # * Use the array-api variance calculation, unless memory usage suffers
678
+ # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api
679
+ # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991
680
+ if total_var is None:
681
+ N = X.shape[0] - 1
682
+ X **= 2
683
+ total_var = xp.sum(X) / N
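+ # Editor's note: X has already been centered above, so
+ # sum(X**2) / (n_samples - 1) is the total ddof=1 variance summed over
+ # features, matching the `var.sum()` rescaling in the sparse branch.
+ # The in-place squaring deliberately overwrites X, which is not needed
+ # after this point.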
684
+
685
+ self.explained_variance_ratio_ = self.explained_variance_ / total_var
686
+ self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
687
+
688
+ if self.n_components_ < min(n_features, n_samples):
689
+ self.noise_variance_ = total_var - xp.sum(self.explained_variance_)
690
+ self.noise_variance_ /= min(n_features, n_samples) - n_components
691
+ else:
692
+ self.noise_variance_ = 0.0
693
+
694
+ return U, S, Vt
695
+
696
+ def score_samples(self, X):
697
+ """Return the log-likelihood of each sample.
698
+
699
+ See. "Pattern Recognition and Machine Learning"
700
+ by C. Bishop, 12.2.1 p. 574
701
+ or http://www.miketipping.com/papers/met-mppca.pdf
702
+
703
+ Parameters
704
+ ----------
705
+ X : array-like of shape (n_samples, n_features)
706
+ The data.
707
+
708
+ Returns
709
+ -------
710
+ ll : ndarray of shape (n_samples,)
711
+ Log-likelihood of each sample under the current model.
712
+ """
713
+ check_is_fitted(self)
714
+ xp, _ = get_namespace(X)
715
+ X = self._validate_data(X, dtype=[xp.float64, xp.float32], reset=False)
716
+ Xr = X - self.mean_
717
+ n_features = X.shape[1]
718
+ precision = self.get_precision()
719
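+ # Editor's note: the two lines below evaluate the Gaussian log-density
+ # of the probabilistic PCA model,
+ # ll_i = -0.5 * [(x_i - mu)^T S^-1 (x_i - mu)
+ # + n_features * log(2 * pi) - log det(S^-1)],
+ # where S^-1 is the precision matrix returned by get_precision().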
+ log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1)
720
+ log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
721
+ return log_like
722
+
723
+ def score(self, X, y=None):
724
+ """Return the average log-likelihood of all samples.
725
+
726
+ See. "Pattern Recognition and Machine Learning"
727
+ by C. Bishop, 12.2.1 p. 574
728
+ or http://www.miketipping.com/papers/met-mppca.pdf
729
+
730
+ Parameters
731
+ ----------
732
+ X : array-like of shape (n_samples, n_features)
733
+ The data.
734
+
735
+ y : Ignored
736
+ Ignored.
737
+
738
+ Returns
739
+ -------
740
+ ll : float
741
+ Average log-likelihood of the samples under the current model.
742
+ """
743
+ xp, _ = get_namespace(X)
744
+ return float(xp.mean(self.score_samples(X)))
745
+
746
+ def _more_tags(self):
747
+ return {"preserves_dtype": [np.float64, np.float32], "array_api_support": True}
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc ADDED
Binary file (22.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc ADDED
Binary file (2.96 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc ADDED
Binary file (24.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc ADDED
Binary file (5.93 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py ADDED
@@ -0,0 +1,983 @@
1
+ import itertools
2
+ import warnings
3
+ from functools import partial
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import sklearn
9
+ from sklearn.base import clone
10
+ from sklearn.decomposition import (
11
+ DictionaryLearning,
12
+ MiniBatchDictionaryLearning,
13
+ SparseCoder,
14
+ dict_learning,
15
+ dict_learning_online,
16
+ sparse_encode,
17
+ )
18
+ from sklearn.decomposition._dict_learning import _update_dict
19
+ from sklearn.exceptions import ConvergenceWarning
20
+ from sklearn.utils import check_array
21
+ from sklearn.utils._testing import (
22
+ TempMemmap,
23
+ assert_allclose,
24
+ assert_array_almost_equal,
25
+ assert_array_equal,
26
+ ignore_warnings,
27
+ )
28
+ from sklearn.utils.estimator_checks import (
29
+ check_transformer_data_not_an_array,
30
+ check_transformer_general,
31
+ check_transformers_unfitted,
32
+ )
33
+ from sklearn.utils.parallel import Parallel
34
+
35
+ rng_global = np.random.RandomState(0)
36
+ n_samples, n_features = 10, 8
37
+ X = rng_global.randn(n_samples, n_features)
38
+
39
+
40
+ def test_sparse_encode_shapes_omp():
41
+ rng = np.random.RandomState(0)
42
+ algorithms = ["omp", "lasso_lars", "lasso_cd", "lars", "threshold"]
43
+ for n_components, n_samples in itertools.product([1, 5], [1, 9]):
44
+ X_ = rng.randn(n_samples, n_features)
45
+ dictionary = rng.randn(n_components, n_features)
46
+ for algorithm, n_jobs in itertools.product(algorithms, [1, 2]):
47
+ code = sparse_encode(X_, dictionary, algorithm=algorithm, n_jobs=n_jobs)
48
+ assert code.shape == (n_samples, n_components)
49
+
50
+
51
+ def test_dict_learning_shapes():
52
+ n_components = 5
53
+ dico = DictionaryLearning(n_components, random_state=0).fit(X)
54
+ assert dico.components_.shape == (n_components, n_features)
55
+
56
+ n_components = 1
57
+ dico = DictionaryLearning(n_components, random_state=0).fit(X)
58
+ assert dico.components_.shape == (n_components, n_features)
59
+ assert dico.transform(X).shape == (X.shape[0], n_components)
60
+
61
+
62
+ def test_dict_learning_overcomplete():
63
+ n_components = 12
64
+ dico = DictionaryLearning(n_components, random_state=0).fit(X)
65
+ assert dico.components_.shape == (n_components, n_features)
66
+
67
+
68
+ def test_max_iter():
69
+ def ricker_function(resolution, center, width):
70
+ """Discrete sub-sampled Ricker (Mexican hat) wavelet"""
71
+ x = np.linspace(0, resolution - 1, resolution)
72
+ x = (
73
+ (2 / (np.sqrt(3 * width) * np.pi**0.25))
74
+ * (1 - (x - center) ** 2 / width**2)
75
+ * np.exp(-((x - center) ** 2) / (2 * width**2))
76
+ )
77
+ return x
78
+
79
+ def ricker_matrix(width, resolution, n_components):
80
+ """Dictionary of Ricker (Mexican hat) wavelets"""
81
+ centers = np.linspace(0, resolution - 1, n_components)
82
+ D = np.empty((n_components, resolution))
83
+ for i, center in enumerate(centers):
84
+ D[i] = ricker_function(resolution, center, width)
85
+ D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis]
86
+ return D
87
+
88
+ transform_algorithm = "lasso_cd"
89
+ resolution = 1024
90
+ subsampling = 3 # subsampling factor
91
+ n_components = resolution // subsampling
92
+
93
+ # Compute a wavelet dictionary
94
+ D_multi = np.r_[
95
+ tuple(
96
+ ricker_matrix(
97
+ width=w, resolution=resolution, n_components=n_components // 5
98
+ )
99
+ for w in (10, 50, 100, 500, 1000)
100
+ )
101
+ ]
102
+
103
+ X = np.linspace(0, resolution - 1, resolution)
104
+ first_quarter = X < resolution / 4
105
+ X[first_quarter] = 3.0
106
+ X[np.logical_not(first_quarter)] = -1.0
107
+ X = X.reshape(1, -1)
108
+
109
+ # check that the underlying model fails to converge
110
+ with pytest.warns(ConvergenceWarning):
111
+ model = SparseCoder(
112
+ D_multi, transform_algorithm=transform_algorithm, transform_max_iter=1
113
+ )
114
+ model.fit_transform(X)
115
+
116
+ # check that the underlying model converges w/o warnings
117
+ with warnings.catch_warnings():
118
+ warnings.simplefilter("error", ConvergenceWarning)
119
+ model = SparseCoder(
120
+ D_multi, transform_algorithm=transform_algorithm, transform_max_iter=2000
121
+ )
122
+ model.fit_transform(X)
123
+
124
+
125
+ def test_dict_learning_lars_positive_parameter():
126
+ n_components = 5
127
+ alpha = 1
128
+ err_msg = "Positive constraint not supported for 'lars' coding method."
129
+ with pytest.raises(ValueError, match=err_msg):
130
+ dict_learning(X, n_components, alpha=alpha, positive_code=True)
131
+
132
+
133
+ @pytest.mark.parametrize(
134
+ "transform_algorithm",
135
+ [
136
+ "lasso_lars",
137
+ "lasso_cd",
138
+ "threshold",
139
+ ],
140
+ )
141
+ @pytest.mark.parametrize("positive_code", [False, True])
142
+ @pytest.mark.parametrize("positive_dict", [False, True])
143
+ def test_dict_learning_positivity(transform_algorithm, positive_code, positive_dict):
144
+ n_components = 5
145
+ dico = DictionaryLearning(
146
+ n_components,
147
+ transform_algorithm=transform_algorithm,
148
+ random_state=0,
149
+ positive_code=positive_code,
150
+ positive_dict=positive_dict,
151
+ fit_algorithm="cd",
152
+ ).fit(X)
153
+
154
+ code = dico.transform(X)
155
+ if positive_dict:
156
+ assert (dico.components_ >= 0).all()
157
+ else:
158
+ assert (dico.components_ < 0).any()
159
+ if positive_code:
160
+ assert (code >= 0).all()
161
+ else:
162
+ assert (code < 0).any()
163
+
164
+
165
+ @pytest.mark.parametrize("positive_dict", [False, True])
166
+ def test_dict_learning_lars_dict_positivity(positive_dict):
167
+ n_components = 5
168
+ dico = DictionaryLearning(
169
+ n_components,
170
+ transform_algorithm="lars",
171
+ random_state=0,
172
+ positive_dict=positive_dict,
173
+ fit_algorithm="cd",
174
+ ).fit(X)
175
+
176
+ if positive_dict:
177
+ assert (dico.components_ >= 0).all()
178
+ else:
179
+ assert (dico.components_ < 0).any()
180
+
181
+
182
+ def test_dict_learning_lars_code_positivity():
183
+ n_components = 5
184
+ dico = DictionaryLearning(
185
+ n_components,
186
+ transform_algorithm="lars",
187
+ random_state=0,
188
+ positive_code=True,
189
+ fit_algorithm="cd",
190
+ ).fit(X)
191
+
192
+ err_msg = "Positive constraint not supported for '{}' coding method."
193
+ err_msg = err_msg.format("lars")
194
+ with pytest.raises(ValueError, match=err_msg):
195
+ dico.transform(X)
196
+
197
+
198
+ def test_dict_learning_reconstruction():
199
+ n_components = 12
200
+ dico = DictionaryLearning(
201
+ n_components, transform_algorithm="omp", transform_alpha=0.001, random_state=0
202
+ )
203
+ code = dico.fit(X).transform(X)
204
+ assert_array_almost_equal(np.dot(code, dico.components_), X)
205
+
206
+ dico.set_params(transform_algorithm="lasso_lars")
207
+ code = dico.transform(X)
208
+ assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
209
+
210
+ # used to test lars here too, but there's no guarantee the number of
211
+ # nonzero atoms is right.
212
+
213
+
214
+ def test_dict_learning_reconstruction_parallel():
215
+ # regression test that parallel reconstruction works with n_jobs>1
216
+ n_components = 12
217
+ dico = DictionaryLearning(
218
+ n_components,
219
+ transform_algorithm="omp",
220
+ transform_alpha=0.001,
221
+ random_state=0,
222
+ n_jobs=4,
223
+ )
224
+ code = dico.fit(X).transform(X)
225
+ assert_array_almost_equal(np.dot(code, dico.components_), X)
226
+
227
+ dico.set_params(transform_algorithm="lasso_lars")
228
+ code = dico.transform(X)
229
+ assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
230
+
231
+
232
+ def test_dict_learning_lassocd_readonly_data():
233
+ n_components = 12
234
+ with TempMemmap(X) as X_read_only:
235
+ dico = DictionaryLearning(
236
+ n_components,
237
+ transform_algorithm="lasso_cd",
238
+ transform_alpha=0.001,
239
+ random_state=0,
240
+ n_jobs=4,
241
+ )
242
+ with ignore_warnings(category=ConvergenceWarning):
243
+ code = dico.fit(X_read_only).transform(X_read_only)
244
+ assert_array_almost_equal(
245
+ np.dot(code, dico.components_), X_read_only, decimal=2
246
+ )
247
+
248
+
249
+ def test_dict_learning_nonzero_coefs():
250
+ n_components = 4
251
+ dico = DictionaryLearning(
252
+ n_components,
253
+ transform_algorithm="lars",
254
+ transform_n_nonzero_coefs=3,
255
+ random_state=0,
256
+ )
257
+ code = dico.fit(X).transform(X[np.newaxis, 1])
258
+ assert len(np.flatnonzero(code)) == 3
259
+
260
+ dico.set_params(transform_algorithm="omp")
261
+ code = dico.transform(X[np.newaxis, 1])
262
+ assert len(np.flatnonzero(code)) == 3
263
+
264
+
265
+ def test_dict_learning_split():
266
+ n_components = 5
267
+ dico = DictionaryLearning(
268
+ n_components, transform_algorithm="threshold", random_state=0
269
+ )
270
+ code = dico.fit(X).transform(X)
271
+ dico.split_sign = True
272
+ split_code = dico.transform(X)
273
+
274
+ assert_array_almost_equal(
275
+ split_code[:, :n_components] - split_code[:, n_components:], code
276
+ )
277
+
278
+
279
+ def test_dict_learning_online_shapes():
280
+ rng = np.random.RandomState(0)
281
+ n_components = 8
282
+
283
+ code, dictionary = dict_learning_online(
284
+ X,
285
+ n_components=n_components,
286
+ batch_size=4,
287
+ max_iter=10,
288
+ method="cd",
289
+ random_state=rng,
290
+ return_code=True,
291
+ )
292
+ assert code.shape == (n_samples, n_components)
293
+ assert dictionary.shape == (n_components, n_features)
294
+ assert np.dot(code, dictionary).shape == X.shape
295
+
296
+ dictionary = dict_learning_online(
297
+ X,
298
+ n_components=n_components,
299
+ batch_size=4,
300
+ max_iter=10,
301
+ method="cd",
302
+ random_state=rng,
303
+ return_code=False,
304
+ )
305
+ assert dictionary.shape == (n_components, n_features)
306
+
307
+
308
+ def test_dict_learning_online_lars_positive_parameter():
309
+ err_msg = "Positive constraint not supported for 'lars' coding method."
310
+ with pytest.raises(ValueError, match=err_msg):
311
+ dict_learning_online(X, batch_size=4, max_iter=10, positive_code=True)
312
+
313
+
314
+ @pytest.mark.parametrize(
315
+ "transform_algorithm",
316
+ [
317
+ "lasso_lars",
318
+ "lasso_cd",
319
+ "threshold",
320
+ ],
321
+ )
322
+ @pytest.mark.parametrize("positive_code", [False, True])
323
+ @pytest.mark.parametrize("positive_dict", [False, True])
324
+ def test_minibatch_dictionary_learning_positivity(
325
+ transform_algorithm, positive_code, positive_dict
326
+ ):
327
+ n_components = 8
328
+ dico = MiniBatchDictionaryLearning(
329
+ n_components,
330
+ batch_size=4,
331
+ max_iter=10,
332
+ transform_algorithm=transform_algorithm,
333
+ random_state=0,
334
+ positive_code=positive_code,
335
+ positive_dict=positive_dict,
336
+ fit_algorithm="cd",
337
+ ).fit(X)
338
+
339
+ code = dico.transform(X)
340
+ if positive_dict:
341
+ assert (dico.components_ >= 0).all()
342
+ else:
343
+ assert (dico.components_ < 0).any()
344
+ if positive_code:
345
+ assert (code >= 0).all()
346
+ else:
347
+ assert (code < 0).any()
348
+
349
+
350
+ @pytest.mark.parametrize("positive_dict", [False, True])
351
+ def test_minibatch_dictionary_learning_lars(positive_dict):
352
+ n_components = 8
353
+
354
+ dico = MiniBatchDictionaryLearning(
355
+ n_components,
356
+ batch_size=4,
357
+ max_iter=10,
358
+ transform_algorithm="lars",
359
+ random_state=0,
360
+ positive_dict=positive_dict,
361
+ fit_algorithm="cd",
362
+ ).fit(X)
363
+
364
+ if positive_dict:
365
+ assert (dico.components_ >= 0).all()
366
+ else:
367
+ assert (dico.components_ < 0).any()
368
+
369
+
370
+ @pytest.mark.parametrize("positive_code", [False, True])
371
+ @pytest.mark.parametrize("positive_dict", [False, True])
372
+ def test_dict_learning_online_positivity(positive_code, positive_dict):
373
+ rng = np.random.RandomState(0)
374
+ n_components = 8
375
+
376
+ code, dictionary = dict_learning_online(
377
+ X,
378
+ n_components=n_components,
379
+ batch_size=4,
380
+ method="cd",
381
+ alpha=1,
382
+ random_state=rng,
383
+ positive_dict=positive_dict,
384
+ positive_code=positive_code,
385
+ )
386
+ if positive_dict:
387
+ assert (dictionary >= 0).all()
388
+ else:
389
+ assert (dictionary < 0).any()
390
+ if positive_code:
391
+ assert (code >= 0).all()
392
+ else:
393
+ assert (code < 0).any()
394
+
395
+
396
+ def test_dict_learning_online_verbosity():
397
+ # test verbosity for better coverage
398
+ n_components = 5
399
+ import sys
400
+ from io import StringIO
401
+
402
+ old_stdout = sys.stdout
403
+ try:
404
+ sys.stdout = StringIO()
405
+
406
+ # convergence monitoring verbosity
407
+ dico = MiniBatchDictionaryLearning(
408
+ n_components, batch_size=4, max_iter=5, verbose=1, tol=0.1, random_state=0
409
+ )
410
+ dico.fit(X)
411
+ dico = MiniBatchDictionaryLearning(
412
+ n_components,
413
+ batch_size=4,
414
+ max_iter=5,
415
+ verbose=1,
416
+ max_no_improvement=2,
417
+ random_state=0,
418
+ )
419
+ dico.fit(X)
420
+ # higher verbosity level
421
+ dico = MiniBatchDictionaryLearning(
422
+ n_components, batch_size=4, max_iter=5, verbose=2, random_state=0
423
+ )
424
+ dico.fit(X)
425
+
426
+ # function API verbosity
427
+ dict_learning_online(
428
+ X,
429
+ n_components=n_components,
430
+ batch_size=4,
431
+ alpha=1,
432
+ verbose=1,
433
+ random_state=0,
434
+ )
435
+ dict_learning_online(
436
+ X,
437
+ n_components=n_components,
438
+ batch_size=4,
439
+ alpha=1,
440
+ verbose=2,
441
+ random_state=0,
442
+ )
443
+ finally:
444
+ sys.stdout = old_stdout
445
+
446
+ assert dico.components_.shape == (n_components, n_features)
447
+
448
+
449
+ def test_dict_learning_online_estimator_shapes():
450
+ n_components = 5
451
+ dico = MiniBatchDictionaryLearning(
452
+ n_components, batch_size=4, max_iter=5, random_state=0
453
+ )
454
+ dico.fit(X)
455
+ assert dico.components_.shape == (n_components, n_features)
456
+
457
+
458
+ def test_dict_learning_online_overcomplete():
459
+ n_components = 12
460
+ dico = MiniBatchDictionaryLearning(
461
+ n_components, batch_size=4, max_iter=5, random_state=0
462
+ ).fit(X)
463
+ assert dico.components_.shape == (n_components, n_features)
464
+
465
+
466
+ def test_dict_learning_online_initialization():
467
+ n_components = 12
468
+ rng = np.random.RandomState(0)
469
+ V = rng.randn(n_components, n_features)
470
+ dico = MiniBatchDictionaryLearning(
471
+ n_components, batch_size=4, max_iter=0, dict_init=V, random_state=0
472
+ ).fit(X)
473
+ assert_array_equal(dico.components_, V)
474
+
475
+
476
+ def test_dict_learning_online_readonly_initialization():
477
+ n_components = 12
478
+ rng = np.random.RandomState(0)
479
+ V = rng.randn(n_components, n_features)
480
+ V.setflags(write=False)
481
+ MiniBatchDictionaryLearning(
482
+ n_components,
483
+ batch_size=4,
484
+ max_iter=1,
485
+ dict_init=V,
486
+ random_state=0,
487
+ shuffle=False,
488
+ ).fit(X)
489
+
490
+
491
+ def test_dict_learning_online_partial_fit():
492
+ n_components = 12
493
+ rng = np.random.RandomState(0)
494
+ V = rng.randn(n_components, n_features) # random init
495
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
496
+ dict1 = MiniBatchDictionaryLearning(
497
+ n_components,
498
+ max_iter=10,
499
+ batch_size=1,
500
+ alpha=1,
501
+ shuffle=False,
502
+ dict_init=V,
503
+ max_no_improvement=None,
504
+ tol=0.0,
505
+ random_state=0,
506
+ ).fit(X)
507
+ dict2 = MiniBatchDictionaryLearning(
508
+ n_components, alpha=1, dict_init=V, random_state=0
509
+ )
510
+ for i in range(10):
511
+ for sample in X:
512
+ dict2.partial_fit(sample[np.newaxis, :])
513
+
514
+ assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
515
+ assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2)
516
+
517
+ # partial_fit should ignore max_iter (#17433)
518
+ assert dict1.n_steps_ == dict2.n_steps_ == 100
519
+
520
+
521
+ def test_sparse_encode_shapes():
522
+ n_components = 12
523
+ rng = np.random.RandomState(0)
524
+ V = rng.randn(n_components, n_features) # random init
525
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
526
+ for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"):
527
+ code = sparse_encode(X, V, algorithm=algo)
528
+ assert code.shape == (n_samples, n_components)
529
+
530
+
531
+ @pytest.mark.parametrize("algo", ["lasso_lars", "lasso_cd", "threshold"])
532
+ @pytest.mark.parametrize("positive", [False, True])
533
+ def test_sparse_encode_positivity(algo, positive):
534
+ n_components = 12
535
+ rng = np.random.RandomState(0)
536
+ V = rng.randn(n_components, n_features) # random init
537
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
538
+ code = sparse_encode(X, V, algorithm=algo, positive=positive)
539
+ if positive:
540
+ assert (code >= 0).all()
541
+ else:
542
+ assert (code < 0).any()
543
+
544
+
545
+ @pytest.mark.parametrize("algo", ["lars", "omp"])
546
+ def test_sparse_encode_unavailable_positivity(algo):
547
+ n_components = 12
548
+ rng = np.random.RandomState(0)
549
+ V = rng.randn(n_components, n_features) # random init
550
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
551
+ err_msg = "Positive constraint not supported for '{}' coding method."
552
+ err_msg = err_msg.format(algo)
553
+ with pytest.raises(ValueError, match=err_msg):
554
+ sparse_encode(X, V, algorithm=algo, positive=True)
555
+
556
+
557
+ def test_sparse_encode_input():
558
+ n_components = 100
559
+ rng = np.random.RandomState(0)
560
+ V = rng.randn(n_components, n_features) # random init
561
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
562
+ Xf = check_array(X, order="F")
563
+ for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"):
564
+ a = sparse_encode(X, V, algorithm=algo)
565
+ b = sparse_encode(Xf, V, algorithm=algo)
566
+ assert_array_almost_equal(a, b)
567
+
568
+
569
+ def test_sparse_encode_error():
570
+ n_components = 12
571
+ rng = np.random.RandomState(0)
572
+ V = rng.randn(n_components, n_features) # random init
573
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
574
+ code = sparse_encode(X, V, alpha=0.001)
575
+ assert not np.all(code == 0)
576
+ assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
577
+
578
+
579
+ def test_sparse_encode_error_default_sparsity():
580
+ rng = np.random.RandomState(0)
581
+ X = rng.randn(100, 64)
582
+ D = rng.randn(2, 64)
583
+ code = ignore_warnings(sparse_encode)(X, D, algorithm="omp", n_nonzero_coefs=None)
584
+ assert code.shape == (100, 2)
585
+
586
+
587
+ def test_sparse_coder_estimator():
588
+ n_components = 12
589
+ rng = np.random.RandomState(0)
590
+ V = rng.randn(n_components, n_features) # random init
591
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
592
+ coder = SparseCoder(
593
+ dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001
594
+ ).transform(X)
595
+ assert not np.all(coder == 0)
596
+ assert np.sqrt(np.sum((np.dot(coder, V) - X) ** 2)) < 0.1
597
+
598
+
599
+ def test_sparse_coder_estimator_clone():
600
+ n_components = 12
601
+ rng = np.random.RandomState(0)
602
+ V = rng.randn(n_components, n_features) # random init
603
+ V /= np.sum(V**2, axis=1)[:, np.newaxis]
604
+ coder = SparseCoder(
605
+ dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001
606
+ )
607
+ cloned = clone(coder)
608
+ assert id(cloned) != id(coder)
609
+ np.testing.assert_allclose(cloned.dictionary, coder.dictionary)
610
+ assert id(cloned.dictionary) != id(coder.dictionary)
611
+ assert cloned.n_components_ == coder.n_components_
612
+ assert cloned.n_features_in_ == coder.n_features_in_
613
+ data = np.random.rand(n_samples, n_features).astype(np.float32)
614
+ np.testing.assert_allclose(cloned.transform(data), coder.transform(data))
615
+
616
+
617
+ def test_sparse_coder_parallel_mmap():
618
+ # Non-regression test for:
619
+ # https://github.com/scikit-learn/scikit-learn/issues/5956
620
+ # Test that SparseCoder does not error by passing reading only
621
+ # arrays to child processes
622
+
623
+ rng = np.random.RandomState(777)
624
+ n_components, n_features = 40, 64
625
+ init_dict = rng.rand(n_components, n_features)
626
+ # Ensure that `data` is >2M. Joblib memory maps arrays
627
+ # if they are larger than 1MB. The 4 accounts for float32
628
+ # data type
629
+ n_samples = int(2e6) // (4 * n_features)
630
+ data = np.random.rand(n_samples, n_features).astype(np.float32)
631
+
632
+ sc = SparseCoder(init_dict, transform_algorithm="omp", n_jobs=2)
633
+ sc.fit_transform(data)
634
+
635
+
636
+ def test_sparse_coder_common_transformer():
637
+ rng = np.random.RandomState(777)
638
+ n_components, n_features = 40, 3
639
+ init_dict = rng.rand(n_components, n_features)
640
+
641
+ sc = SparseCoder(init_dict)
642
+
643
+ check_transformer_data_not_an_array(sc.__class__.__name__, sc)
644
+ check_transformer_general(sc.__class__.__name__, sc)
645
+ check_transformer_general_memmap = partial(
646
+ check_transformer_general, readonly_memmap=True
647
+ )
648
+ check_transformer_general_memmap(sc.__class__.__name__, sc)
649
+ check_transformers_unfitted(sc.__class__.__name__, sc)
650
+
651
+
652
+ def test_sparse_coder_n_features_in():
653
+ d = np.array([[1, 2, 3], [1, 2, 3]])
654
+ sc = SparseCoder(d)
655
+ assert sc.n_features_in_ == d.shape[1]
656
+
657
+
658
+ def test_update_dict():
659
+ # Check the dict update in batch mode vs online mode
660
+ # Non-regression test for #4866
661
+ rng = np.random.RandomState(0)
662
+
663
+ code = np.array([[0.5, -0.5], [0.1, 0.9]])
664
+ dictionary = np.array([[1.0, 0.0], [0.6, 0.8]])
665
+
666
+ X = np.dot(code, dictionary) + rng.randn(2, 2)
667
+
668
+ # full batch update
669
+ newd_batch = dictionary.copy()
670
+ _update_dict(newd_batch, X, code)
671
+
672
+ # online update
673
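+ # Editor's note: A (= code^T code) and B (= X^T code) are the
+ # sufficient statistics accumulated by the online dictionary-learning
+ # algorithm; passing them to _update_dict exercises its online update
+ # path, which should coincide with the batch update on this single
+ # batch.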
+ A = np.dot(code.T, code)
674
+ B = np.dot(X.T, code)
675
+ newd_online = dictionary.copy()
676
+ _update_dict(newd_online, X, code, A, B)
677
+
678
+ assert_allclose(newd_batch, newd_online)
679
+
680
+
681
+ @pytest.mark.parametrize(
682
+ "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
683
+ )
684
+ @pytest.mark.parametrize("data_type", (np.float32, np.float64))
685
+ # Note: do not check integer input because `lasso_lars` and `lars` fail with
686
+ # `ValueError` in `_lars_path_solver`
687
+ def test_sparse_encode_dtype_match(data_type, algorithm):
688
+ n_components = 6
689
+ rng = np.random.RandomState(0)
690
+ dictionary = rng.randn(n_components, n_features)
691
+ code = sparse_encode(
692
+ X.astype(data_type), dictionary.astype(data_type), algorithm=algorithm
693
+ )
694
+ assert code.dtype == data_type
695
+
696
+
697
+ @pytest.mark.parametrize(
698
+ "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
699
+ )
700
+ def test_sparse_encode_numerical_consistency(algorithm):
701
+ # verify numerical consistency among np.float32 and np.float64
702
+ rtol = 1e-4
703
+ n_components = 6
704
+ rng = np.random.RandomState(0)
705
+ dictionary = rng.randn(n_components, n_features)
706
+ code_32 = sparse_encode(
707
+ X.astype(np.float32), dictionary.astype(np.float32), algorithm=algorithm
708
+ )
709
+ code_64 = sparse_encode(
710
+ X.astype(np.float64), dictionary.astype(np.float64), algorithm=algorithm
711
+ )
712
+ assert_allclose(code_32, code_64, rtol=rtol)
713
+
714
+
715
+ @pytest.mark.parametrize(
716
+ "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
717
+ )
718
+ @pytest.mark.parametrize("data_type", (np.float32, np.float64))
719
+ # Note: do not check integer input because `lasso_lars` and `lars` fail with
720
+ # `ValueError` in `_lars_path_solver`
721
+ def test_sparse_coder_dtype_match(data_type, transform_algorithm):
722
+ # Verify preserving dtype for transform in sparse coder
723
+ n_components = 6
724
+ rng = np.random.RandomState(0)
725
+ dictionary = rng.randn(n_components, n_features)
726
+ coder = SparseCoder(
727
+ dictionary.astype(data_type), transform_algorithm=transform_algorithm
728
+ )
729
+ code = coder.transform(X.astype(data_type))
730
+ assert code.dtype == data_type
731
+
732
+
733
+ @pytest.mark.parametrize("fit_algorithm", ("lars", "cd"))
734
+ @pytest.mark.parametrize(
735
+ "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
736
+ )
737
+ @pytest.mark.parametrize(
738
+ "data_type, expected_type",
739
+ (
740
+ (np.float32, np.float32),
741
+ (np.float64, np.float64),
742
+ (np.int32, np.float64),
743
+ (np.int64, np.float64),
744
+ ),
745
+ )
746
+ def test_dictionary_learning_dtype_match(
747
+ data_type,
748
+ expected_type,
749
+ fit_algorithm,
750
+ transform_algorithm,
751
+ ):
752
+ # Verify preserving dtype for fit and transform in dictionary learning class
753
+ dict_learner = DictionaryLearning(
754
+ n_components=8,
755
+ fit_algorithm=fit_algorithm,
756
+ transform_algorithm=transform_algorithm,
757
+ random_state=0,
758
+ )
759
+ dict_learner.fit(X.astype(data_type))
760
+ assert dict_learner.components_.dtype == expected_type
761
+ assert dict_learner.transform(X.astype(data_type)).dtype == expected_type
762
+
763
+
764
+ @pytest.mark.parametrize("fit_algorithm", ("lars", "cd"))
765
+ @pytest.mark.parametrize(
766
+ "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
767
+ )
768
+ @pytest.mark.parametrize(
769
+ "data_type, expected_type",
770
+ (
771
+ (np.float32, np.float32),
772
+ (np.float64, np.float64),
773
+ (np.int32, np.float64),
774
+ (np.int64, np.float64),
775
+ ),
776
+ )
777
+ def test_minibatch_dictionary_learning_dtype_match(
778
+ data_type,
779
+ expected_type,
780
+ fit_algorithm,
781
+ transform_algorithm,
782
+ ):
783
+ # Verify preserving dtype for fit and transform in minibatch dictionary learning
784
+ dict_learner = MiniBatchDictionaryLearning(
785
+ n_components=8,
786
+ batch_size=10,
787
+ fit_algorithm=fit_algorithm,
788
+ transform_algorithm=transform_algorithm,
789
+ max_iter=100,
790
+ tol=1e-1,
791
+ random_state=0,
792
+ )
793
+ dict_learner.fit(X.astype(data_type))
794
+
795
+ assert dict_learner.components_.dtype == expected_type
796
+ assert dict_learner.transform(X.astype(data_type)).dtype == expected_type
797
+ assert dict_learner._A.dtype == expected_type
798
+ assert dict_learner._B.dtype == expected_type
799
+
800
+
801
+ @pytest.mark.parametrize("method", ("lars", "cd"))
802
+ @pytest.mark.parametrize(
803
+ "data_type, expected_type",
804
+ (
805
+ (np.float32, np.float32),
806
+ (np.float64, np.float64),
807
+ (np.int32, np.float64),
808
+ (np.int64, np.float64),
809
+ ),
810
+ )
811
+ def test_dict_learning_dtype_match(data_type, expected_type, method):
812
+ # Verify output matrix dtype
813
+ rng = np.random.RandomState(0)
814
+ n_components = 8
815
+ code, dictionary, _ = dict_learning(
816
+ X.astype(data_type),
817
+ n_components=n_components,
818
+ alpha=1,
819
+ random_state=rng,
820
+ method=method,
821
+ )
822
+ assert code.dtype == expected_type
823
+ assert dictionary.dtype == expected_type
824
+
825
+
826
+ @pytest.mark.parametrize("method", ("lars", "cd"))
827
+ def test_dict_learning_numerical_consistency(method):
828
+ # verify numerically consistent among np.float32 and np.float64
829
+ rtol = 1e-6
830
+ n_components = 4
831
+ alpha = 2
832
+
833
+ U_64, V_64, _ = dict_learning(
834
+ X.astype(np.float64),
835
+ n_components=n_components,
836
+ alpha=alpha,
837
+ random_state=0,
838
+ method=method,
839
+ )
840
+ U_32, V_32, _ = dict_learning(
841
+ X.astype(np.float32),
842
+ n_components=n_components,
843
+ alpha=alpha,
844
+ random_state=0,
845
+ method=method,
846
+ )
847
+
848
+ # Optimal solution (U*, V*) is not unique.
849
+ # If (U*, V*) is an optimal solution, (-U*, -V*) is also optimal,
850
+ # and (column-permuted U*, row-permuted V*) is also optimal
851
+ # as long as the product UV is preserved.
852
+ # So here UV, ||U||_1,1 and sum(||V_k||_2^2) are verified
853
+ # instead of comparing directly U and V.
854
+ assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
855
+ assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
856
+ assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
857
+ # verify an obtained solution is not degenerate
858
+ assert np.mean(U_64 != 0.0) > 0.05
859
+ assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
860
+
861
+
862
+ @pytest.mark.parametrize("method", ("lars", "cd"))
863
+ @pytest.mark.parametrize(
864
+ "data_type, expected_type",
865
+ (
866
+ (np.float32, np.float32),
867
+ (np.float64, np.float64),
868
+ (np.int32, np.float64),
869
+ (np.int64, np.float64),
870
+ ),
871
+ )
872
+ def test_dict_learning_online_dtype_match(data_type, expected_type, method):
873
+ # Verify output matrix dtype
874
+ rng = np.random.RandomState(0)
875
+ n_components = 8
876
+ code, dictionary = dict_learning_online(
877
+ X.astype(data_type),
878
+ n_components=n_components,
879
+ alpha=1,
880
+ batch_size=10,
881
+ random_state=rng,
882
+ method=method,
883
+ )
884
+ assert code.dtype == expected_type
885
+ assert dictionary.dtype == expected_type
886
+
887
+
888
+ @pytest.mark.parametrize("method", ("lars", "cd"))
889
+ def test_dict_learning_online_numerical_consistency(method):
890
+ # verify numerically consistent among np.float32 and np.float64
891
+ rtol = 1e-4
892
+ n_components = 4
893
+ alpha = 1
894
+
895
+ U_64, V_64 = dict_learning_online(
896
+ X.astype(np.float64),
897
+ n_components=n_components,
898
+ max_iter=1_000,
899
+ alpha=alpha,
900
+ batch_size=10,
901
+ random_state=0,
902
+ method=method,
903
+ tol=0.0,
904
+ max_no_improvement=None,
905
+ )
906
+ U_32, V_32 = dict_learning_online(
907
+ X.astype(np.float32),
908
+ n_components=n_components,
909
+ max_iter=1_000,
910
+ alpha=alpha,
911
+ batch_size=10,
912
+ random_state=0,
913
+ method=method,
914
+ tol=0.0,
915
+ max_no_improvement=None,
916
+ )
917
+
918
+ # Optimal solution (U*, V*) is not unique.
919
+ # If (U*, V*) is an optimal solution, (-U*, -V*) is also optimal,
920
+ # and (column-permuted U*, row-permuted V*) is also optimal
921
+ # as long as the product UV is preserved.
922
+ # So here UV, ||U||_1,1 and sum(||V_k||_2) are verified
923
+ # instead of comparing directly U and V.
924
+ assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
925
+ assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
926
+ assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
927
+ # verify an obtained solution is not degenerate
928
+ assert np.mean(U_64 != 0.0) > 0.05
929
+ assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
930
+
931
+
932
+ @pytest.mark.parametrize(
933
+ "estimator",
934
+ [
935
+ SparseCoder(X.T),
936
+ DictionaryLearning(),
937
+ MiniBatchDictionaryLearning(batch_size=4, max_iter=10),
938
+ ],
939
+ ids=lambda x: x.__class__.__name__,
940
+ )
941
+ def test_get_feature_names_out(estimator):
942
+ """Check feature names for dict learning estimators."""
943
+ estimator.fit(X)
944
+ n_components = X.shape[1]
945
+
946
+ feature_names_out = estimator.get_feature_names_out()
947
+ estimator_name = estimator.__class__.__name__.lower()
948
+ assert_array_equal(
949
+ feature_names_out,
950
+ [f"{estimator_name}{i}" for i in range(n_components)],
951
+ )
952
+
953
+
954
+ def test_cd_work_on_joblib_memmapped_data(monkeypatch):
955
+ monkeypatch.setattr(
956
+ sklearn.decomposition._dict_learning,
957
+ "Parallel",
958
+ partial(Parallel, max_nbytes=100),
959
+ )
960
+
961
+ rng = np.random.RandomState(0)
962
+ X_train = rng.randn(10, 10)
963
+
964
+ dict_learner = DictionaryLearning(
965
+ n_components=5,
966
+ random_state=0,
967
+ n_jobs=2,
968
+ fit_algorithm="cd",
969
+ max_iter=50,
970
+ verbose=True,
971
+ )
972
+
973
+ # This must run and complete without error.
974
+ dict_learner.fit(X_train)
975
+
976
+
977
+ # TODO(1.6): remove in 1.6
978
+ def test_xxx():
979
+ warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed"
980
+ with pytest.warns(FutureWarning, match=warn_msg):
981
+ MiniBatchDictionaryLearning(max_iter=None, random_state=0).fit(X)
982
+ with pytest.warns(FutureWarning, match=warn_msg):
983
+ dict_learning_online(X, max_iter=None, random_state=0)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py ADDED
@@ -0,0 +1,116 @@
1
+ # Author: Christian Osendorfer <[email protected]>
2
+ # Alexandre Gramfort <[email protected]>
3
+ # License: BSD3
4
+
5
+ from itertools import combinations
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from sklearn.decomposition import FactorAnalysis
11
+ from sklearn.decomposition._factor_analysis import _ortho_rotation
12
+ from sklearn.exceptions import ConvergenceWarning
13
+ from sklearn.utils._testing import (
14
+ assert_almost_equal,
15
+ assert_array_almost_equal,
16
+ ignore_warnings,
17
+ )
18
+
19
+
20
+ # Ignore warnings from switching to more power iterations in randomized_svd
21
+ @ignore_warnings
22
+ def test_factor_analysis():
23
+ # Test FactorAnalysis ability to recover the data covariance structure
24
+ rng = np.random.RandomState(0)
25
+ n_samples, n_features, n_components = 20, 5, 3
26
+
27
+ # Some random settings for the generative model
28
+ W = rng.randn(n_components, n_features)
29
+ # 20 samples of a 3-dimensional latent variable
30
+ h = rng.randn(n_samples, n_components)
31
+ # using gamma to model a different noise variance
32
+ # per feature
33
+ noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
34
+
35
+ # generate observations
36
+ # wlog, mean is 0
37
+ X = np.dot(h, W) + noise
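+ # Editor's note: this is the linear factor model X = h @ W + noise with
+ # latent h ~ N(0, I) and per-feature noise variances, so the model
+ # covariance that FactorAnalysis should recover is approximately
+ # W.T @ W + diag(psi).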
38
+
39
+ fas = []
40
+ for method in ["randomized", "lapack"]:
41
+ fa = FactorAnalysis(n_components=n_components, svd_method=method)
42
+ fa.fit(X)
43
+ fas.append(fa)
44
+
45
+ X_t = fa.transform(X)
46
+ assert X_t.shape == (n_samples, n_components)
47
+
48
+ assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
49
+ assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
50
+
51
+ diff = np.all(np.diff(fa.loglike_))
52
+ assert diff > 0.0, "Log likelihood did not increase"
53
+
54
+ # Sample Covariance
55
+ scov = np.cov(X, rowvar=0.0, bias=1.0)
56
+
57
+ # Model Covariance
58
+ mcov = fa.get_covariance()
59
+ diff = np.sum(np.abs(scov - mcov)) / W.size
60
+ assert diff < 0.1, "Mean absolute difference is %f" % diff
61
+ fa = FactorAnalysis(
62
+ n_components=n_components, noise_variance_init=np.ones(n_features)
63
+ )
64
+ with pytest.raises(ValueError):
65
+ fa.fit(X[:, :2])
66
+
67
+ def f(x, y):
68
+ return np.abs(getattr(x, y)) # sign will not be equal
69
+
70
+ fa1, fa2 = fas
71
+ for attr in ["loglike_", "components_", "noise_variance_"]:
72
+ assert_almost_equal(f(fa1, attr), f(fa2, attr))
73
+
74
+ fa1.max_iter = 1
75
+ fa1.verbose = True
76
+ with pytest.warns(ConvergenceWarning):
77
+ fa1.fit(X)
78
+
79
+ # Test get_covariance and get_precision with n_components == n_features
80
+ # with n_components < n_features and with n_components == 0
81
+ for n_components in [0, 2, X.shape[1]]:
82
+ fa.n_components = n_components
83
+ fa.fit(X)
84
+ cov = fa.get_covariance()
85
+ precision = fa.get_precision()
86
+ assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
87
+
88
+ # test rotation
89
+ n_components = 2
90
+
91
+ results, projections = {}, {}
92
+ for method in (None, "varimax", "quartimax"):
93
+ fa_var = FactorAnalysis(n_components=n_components, rotation=method)
94
+ results[method] = fa_var.fit_transform(X)
95
+ projections[method] = fa_var.get_covariance()
96
+ for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2):
97
+ assert not np.allclose(results[rot1], results[rot2])
98
+ assert np.allclose(projections[rot1], projections[rot2], atol=3)
99
+
100
+ # test against R's psych::principal with rotate="varimax"
101
+ # (i.e., the values below stem from rotating the components in R)
102
+ # R's factor analysis returns quite different values; therefore, we only
103
+ # test the rotation itself
104
+ factors = np.array(
105
+ [
106
+ [0.89421016, -0.35854928, -0.27770122, 0.03773647],
107
+ [-0.45081822, -0.89132754, 0.0932195, -0.01787973],
108
+ [0.99500666, -0.02031465, 0.05426497, -0.11539407],
109
+ [0.96822861, -0.06299656, 0.24411001, 0.07540887],
110
+ ]
111
+ )
112
+ r_solution = np.array(
113
+ [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]]
114
+ )
115
+ rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T
116
+ assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3)
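To make the rotation behaviour checked above concrete, here is a minimal sketch fitting FactorAnalysis with and without a varimax rotation on toy data (the data and names are illustrative only). The orthogonal rotation changes the loadings but leaves the modelled covariance unchanged up to numerical tolerance:

    import numpy as np
    from sklearn.decomposition import FactorAnalysis

    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 6)  # toy data for illustration

    fa_plain = FactorAnalysis(n_components=2, random_state=0).fit(X_demo)
    fa_varimax = FactorAnalysis(
        n_components=2, rotation="varimax", random_state=0
    ).fit(X_demo)

    # The rotation changes components_ but preserves the covariance model.
    print(np.allclose(fa_plain.get_covariance(), fa_varimax.get_covariance(), atol=1e-6))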
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py ADDED
@@ -0,0 +1,451 @@
1
+ """
2
+ Test the fastica algorithm.
3
+ """
4
+ import itertools
5
+ import os
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytest
10
+ from scipy import stats
11
+
12
+ from sklearn.decomposition import PCA, FastICA, fastica
13
+ from sklearn.decomposition._fastica import _gs_decorrelation
14
+ from sklearn.exceptions import ConvergenceWarning
15
+ from sklearn.utils._testing import assert_allclose
16
+
17
+
18
+ def center_and_norm(x, axis=-1):
19
+ """Centers and norms x **in place**
20
+
21
+ Parameters
22
+ ----------
23
+ x: ndarray
24
+ Array with an axis of observations (statistical units) measured on
25
+ random variables.
26
+ axis: int, optional
27
+ Axis along which the mean and variance are calculated.
28
+ """
29
+ x = np.rollaxis(x, axis)
30
+ x -= x.mean(axis=0)
31
+ x /= x.std(axis=0)
32
+
33
+
34
+ def test_gs():
35
+ # Test gram schmidt orthonormalization
36
+ # generate a random orthogonal matrix
37
+ rng = np.random.RandomState(0)
38
+ W, _, _ = np.linalg.svd(rng.randn(10, 10))
39
+ w = rng.randn(10)
40
+ _gs_decorrelation(w, W, 10)
41
+ assert (w**2).sum() < 1.0e-10
42
+ w = rng.randn(10)
43
+ u = _gs_decorrelation(w, W, 5)
44
+ tmp = np.dot(u, W.T)
45
+ assert (tmp[:5] ** 2).sum() < 1.0e-10
46
+
47
+
48
+ def test_fastica_attributes_dtypes(global_dtype):
49
+ rng = np.random.RandomState(0)
50
+ X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
51
+ fica = FastICA(
52
+ n_components=5, max_iter=1000, whiten="unit-variance", random_state=0
53
+ ).fit(X)
54
+ assert fica.components_.dtype == global_dtype
55
+ assert fica.mixing_.dtype == global_dtype
56
+ assert fica.mean_.dtype == global_dtype
57
+ assert fica.whitening_.dtype == global_dtype
58
+
59
+
60
+ def test_fastica_return_dtypes(global_dtype):
61
+ rng = np.random.RandomState(0)
62
+ X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
63
+ k_, mixing_, s_ = fastica(
64
+ X, max_iter=1000, whiten="unit-variance", random_state=rng
65
+ )
66
+ assert k_.dtype == global_dtype
67
+ assert mixing_.dtype == global_dtype
68
+ assert s_.dtype == global_dtype
69
+
70
+
71
+ @pytest.mark.parametrize("add_noise", [True, False])
72
+ def test_fastica_simple(add_noise, global_random_seed, global_dtype):
73
+ if (
74
+ global_random_seed == 20
75
+ and global_dtype == np.float32
76
+ and not add_noise
77
+ and os.getenv("DISTRIB") == "ubuntu"
78
+ ):
79
+ pytest.xfail(
80
+ "FastICA instability with Ubuntu Atlas build with float32 "
81
+ "global_dtype. For more details, see "
82
+ "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119" # noqa
83
+ )
84
+
85
+ # Test the FastICA algorithm on very simple data.
86
+ rng = np.random.RandomState(global_random_seed)
87
+ n_samples = 1000
88
+ # Generate two sources:
89
+ s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
90
+ s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed)
91
+ s = np.c_[s1, s2].T
92
+ center_and_norm(s)
93
+ s = s.astype(global_dtype)
94
+ s1, s2 = s
95
+
96
+ # Mixing angle
97
+ phi = 0.6
98
+ mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
99
+ mixing = mixing.astype(global_dtype)
100
+ m = np.dot(mixing, s)
101
+
102
+ if add_noise:
103
+ m += 0.1 * rng.randn(2, 1000)
104
+
105
+ center_and_norm(m)
106
+
107
+ # function as fun arg
108
+ def g_test(x):
109
+ return x**3, (3 * x**2).mean(axis=-1)
110
+
111
+ algos = ["parallel", "deflation"]
112
+ nls = ["logcosh", "exp", "cube", g_test]
113
+ whitening = ["arbitrary-variance", "unit-variance", False]
114
+ for algo, nl, whiten in itertools.product(algos, nls, whitening):
115
+ if whiten:
116
+ k_, mixing_, s_ = fastica(
117
+ m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng
118
+ )
119
+ with pytest.raises(ValueError):
120
+ fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo)
121
+ else:
122
+ pca = PCA(n_components=2, whiten=True, random_state=rng)
123
+ X = pca.fit_transform(m.T)
124
+ k_, mixing_, s_ = fastica(
125
+ X, fun=nl, algorithm=algo, whiten=False, random_state=rng
126
+ )
127
+ with pytest.raises(ValueError):
128
+ fastica(X, fun=np.tanh, algorithm=algo)
129
+ s_ = s_.T
130
+ # Check that the mixing model described in the docstring holds:
131
+ if whiten:
132
+ # XXX: exact reconstruction to standard relative tolerance is not
133
+ # possible. This is probably expected when add_noise is True but we
134
+ # also need a non-trivial atol in float32 when add_noise is False.
135
+ #
136
+ # Note that the 2 sources are non-Gaussian in this test.
137
+ atol = 1e-5 if global_dtype == np.float32 else 0
138
+ assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol)
139
+
140
+ center_and_norm(s_)
141
+ s1_, s2_ = s_
142
+ # Check to see if the sources have been estimated
143
+ # in the wrong order
144
+ if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
145
+ s2_, s1_ = s_
146
+ s1_ *= np.sign(np.dot(s1_, s1))
147
+ s2_ *= np.sign(np.dot(s2_, s2))
148
+
149
+ # Check that we have estimated the original sources
150
+ if not add_noise:
151
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2)
152
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2)
153
+ else:
154
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1)
155
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1)
156
+
157
+ # Test FastICA class
158
+ _, _, sources_fun = fastica(
159
+ m.T, fun=nl, algorithm=algo, random_state=global_random_seed
160
+ )
161
+ ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed)
162
+ sources = ica.fit_transform(m.T)
163
+ assert ica.components_.shape == (2, 2)
164
+ assert sources.shape == (1000, 2)
165
+
166
+ assert_allclose(sources_fun, sources)
167
+ # Set atol to account for the different magnitudes of the elements in sources
168
+ # (from 1e-4 to 1e1).
169
+ atol = np.max(np.abs(sources)) * (1e-5 if global_dtype == np.float32 else 1e-7)
170
+ assert_allclose(sources, ica.transform(m.T), atol=atol)
171
+
172
+ assert ica.mixing_.shape == (2, 2)
173
+
174
+ ica = FastICA(fun=np.tanh, algorithm=algo)
175
+ with pytest.raises(ValueError):
176
+ ica.fit(m.T)
177
+
178
+
179
+ def test_fastica_nowhiten():
180
+ m = [[0, 1], [1, 0]]
181
+
182
+ # test for issue #697
183
+ ica = FastICA(n_components=1, whiten=False, random_state=0)
184
+ warn_msg = "Ignoring n_components with whiten=False."
185
+ with pytest.warns(UserWarning, match=warn_msg):
186
+ ica.fit(m)
187
+ assert hasattr(ica, "mixing_")
188
+
189
+
190
+ def test_fastica_convergence_fail():
191
+ # Test the FastICA algorithm on very simple data
192
+ # (see test_non_square_fastica).
193
+ # Ensure a ConvergenceWarning is raised if the tolerance is sufficiently low.
194
+ rng = np.random.RandomState(0)
195
+
196
+ n_samples = 1000
197
+ # Generate two sources:
198
+ t = np.linspace(0, 100, n_samples)
199
+ s1 = np.sin(t)
200
+ s2 = np.ceil(np.sin(np.pi * t))
201
+ s = np.c_[s1, s2].T
202
+ center_and_norm(s)
203
+
204
+ # Mixing matrix
205
+ mixing = rng.randn(6, 2)
206
+ m = np.dot(mixing, s)
207
+
208
+ # Run FastICA with tolerance 0 to ensure convergence fails
209
+ warn_msg = (
210
+ "FastICA did not converge. Consider increasing tolerance "
211
+ "or the maximum number of iterations."
212
+ )
213
+ with pytest.warns(ConvergenceWarning, match=warn_msg):
214
+ ica = FastICA(
215
+ algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.0
216
+ )
217
+ ica.fit(m.T)
218
+
219
+
220
+ @pytest.mark.parametrize("add_noise", [True, False])
221
+ def test_non_square_fastica(add_noise):
222
+ # Test the FastICA algorithm on very simple data.
223
+ rng = np.random.RandomState(0)
224
+
225
+ n_samples = 1000
226
+ # Generate two sources:
227
+ t = np.linspace(0, 100, n_samples)
228
+ s1 = np.sin(t)
229
+ s2 = np.ceil(np.sin(np.pi * t))
230
+ s = np.c_[s1, s2].T
231
+ center_and_norm(s)
232
+ s1, s2 = s
233
+
234
+ # Mixing matrix
235
+ mixing = rng.randn(6, 2)
236
+ m = np.dot(mixing, s)
237
+
238
+ if add_noise:
239
+ m += 0.1 * rng.randn(6, n_samples)
240
+
241
+ center_and_norm(m)
242
+
243
+ k_, mixing_, s_ = fastica(
244
+ m.T, n_components=2, whiten="unit-variance", random_state=rng
245
+ )
246
+ s_ = s_.T
247
+
248
+ # Check that the mixing model described in the docstring holds:
249
+ assert_allclose(s_, np.dot(np.dot(mixing_, k_), m))
250
+
251
+ center_and_norm(s_)
252
+ s1_, s2_ = s_
253
+ # Check to see if the sources have been estimated
254
+ # in the wrong order
255
+ if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
256
+ s2_, s1_ = s_
257
+ s1_ *= np.sign(np.dot(s1_, s1))
258
+ s2_ *= np.sign(np.dot(s2_, s2))
259
+
260
+ # Check that we have estimated the original sources
261
+ if not add_noise:
262
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3)
263
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3)
264
+
265
+
266
+ def test_fit_transform(global_random_seed, global_dtype):
267
+ """Test unit variance of transformed data using FastICA algorithm.
268
+
269
+ Check that `fit_transform` gives the same result as applying
270
+ `fit` and then `transform`.
271
+
272
+ Bug #13056
273
+ """
274
+ # multivariate uniform data in [0, 1]
275
+ rng = np.random.RandomState(global_random_seed)
276
+ X = rng.random_sample((100, 10)).astype(global_dtype)
277
+ max_iter = 300
278
+ for whiten, n_components in [["unit-variance", 5], [False, None]]:
279
+ n_components_ = n_components if n_components is not None else X.shape[1]
280
+
281
+ ica = FastICA(
282
+ n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
283
+ )
284
+ with warnings.catch_warnings():
285
+ # make sure that numerical errors do not cause sqrt of negative
286
+ # values
287
+ warnings.simplefilter("error", RuntimeWarning)
288
+ # XXX: for some seeds, the model does not converge.
289
+ # However this is not what we test here.
290
+ warnings.simplefilter("ignore", ConvergenceWarning)
291
+ Xt = ica.fit_transform(X)
292
+ assert ica.components_.shape == (n_components_, 10)
293
+ assert Xt.shape == (X.shape[0], n_components_)
294
+
295
+ ica2 = FastICA(
296
+ n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
297
+ )
298
+ with warnings.catch_warnings():
299
+ # make sure that numerical errors do not cause sqrt of negative
300
+ # values
301
+ warnings.simplefilter("error", RuntimeWarning)
302
+ warnings.simplefilter("ignore", ConvergenceWarning)
303
+ ica2.fit(X)
304
+ assert ica2.components_.shape == (n_components_, 10)
305
+ Xt2 = ica2.transform(X)
306
+
307
+ # XXX: we have to set atol for this test to pass for all seeds when
308
+ # fitting with float32 data. Is this revealing a bug?
309
+ if global_dtype == np.float32:
310
+ atol = np.abs(Xt2).mean() / 1e6
311
+ else:
312
+ atol = 0.0 # the default rtol is enough for float64 data
313
+ assert_allclose(Xt, Xt2, atol=atol)
314
+
315
+
316
+ @pytest.mark.filterwarnings("ignore:Ignoring n_components with whiten=False.")
317
+ @pytest.mark.parametrize(
318
+ "whiten, n_components, expected_mixing_shape",
319
+ [
320
+ ("arbitrary-variance", 5, (10, 5)),
321
+ ("arbitrary-variance", 10, (10, 10)),
322
+ ("unit-variance", 5, (10, 5)),
323
+ ("unit-variance", 10, (10, 10)),
324
+ (False, 5, (10, 10)),
325
+ (False, 10, (10, 10)),
326
+ ],
327
+ )
328
+ def test_inverse_transform(
329
+ whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype
330
+ ):
331
+ # Test FastICA.inverse_transform
332
+ n_samples = 100
333
+ rng = np.random.RandomState(global_random_seed)
334
+ X = rng.random_sample((n_samples, 10)).astype(global_dtype)
335
+
336
+ ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten)
337
+ with warnings.catch_warnings():
338
+ # For some dataset (depending on the value of global_dtype) the model
339
+ # can fail to converge but this should not impact the definition of
340
+ # a valid inverse transform.
341
+ warnings.simplefilter("ignore", ConvergenceWarning)
342
+ Xt = ica.fit_transform(X)
343
+ assert ica.mixing_.shape == expected_mixing_shape
344
+ X2 = ica.inverse_transform(Xt)
345
+ assert X.shape == X2.shape
346
+
347
+ # reversibility test in non-reduction case
348
+ if n_components == X.shape[1]:
349
+ # XXX: we have to set atol for this test to pass for all seeds when
350
+ # fitting with float32 data. Is this revealing a bug?
351
+ if global_dtype == np.float32:
352
+ # XXX: dividing by a smaller number makes
353
+ # tests fail for some seeds.
354
+ atol = np.abs(X2).mean() / 1e5
355
+ else:
356
+ atol = 0.0 # the default rtol is enough for float64 data
357
+ assert_allclose(X, X2, atol=atol)
358
+
359
+
360
+ def test_fastica_errors():
361
+ n_features = 3
362
+ n_samples = 10
363
+ rng = np.random.RandomState(0)
364
+ X = rng.random_sample((n_samples, n_features))
365
+ w_init = rng.randn(n_features + 1, n_features + 1)
366
+ with pytest.raises(ValueError, match=r"alpha must be in \[1,2\]"):
367
+ fastica(X, fun_args={"alpha": 0})
368
+ with pytest.raises(
369
+ ValueError, match="w_init has invalid shape.+" r"should be \(3L?, 3L?\)"
370
+ ):
371
+ fastica(X, w_init=w_init)
372
+
373
+
374
+ def test_fastica_whiten_unit_variance():
375
+ """Test unit variance of transformed data using FastICA algorithm.
376
+
377
+ Bug #13056
378
+ """
379
+ rng = np.random.RandomState(0)
380
+ X = rng.random_sample((100, 10))
381
+ n_components = X.shape[1]
382
+ ica = FastICA(n_components=n_components, whiten="unit-variance", random_state=0)
383
+ Xt = ica.fit_transform(X)
384
+
385
+ assert np.var(Xt) == pytest.approx(1.0)
386
+
387
+
388
+ @pytest.mark.parametrize("whiten", ["arbitrary-variance", "unit-variance", False])
389
+ @pytest.mark.parametrize("return_X_mean", [True, False])
390
+ @pytest.mark.parametrize("return_n_iter", [True, False])
391
+ def test_fastica_output_shape(whiten, return_X_mean, return_n_iter):
392
+ n_features = 3
393
+ n_samples = 10
394
+ rng = np.random.RandomState(0)
395
+ X = rng.random_sample((n_samples, n_features))
396
+
397
+ expected_len = 3 + return_X_mean + return_n_iter
398
+
399
+ out = fastica(
400
+ X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean
401
+ )
402
+
403
+ assert len(out) == expected_len
404
+ if not whiten:
405
+ assert out[0] is None
406
+
407
+
408
+ @pytest.mark.parametrize("add_noise", [True, False])
409
+ def test_fastica_simple_different_solvers(add_noise, global_random_seed):
410
+ """Test FastICA is consistent between whiten_solvers."""
411
+ rng = np.random.RandomState(global_random_seed)
412
+ n_samples = 1000
413
+ # Generate two sources:
414
+ s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
415
+ s2 = stats.t.rvs(1, size=n_samples, random_state=rng)
416
+ s = np.c_[s1, s2].T
417
+ center_and_norm(s)
418
+ s1, s2 = s
419
+
420
+ # Mixing angle
421
+ phi = rng.rand() * 2 * np.pi
422
+ mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
423
+ m = np.dot(mixing, s)
424
+
425
+ if add_noise:
426
+ m += 0.1 * rng.randn(2, 1000)
427
+
428
+ center_and_norm(m)
429
+
430
+ outs = {}
431
+ for solver in ("svd", "eigh"):
432
+ ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver=solver)
433
+ sources = ica.fit_transform(m.T)
434
+ outs[solver] = sources
435
+ assert ica.components_.shape == (2, 2)
436
+ assert sources.shape == (1000, 2)
437
+
438
+ # The compared numbers are not all of the same magnitude. Use a small atol to
439
+ # make the test less brittle
440
+ assert_allclose(outs["eigh"], outs["svd"], atol=1e-12)
441
+
442
+
443
+ def test_fastica_eigh_low_rank_warning(global_random_seed):
444
+ """Test FastICA eigh solver raises warning for low-rank data."""
445
+ rng = np.random.RandomState(global_random_seed)
446
+ A = rng.randn(10, 2)
447
+ X = A @ A.T
448
+ ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh")
449
+ msg = "There are some small singular values"
450
+ with pytest.warns(UserWarning, match=msg):
451
+ ica.fit(X)
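As a companion to the source-recovery tests above, here is a minimal blind source separation sketch with FastICA on synthetic signals. Everything below (signals, mixing matrix, names) is made up for illustration:

    import numpy as np
    from sklearn.decomposition import FastICA

    t = np.linspace(0, 8, 2000)
    s1 = np.sin(3 * t)                      # sinusoidal source
    s2 = np.sign(np.sin(7 * t))             # square-wave source
    S = np.c_[s1, s2]
    A = np.array([[1.0, 0.5], [0.4, 1.0]])  # mixing matrix
    X_mixed = S @ A.T                       # observed mixtures, shape (2000, 2)

    ica = FastICA(n_components=2, whiten="unit-variance", random_state=0)
    S_est = ica.fit_transform(X_mixed)      # estimated sources
    X_back = ica.inverse_transform(S_est)   # reconstruction of the mixtures
    print(S_est.shape, X_back.shape)        # (2000, 2) (2000, 2)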
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py ADDED
@@ -0,0 +1,452 @@
1
+ """Tests for Incremental PCA."""
2
+ import warnings
3
+
4
+ import numpy as np
5
+ import pytest
6
+ from numpy.testing import assert_array_equal
7
+
8
+ from sklearn import datasets
9
+ from sklearn.decomposition import PCA, IncrementalPCA
10
+ from sklearn.utils._testing import (
11
+ assert_allclose_dense_sparse,
12
+ assert_almost_equal,
13
+ assert_array_almost_equal,
14
+ )
15
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
16
+
17
+ iris = datasets.load_iris()
18
+
19
+
20
+ def test_incremental_pca():
21
+ # Incremental PCA on dense arrays.
22
+ X = iris.data
23
+ batch_size = X.shape[0] // 3
24
+ ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
25
+ pca = PCA(n_components=2)
26
+ pca.fit_transform(X)
27
+
28
+ X_transformed = ipca.fit_transform(X)
29
+
30
+ assert X_transformed.shape == (X.shape[0], 2)
31
+ np.testing.assert_allclose(
32
+ ipca.explained_variance_ratio_.sum(),
33
+ pca.explained_variance_ratio_.sum(),
34
+ rtol=1e-3,
35
+ )
36
+
37
+ for n_components in [1, 2, X.shape[1]]:
38
+ ipca = IncrementalPCA(n_components, batch_size=batch_size)
39
+ ipca.fit(X)
40
+ cov = ipca.get_covariance()
41
+ precision = ipca.get_precision()
42
+ np.testing.assert_allclose(
43
+ np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13
44
+ )
45
+
46
+
47
+ @pytest.mark.parametrize(
48
+ "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS
49
+ )
50
+ def test_incremental_pca_sparse(sparse_container):
51
+ # Incremental PCA on sparse arrays.
52
+ X = iris.data
53
+ pca = PCA(n_components=2)
54
+ pca.fit_transform(X)
55
+ X_sparse = sparse_container(X)
56
+ batch_size = X_sparse.shape[0] // 3
57
+ ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
58
+
59
+ X_transformed = ipca.fit_transform(X_sparse)
60
+
61
+ assert X_transformed.shape == (X_sparse.shape[0], 2)
62
+ np.testing.assert_allclose(
63
+ ipca.explained_variance_ratio_.sum(),
64
+ pca.explained_variance_ratio_.sum(),
65
+ rtol=1e-3,
66
+ )
67
+
68
+ for n_components in [1, 2, X.shape[1]]:
69
+ ipca = IncrementalPCA(n_components, batch_size=batch_size)
70
+ ipca.fit(X_sparse)
71
+ cov = ipca.get_covariance()
72
+ precision = ipca.get_precision()
73
+ np.testing.assert_allclose(
74
+ np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13
75
+ )
76
+
77
+ with pytest.raises(
78
+ TypeError,
79
+ match=(
80
+ "IncrementalPCA.partial_fit does not support "
81
+ "sparse input. Either convert data to dense "
82
+ "or use IncrementalPCA.fit to do so in batches."
83
+ ),
84
+ ):
85
+ ipca.partial_fit(X_sparse)
86
+
87
+
88
+ def test_incremental_pca_check_projection():
89
+ # Test that the projection of data is correct.
90
+ rng = np.random.RandomState(1999)
91
+ n, p = 100, 3
92
+ X = rng.randn(n, p) * 0.1
93
+ X[:10] += np.array([3, 4, 5])
94
+ Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
95
+
96
+ # Get the reconstruction of the generated data X
97
+ # Note that Xt has the same "components" as X, just separated
98
+ # This is what we want to ensure is recreated correctly
99
+ Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
100
+
101
+ # Normalize
102
+ Yt /= np.sqrt((Yt**2).sum())
103
+
104
+ # Make sure that the first element of Yt is ~1, this means
105
+ # the reconstruction worked as expected
106
+ assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1)
107
+
108
+
109
+ def test_incremental_pca_inverse():
110
+ # Test that the projection of data can be inverted.
111
+ rng = np.random.RandomState(1999)
112
+ n, p = 50, 3
113
+ X = rng.randn(n, p) # spherical data
114
+ X[:, 1] *= 0.00001 # make middle component relatively small
115
+ X += [5, 4, 3] # make a large mean
116
+
117
+ # same check that we can find the original data from the transformed
118
+ # signal (since the data is almost of rank n_components)
119
+ ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
120
+ Y = ipca.transform(X)
121
+ Y_inverse = ipca.inverse_transform(Y)
122
+ assert_almost_equal(X, Y_inverse, decimal=3)
123
+
124
+
125
+ def test_incremental_pca_validation():
126
+ # Test that n_components is <= n_features.
127
+ X = np.array([[0, 1, 0], [1, 0, 0]])
128
+ n_samples, n_features = X.shape
129
+ n_components = 4
130
+ with pytest.raises(
131
+ ValueError,
132
+ match=(
133
+ "n_components={} invalid"
134
+ " for n_features={}, need more rows than"
135
+ " columns for IncrementalPCA"
136
+ " processing".format(n_components, n_features)
137
+ ),
138
+ ):
139
+ IncrementalPCA(n_components, batch_size=10).fit(X)
140
+
141
+ # Tests that n_components is also <= n_samples.
142
+ n_components = 3
143
+ with pytest.raises(
144
+ ValueError,
145
+ match=(
146
+ "n_components={} must be"
147
+ " less or equal to the batch number of"
148
+ " samples {}".format(n_components, n_samples)
149
+ ),
150
+ ):
151
+ IncrementalPCA(n_components=n_components).partial_fit(X)
152
+
153
+
154
+ def test_n_samples_equal_n_components():
155
+ # Ensures no warning is raised when n_samples==n_components
156
+ # Non-regression test for gh-19050
157
+ ipca = IncrementalPCA(n_components=5)
158
+ with warnings.catch_warnings():
159
+ warnings.simplefilter("error", RuntimeWarning)
160
+ ipca.partial_fit(np.random.randn(5, 7))
161
+ with warnings.catch_warnings():
162
+ warnings.simplefilter("error", RuntimeWarning)
163
+ ipca.fit(np.random.randn(5, 7))
164
+
165
+
166
+ def test_n_components_none():
167
+ # Ensures that n_components == None is handled correctly
168
+ rng = np.random.RandomState(1999)
169
+ for n_samples, n_features in [(50, 10), (10, 50)]:
170
+ X = rng.rand(n_samples, n_features)
171
+ ipca = IncrementalPCA(n_components=None)
172
+
173
+ # First partial_fit call, ipca.n_components_ is inferred from
174
+ # min(X.shape)
175
+ ipca.partial_fit(X)
176
+ assert ipca.n_components_ == min(X.shape)
177
+
178
+ # Second partial_fit call, ipca.n_components_ is inferred from
179
+ # ipca.components_ computed from the first partial_fit call
180
+ ipca.partial_fit(X)
181
+ assert ipca.n_components_ == ipca.components_.shape[0]
182
+
183
+
184
+ def test_incremental_pca_set_params():
185
+ # Test that components_ sign is stable over batch sizes.
186
+ rng = np.random.RandomState(1999)
187
+ n_samples = 100
188
+ n_features = 20
189
+ X = rng.randn(n_samples, n_features)
190
+ X2 = rng.randn(n_samples, n_features)
191
+ X3 = rng.randn(n_samples, n_features)
192
+ ipca = IncrementalPCA(n_components=20)
193
+ ipca.fit(X)
194
+ # Decreasing number of components
195
+ ipca.set_params(n_components=10)
196
+ with pytest.raises(ValueError):
197
+ ipca.partial_fit(X2)
198
+ # Increasing number of components
199
+ ipca.set_params(n_components=15)
200
+ with pytest.raises(ValueError):
201
+ ipca.partial_fit(X3)
202
+ # Returning to original setting
203
+ ipca.set_params(n_components=20)
204
+ ipca.partial_fit(X)
205
+
206
+
207
+ def test_incremental_pca_num_features_change():
208
+ # Test that changing n_components will raise an error.
209
+ rng = np.random.RandomState(1999)
210
+ n_samples = 100
211
+ X = rng.randn(n_samples, 20)
212
+ X2 = rng.randn(n_samples, 50)
213
+ ipca = IncrementalPCA(n_components=None)
214
+ ipca.fit(X)
215
+ with pytest.raises(ValueError):
216
+ ipca.partial_fit(X2)
217
+
218
+
219
+ def test_incremental_pca_batch_signs():
220
+ # Test that components_ sign is stable over batch sizes.
221
+ rng = np.random.RandomState(1999)
222
+ n_samples = 100
223
+ n_features = 3
224
+ X = rng.randn(n_samples, n_features)
225
+ all_components = []
226
+ batch_sizes = np.arange(10, 20)
227
+ for batch_size in batch_sizes:
228
+ ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
229
+ all_components.append(ipca.components_)
230
+
231
+ for i, j in zip(all_components[:-1], all_components[1:]):
232
+ assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
233
+
234
+
235
+ def test_incremental_pca_batch_values():
236
+ # Test that components_ values are stable over batch sizes.
237
+ rng = np.random.RandomState(1999)
238
+ n_samples = 100
239
+ n_features = 3
240
+ X = rng.randn(n_samples, n_features)
241
+ all_components = []
242
+ batch_sizes = np.arange(20, 40, 3)
243
+ for batch_size in batch_sizes:
244
+ ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
245
+ all_components.append(ipca.components_)
246
+
247
+ for i, j in zip(all_components[:-1], all_components[1:]):
248
+ assert_almost_equal(i, j, decimal=1)
249
+
250
+
251
+ def test_incremental_pca_batch_rank():
252
+ # Test sample size in each batch is always larger or equal to n_components
253
+ rng = np.random.RandomState(1999)
254
+ n_samples = 100
255
+ n_features = 20
256
+ X = rng.randn(n_samples, n_features)
257
+ all_components = []
258
+ batch_sizes = np.arange(20, 90, 3)
259
+ for batch_size in batch_sizes:
260
+ ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X)
261
+ all_components.append(ipca.components_)
262
+
263
+ for components_i, components_j in zip(all_components[:-1], all_components[1:]):
264
+ assert_allclose_dense_sparse(components_i, components_j)
265
+
266
+
267
+ def test_incremental_pca_partial_fit():
268
+ # Test that fit and partial_fit get equivalent results.
269
+ rng = np.random.RandomState(1999)
270
+ n, p = 50, 3
271
+ X = rng.randn(n, p) # spherical data
272
+ X[:, 1] *= 0.00001 # make middle component relatively small
273
+ X += [5, 4, 3] # make a large mean
274
+
275
+ # same check that we can find the original data from the transformed
276
+ # signal (since the data is almost of rank n_components)
277
+ batch_size = 10
278
+ ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
279
+ pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
280
+ # Add one to make sure endpoint is included
281
+ batch_itr = np.arange(0, n + 1, batch_size)
282
+ for i, j in zip(batch_itr[:-1], batch_itr[1:]):
283
+ pipca.partial_fit(X[i:j, :])
284
+ assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
285
+
286
+
287
+ def test_incremental_pca_against_pca_iris():
288
+ # Test that IncrementalPCA and PCA are approximate (to a sign flip).
289
+ X = iris.data
290
+
291
+ Y_pca = PCA(n_components=2).fit_transform(X)
292
+ Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
293
+
294
+ assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
295
+
296
+
297
+ def test_incremental_pca_against_pca_random_data():
298
+ # Test that IncrementalPCA and PCA are approximate (to a sign flip).
299
+ rng = np.random.RandomState(1999)
300
+ n_samples = 100
301
+ n_features = 3
302
+ X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
303
+
304
+ Y_pca = PCA(n_components=3).fit_transform(X)
305
+ Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
306
+
307
+ assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
308
+
309
+
310
+ def test_explained_variances():
311
+ # Test that PCA and IncrementalPCA calculations match
312
+ X = datasets.make_low_rank_matrix(
313
+ 1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999
314
+ )
315
+ prec = 3
316
+ n_samples, n_features = X.shape
317
+ for nc in [None, 99]:
318
+ pca = PCA(n_components=nc).fit(X)
319
+ ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
320
+ assert_almost_equal(
321
+ pca.explained_variance_, ipca.explained_variance_, decimal=prec
322
+ )
323
+ assert_almost_equal(
324
+ pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec
325
+ )
326
+ assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec)
327
+
328
+
329
+ def test_singular_values():
330
+ # Check that the IncrementalPCA output has the correct singular values
331
+
332
+ rng = np.random.RandomState(0)
333
+ n_samples = 1000
334
+ n_features = 100
335
+
336
+ X = datasets.make_low_rank_matrix(
337
+ n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng
338
+ )
339
+
340
+ pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X)
341
+ ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
342
+ assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
343
+
344
+ # Compare to the Frobenius norm
345
+ X_pca = pca.transform(X)
346
+ X_ipca = ipca.transform(X)
347
+ assert_array_almost_equal(
348
+ np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12
349
+ )
350
+ assert_array_almost_equal(
351
+ np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2
352
+ )
353
+
354
+ # Compare to the 2-norms of the score vectors
355
+ assert_array_almost_equal(
356
+ pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12
357
+ )
358
+ assert_array_almost_equal(
359
+ ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2
360
+ )
361
+
362
+ # Set the singular values and see what we get back
363
+ rng = np.random.RandomState(0)
364
+ n_samples = 100
365
+ n_features = 110
366
+
367
+ X = datasets.make_low_rank_matrix(
368
+ n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng
369
+ )
370
+
371
+ pca = PCA(n_components=3, svd_solver="full", random_state=rng)
372
+ ipca = IncrementalPCA(n_components=3, batch_size=100)
373
+
374
+ X_pca = pca.fit_transform(X)
375
+ X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
376
+ X_pca[:, 0] *= 3.142
377
+ X_pca[:, 1] *= 2.718
378
+
379
+ X_hat = np.dot(X_pca, pca.components_)
380
+ pca.fit(X_hat)
381
+ ipca.fit(X_hat)
382
+ assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
383
+ assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
384
+
385
+
386
+ def test_whitening():
387
+ # Test that PCA and IncrementalPCA transforms match to sign flip.
388
+ X = datasets.make_low_rank_matrix(
389
+ 1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999
390
+ )
391
+ prec = 3
392
+ n_samples, n_features = X.shape
393
+ for nc in [None, 9]:
394
+ pca = PCA(whiten=True, n_components=nc).fit(X)
395
+ ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X)
396
+
397
+ Xt_pca = pca.transform(X)
398
+ Xt_ipca = ipca.transform(X)
399
+ assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
400
+ Xinv_ipca = ipca.inverse_transform(Xt_ipca)
401
+ Xinv_pca = pca.inverse_transform(Xt_pca)
402
+ assert_almost_equal(X, Xinv_ipca, decimal=prec)
403
+ assert_almost_equal(X, Xinv_pca, decimal=prec)
404
+ assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
405
+
406
+
407
+ def test_incremental_pca_partial_fit_float_division():
408
+ # Test to ensure float division is used in all versions of Python
409
+ # (non-regression test for issue #9489)
410
+
411
+ rng = np.random.RandomState(0)
412
+ A = rng.randn(5, 3) + 2
413
+ B = rng.randn(7, 3) + 5
414
+
415
+ pca = IncrementalPCA(n_components=2)
416
+ pca.partial_fit(A)
417
+ # Set n_samples_seen_ to be a floating point number instead of an int
418
+ pca.n_samples_seen_ = float(pca.n_samples_seen_)
419
+ pca.partial_fit(B)
420
+ singular_vals_float_samples_seen = pca.singular_values_
421
+
422
+ pca2 = IncrementalPCA(n_components=2)
423
+ pca2.partial_fit(A)
424
+ pca2.partial_fit(B)
425
+ singular_vals_int_samples_seen = pca2.singular_values_
426
+
427
+ np.testing.assert_allclose(
428
+ singular_vals_float_samples_seen, singular_vals_int_samples_seen
429
+ )
430
+
431
+
432
+ def test_incremental_pca_fit_overflow_error():
433
+ # Test for overflow error on Windows OS
434
+ # (non-regression test for issue #17693)
435
+ rng = np.random.RandomState(0)
436
+ A = rng.rand(500000, 2)
437
+
438
+ ipca = IncrementalPCA(n_components=2, batch_size=10000)
439
+ ipca.fit(A)
440
+
441
+ pca = PCA(n_components=2)
442
+ pca.fit(A)
443
+
444
+ np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_)
445
+
446
+
447
+ def test_incremental_pca_feature_names_out():
448
+ """Check feature names out for IncrementalPCA."""
449
+ ipca = IncrementalPCA(n_components=2).fit(iris.data)
450
+
451
+ names = ipca.get_feature_names_out()
452
+ assert_array_equal([f"incrementalpca{i}" for i in range(2)], names)
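To show the partial_fit pattern that the equivalence tests above rely on, here is a minimal sketch fitting IncrementalPCA batch by batch on toy data (illustrative only, not part of the diff):

    import numpy as np
    from sklearn.decomposition import IncrementalPCA

    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 10)  # toy data

    ipca = IncrementalPCA(n_components=3)
    for batch in np.array_split(X_demo, 10):  # 10 batches of 20 samples each
        ipca.partial_fit(batch)               # each batch must have >= n_components rows

    X_proj = ipca.transform(X_demo)
    print(X_proj.shape)  # (200, 3)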
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py ADDED
@@ -0,0 +1,566 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import sklearn
7
+ from sklearn.datasets import load_iris, make_blobs, make_circles
8
+ from sklearn.decomposition import PCA, KernelPCA
9
+ from sklearn.exceptions import NotFittedError
10
+ from sklearn.linear_model import Perceptron
11
+ from sklearn.metrics.pairwise import rbf_kernel
12
+ from sklearn.model_selection import GridSearchCV
13
+ from sklearn.pipeline import Pipeline
14
+ from sklearn.preprocessing import StandardScaler
15
+ from sklearn.utils._testing import (
16
+ assert_allclose,
17
+ assert_array_almost_equal,
18
+ assert_array_equal,
19
+ )
20
+ from sklearn.utils.fixes import CSR_CONTAINERS
21
+ from sklearn.utils.validation import _check_psd_eigenvalues
22
+
23
+
24
+ def test_kernel_pca():
25
+ """Nominal test for all solvers and all known kernels + a custom one
26
+
27
+ It tests
28
+ - that fit_transform is equivalent to fit+transform
29
+ - that the shapes of transforms and inverse transforms are correct
30
+ """
31
+ rng = np.random.RandomState(0)
32
+ X_fit = rng.random_sample((5, 4))
33
+ X_pred = rng.random_sample((2, 4))
34
+
35
+ def histogram(x, y, **kwargs):
36
+ # Histogram kernel implemented as a callable.
37
+ assert kwargs == {} # no kernel_params that we didn't ask for
38
+ return np.minimum(x, y).sum()
39
+
40
+ for eigen_solver in ("auto", "dense", "arpack", "randomized"):
41
+ for kernel in ("linear", "rbf", "poly", histogram):
42
+ # histogram kernel produces singular matrix inside linalg.solve
43
+ # XXX use a least-squares approximation?
44
+ inv = not callable(kernel)
45
+
46
+ # transform fit data
47
+ kpca = KernelPCA(
48
+ 4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv
49
+ )
50
+ X_fit_transformed = kpca.fit_transform(X_fit)
51
+ X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
52
+ assert_array_almost_equal(
53
+ np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
54
+ )
55
+
56
+ # non-regression test: previously, gamma would be 0 by default,
57
+ # forcing all eigenvalues to 0 under the poly kernel
58
+ assert X_fit_transformed.size != 0
59
+
60
+ # transform new data
61
+ X_pred_transformed = kpca.transform(X_pred)
62
+ assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]
63
+
64
+ # inverse transform
65
+ if inv:
66
+ X_pred2 = kpca.inverse_transform(X_pred_transformed)
67
+ assert X_pred2.shape == X_pred.shape
68
+
69
+
70
+ def test_kernel_pca_invalid_parameters():
71
+ """Check that kPCA raises an error if the parameters are invalid
72
+
73
+ Tests fitting inverse transform with a precomputed kernel raises a
74
+ ValueError.
75
+ """
76
+ estimator = KernelPCA(
77
+ n_components=10, fit_inverse_transform=True, kernel="precomputed"
78
+ )
79
+ err_ms = "Cannot fit_inverse_transform with a precomputed kernel"
80
+ with pytest.raises(ValueError, match=err_ms):
81
+ estimator.fit(np.random.randn(10, 10))
82
+
83
+
84
+ def test_kernel_pca_consistent_transform():
85
+ """Check robustness to mutations in the original training array
86
+
87
+ Test that after fitting a kPCA model, it stays independent of any
88
+ mutation of the values of the original data object by relying on an
89
+ internal copy.
90
+ """
91
+ # X_fit_ needs to retain the old, unmodified copy of X
92
+ state = np.random.RandomState(0)
93
+ X = state.rand(10, 10)
94
+ kpca = KernelPCA(random_state=state).fit(X)
95
+ transformed1 = kpca.transform(X)
96
+
97
+ X_copy = X.copy()
98
+ X[:, 0] = 666
99
+ transformed2 = kpca.transform(X_copy)
100
+ assert_array_almost_equal(transformed1, transformed2)
101
+
102
+
103
+ def test_kernel_pca_deterministic_output():
104
+ """Test that Kernel PCA produces deterministic output
105
+
106
+ Tests that the same inputs and random state produce the same output.
107
+ """
108
+ rng = np.random.RandomState(0)
109
+ X = rng.rand(10, 10)
110
+ eigen_solver = ("arpack", "dense")
111
+
112
+ for solver in eigen_solver:
113
+ transformed_X = np.zeros((20, 2))
114
+ for i in range(20):
115
+ kpca = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng)
116
+ transformed_X[i, :] = kpca.fit_transform(X)[0]
117
+ assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
118
+
119
+
120
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
121
+ def test_kernel_pca_sparse(csr_container):
122
+ """Test that kPCA works on a sparse data input.
123
+
124
+ Same test as ``test_kernel_pca`` except ``inverse_transform``, since it's not
125
+ implemented for sparse matrices.
126
+ """
127
+ rng = np.random.RandomState(0)
128
+ X_fit = csr_container(rng.random_sample((5, 4)))
129
+ X_pred = csr_container(rng.random_sample((2, 4)))
130
+
131
+ for eigen_solver in ("auto", "arpack", "randomized"):
132
+ for kernel in ("linear", "rbf", "poly"):
133
+ # transform fit data
134
+ kpca = KernelPCA(
135
+ 4,
136
+ kernel=kernel,
137
+ eigen_solver=eigen_solver,
138
+ fit_inverse_transform=False,
139
+ random_state=0,
140
+ )
141
+ X_fit_transformed = kpca.fit_transform(X_fit)
142
+ X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
143
+ assert_array_almost_equal(
144
+ np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
145
+ )
146
+
147
+ # transform new data
148
+ X_pred_transformed = kpca.transform(X_pred)
149
+ assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]
150
+
151
+ # inverse transform: not available for sparse matrices
152
+ # XXX: should we raise another exception type here? For instance:
153
+ # NotImplementedError.
154
+ with pytest.raises(NotFittedError):
155
+ kpca.inverse_transform(X_pred_transformed)
156
+
157
+
158
+ @pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
159
+ @pytest.mark.parametrize("n_features", [4, 10])
160
+ def test_kernel_pca_linear_kernel(solver, n_features):
161
+ """Test that kPCA with linear kernel is equivalent to PCA for all solvers.
162
+
163
+ KernelPCA with linear kernel should produce the same output as PCA.
164
+ """
165
+ rng = np.random.RandomState(0)
166
+ X_fit = rng.random_sample((5, n_features))
167
+ X_pred = rng.random_sample((2, n_features))
168
+
169
+ # for a linear kernel, kernel PCA should find the same projection as PCA
170
+ # modulo the sign (direction)
171
+ # fit only the first four components: fifth is near zero eigenvalue, so
172
+ # can be trimmed due to roundoff error
173
+ n_comps = 3 if solver == "arpack" else 4
174
+ assert_array_almost_equal(
175
+ np.abs(KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)),
176
+ np.abs(
177
+ PCA(n_comps, svd_solver=solver if solver != "dense" else "full")
178
+ .fit(X_fit)
179
+ .transform(X_pred)
180
+ ),
181
+ )
182
+
183
+
184
+ def test_kernel_pca_n_components():
185
+ """Test that `n_components` is correctly taken into account for projections
186
+
187
+ For all solvers this tests that the output has the correct shape depending
188
+ on the selected number of components.
189
+ """
190
+ rng = np.random.RandomState(0)
191
+ X_fit = rng.random_sample((5, 4))
192
+ X_pred = rng.random_sample((2, 4))
193
+
194
+ for eigen_solver in ("dense", "arpack", "randomized"):
195
+ for c in [1, 2, 4]:
196
+ kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
197
+ shape = kpca.fit(X_fit).transform(X_pred).shape
198
+
199
+ assert shape == (2, c)
200
+
201
+
202
+ def test_remove_zero_eig():
203
+ """Check that the ``remove_zero_eig`` parameter works correctly.
204
+
205
+ Tests that the null-space (Zero) eigenvalues are removed when
206
+ remove_zero_eig=True, whereas they are not by default.
207
+ """
208
+ X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
209
+
210
+ # n_components=None (default) => remove_zero_eig is True
211
+ kpca = KernelPCA()
212
+ Xt = kpca.fit_transform(X)
213
+ assert Xt.shape == (3, 0)
214
+
215
+ kpca = KernelPCA(n_components=2)
216
+ Xt = kpca.fit_transform(X)
217
+ assert Xt.shape == (3, 2)
218
+
219
+ kpca = KernelPCA(n_components=2, remove_zero_eig=True)
220
+ Xt = kpca.fit_transform(X)
221
+ assert Xt.shape == (3, 0)
222
+
223
+
224
+ def test_leave_zero_eig():
225
+ """Non-regression test for issue #12141 (PR #12143)
226
+
227
+ This test checks that fit().transform() returns the same result as
228
+ fit_transform() in case of non-removed zero eigenvalue.
229
+ """
230
+ X_fit = np.array([[1, 1], [0, 0]])
231
+
232
+ # Assert that even with all np warnings on, there is no div by zero warning
233
+ with warnings.catch_warnings():
234
+ # There might be warnings about the kernel being badly conditioned,
235
+ # but there should not be warnings about division by zero.
236
+ # (Numpy division by zero warning can have many message variants, but
237
+ # at least we know that it is a RuntimeWarning so let's check only this)
238
+ warnings.simplefilter("error", RuntimeWarning)
239
+ with np.errstate(all="warn"):
240
+ k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense")
241
+ # Fit, then transform
242
+ A = k.fit(X_fit).transform(X_fit)
243
+ # Do both at once
244
+ B = k.fit_transform(X_fit)
245
+ # Compare
246
+ assert_array_almost_equal(np.abs(A), np.abs(B))
247
+
248
+
249
+ def test_kernel_pca_precomputed():
250
+ """Test that kPCA works with a precomputed kernel, for all solvers"""
251
+ rng = np.random.RandomState(0)
252
+ X_fit = rng.random_sample((5, 4))
253
+ X_pred = rng.random_sample((2, 4))
254
+
255
+ for eigen_solver in ("dense", "arpack", "randomized"):
256
+ X_kpca = (
257
+ KernelPCA(4, eigen_solver=eigen_solver, random_state=0)
258
+ .fit(X_fit)
259
+ .transform(X_pred)
260
+ )
261
+
262
+ X_kpca2 = (
263
+ KernelPCA(
264
+ 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
265
+ )
266
+ .fit(np.dot(X_fit, X_fit.T))
267
+ .transform(np.dot(X_pred, X_fit.T))
268
+ )
269
+
270
+ X_kpca_train = KernelPCA(
271
+ 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
272
+ ).fit_transform(np.dot(X_fit, X_fit.T))
273
+
274
+ X_kpca_train2 = (
275
+ KernelPCA(
276
+ 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
277
+ )
278
+ .fit(np.dot(X_fit, X_fit.T))
279
+ .transform(np.dot(X_fit, X_fit.T))
280
+ )
281
+
282
+ assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2))
283
+
284
+ assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2))
285
+
286
+
287
+ @pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
288
+ def test_kernel_pca_precomputed_non_symmetric(solver):
289
+ """Check that the kernel centerer works.
290
+
291
+ Tests that a non symmetric precomputed kernel is actually accepted
292
+ because the kernel centerer does its job correctly.
293
+ """
294
+
295
+ # a non symmetric gram matrix
296
+ K = [[1, 2], [3, 40]]
297
+ kpca = KernelPCA(
298
+ kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0
299
+ )
300
+ kpca.fit(K) # no error
301
+
302
+ # same test with centered kernel
303
+ Kc = [[9, -9], [-9, 9]]
304
+ kpca_c = KernelPCA(
305
+ kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0
306
+ )
307
+ kpca_c.fit(Kc)
308
+
309
+ # comparison between the non-centered and centered versions
310
+ assert_array_equal(kpca.eigenvectors_, kpca_c.eigenvectors_)
311
+ assert_array_equal(kpca.eigenvalues_, kpca_c.eigenvalues_)
312
+
313
+
314
+ def test_gridsearch_pipeline():
315
+ """Check that kPCA works as expected in a grid search pipeline
316
+
317
+ Test if we can do a grid-search to find parameters to separate
318
+ circles with a perceptron model.
319
+ """
320
+ X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
321
+ kpca = KernelPCA(kernel="rbf", n_components=2)
322
+ pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))])
323
+ param_grid = dict(kernel_pca__gamma=2.0 ** np.arange(-2, 2))
324
+ grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
325
+ grid_search.fit(X, y)
326
+ assert grid_search.best_score_ == 1
327
+
328
+
329
+ def test_gridsearch_pipeline_precomputed():
330
+ """Check that kPCA works as expected in a grid search pipeline (2)
331
+
332
+ Test if we can do a grid-search to find parameters to separate
333
+ circles with a perceptron model. This test uses a precomputed kernel.
334
+ """
335
+ X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
336
+ kpca = KernelPCA(kernel="precomputed", n_components=2)
337
+ pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))])
338
+ param_grid = dict(Perceptron__max_iter=np.arange(1, 5))
339
+ grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
340
+ X_kernel = rbf_kernel(X, gamma=2.0)
341
+ grid_search.fit(X_kernel, y)
342
+ assert grid_search.best_score_ == 1
343
+
344
+
345
+ def test_nested_circles():
346
+ """Check that kPCA projects in a space where nested circles are separable
347
+
348
+ Tests that 2D nested circles become separable with a perceptron when
349
+ projected in the first 2 kPCA using an RBF kernel, while raw samples
350
+ are not directly separable in the original space.
351
+ """
352
+ X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
353
+
354
+ # 2D nested circles are not linearly separable
355
+ train_score = Perceptron(max_iter=5).fit(X, y).score(X, y)
356
+ assert train_score < 0.8
357
+
358
+ # Project the circles data into the first 2 components of a RBF Kernel
359
+ # PCA model.
360
+ # Note that the gamma value is data dependent. If this test breaks
361
+ # and the gamma value has to be updated, the Kernel PCA example will
362
+ # have to be updated too.
363
+ kpca = KernelPCA(
364
+ kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0
365
+ )
366
+ X_kpca = kpca.fit_transform(X)
367
+
368
+ # The data is perfectly linearly separable in that space
369
+ train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y)
370
+ assert train_score == 1.0
371
+
372
+
373
+ def test_kernel_conditioning():
374
+ """Check that ``_check_psd_eigenvalues`` is correctly called in kPCA
375
+
376
+ Non-regression test for issue #12140 (PR #12145).
377
+ """
378
+
379
+ # create a pathological X leading to small non-zero eigenvalue
380
+ X = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]]
381
+ kpca = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True)
382
+ kpca.fit(X)
383
+
384
+ # check that the small non-zero eigenvalue was correctly set to zero
385
+ assert kpca.eigenvalues_.min() == 0
386
+ assert np.all(kpca.eigenvalues_ == _check_psd_eigenvalues(kpca.eigenvalues_))
387
+
388
+
389
+ @pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
390
+ def test_precomputed_kernel_not_psd(solver):
391
+ """Check how KernelPCA works with non-PSD kernels depending on n_components
392
+
393
+ Tests for all methods what happens with a non PSD gram matrix (this
394
+ can happen in an isomap scenario, or with custom kernel functions, or
395
+ maybe with ill-posed datasets).
396
+
397
+ When ``n_components`` is large enough to capture a negative eigenvalue, an
398
+ error should be raised. Otherwise, KernelPCA should run without error
399
+ since the negative eigenvalues are not selected.
400
+ """
401
+
402
+ # a non PSD kernel with large eigenvalues, already centered
403
+ # it was captured from an isomap call and multiplied by 100 for compactness
404
+ K = [
405
+ [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78],
406
+ [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49],
407
+ [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23],
408
+ [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9],
409
+ [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9],
410
+ [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9],
411
+ [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75],
412
+ [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46],
413
+ ]
414
+ # this gram matrix has 5 positive eigenvalues and 3 negative ones
415
+ # [ 52.72, 7.65, 7.65, 5.02, 0. , -0. , -6.13, -15.11]
416
+
417
+ # 1. ask for enough components to get a significant negative one
418
+ kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7)
419
+ # make sure that the appropriate error is raised
420
+ with pytest.raises(ValueError, match="There are significant negative eigenvalues"):
421
+ kpca.fit(K)
422
+
423
+ # 2. ask for a small enough n_components to get only positive ones
424
+ kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2)
425
+ if solver == "randomized":
426
+ # the randomized method is still inconsistent with the others on this
427
+ # since it selects the eigenvalues based on the largest 2 moduli (absolute values), not
428
+ # on the largest 2 values.
429
+ #
430
+ # At least we can ensure that we return an error instead of returning
431
+ # the wrong eigenvalues
432
+ with pytest.raises(
433
+ ValueError, match="There are significant negative eigenvalues"
434
+ ):
435
+ kpca.fit(K)
436
+ else:
437
+ # general case: make sure that it works
438
+ kpca.fit(K)
439
+
440
+
441
+ @pytest.mark.parametrize("n_components", [4, 10, 20])
442
+ def test_kernel_pca_solvers_equivalence(n_components):
443
+ """Check that 'dense' 'arpack' & 'randomized' solvers give similar results"""
444
+
445
+ # Generate random data
446
+ n_train, n_test = 1_000, 100
447
+ X, _ = make_circles(
448
+ n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0
449
+ )
450
+ X_fit, X_pred = X[:n_train, :], X[n_train:, :]
451
+
452
+ # reference (full)
453
+ ref_pred = (
454
+ KernelPCA(n_components, eigen_solver="dense", random_state=0)
455
+ .fit(X_fit)
456
+ .transform(X_pred)
457
+ )
458
+
459
+ # arpack
460
+ a_pred = (
461
+ KernelPCA(n_components, eigen_solver="arpack", random_state=0)
462
+ .fit(X_fit)
463
+ .transform(X_pred)
464
+ )
465
+ # check that the result is still correct despite the approx
466
+ assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred))
467
+
468
+ # randomized
469
+ r_pred = (
470
+ KernelPCA(n_components, eigen_solver="randomized", random_state=0)
471
+ .fit(X_fit)
472
+ .transform(X_pred)
473
+ )
474
+ # check that the result is still correct despite the approximation
475
+ assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred))
476
+
477
+
478
+ def test_kernel_pca_inverse_transform_reconstruction():
479
+ """Test if the reconstruction is a good approximation.
480
+
481
+ Note that in general it is not possible to get an arbitrarily good
482
+ reconstruction because of kernel centering that does not
483
+ preserve all the information of the original data.
484
+ """
485
+ X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
486
+
487
+ kpca = KernelPCA(
488
+ n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3
489
+ )
490
+ X_trans = kpca.fit_transform(X)
491
+ X_reconst = kpca.inverse_transform(X_trans)
492
+ assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1
493
+
494
+
495
+ def test_kernel_pca_raise_not_fitted_error():
496
+ X = np.random.randn(15).reshape(5, 3)
497
+ kpca = KernelPCA()
498
+ kpca.fit(X)
499
+ with pytest.raises(NotFittedError):
500
+ kpca.inverse_transform(X)
501
+
502
+
503
+ def test_32_64_decomposition_shape():
504
+ """Test that the decomposition is similar for 32 and 64 bits data
505
+
506
+ Non regression test for
507
+ https://github.com/scikit-learn/scikit-learn/issues/18146
508
+ """
509
+ X, y = make_blobs(
510
+ n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1
511
+ )
512
+ X = StandardScaler().fit_transform(X)
513
+ X -= X.min()
514
+
515
+ # Compare the shapes (corresponds to the number of non-zero eigenvalues)
516
+ kpca = KernelPCA()
517
+ assert kpca.fit_transform(X).shape == kpca.fit_transform(X.astype(np.float32)).shape
518
+
519
+
520
+ def test_kernel_pca_feature_names_out():
521
+ """Check feature names out for KernelPCA."""
522
+ X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
523
+ kpca = KernelPCA(n_components=2).fit(X)
524
+
525
+ names = kpca.get_feature_names_out()
526
+ assert_array_equal([f"kernelpca{i}" for i in range(2)], names)
527
+
528
+
529
+ def test_kernel_pca_inverse_correct_gamma():
530
+ """Check that gamma is set correctly when not provided.
531
+
532
+ Non-regression test for #26280
533
+ """
534
+ rng = np.random.RandomState(0)
535
+ X = rng.random_sample((5, 4))
536
+
537
+ kwargs = {
538
+ "n_components": 2,
539
+ "random_state": rng,
540
+ "fit_inverse_transform": True,
541
+ "kernel": "rbf",
542
+ }
543
+
544
+ expected_gamma = 1 / X.shape[1]
545
+ kpca1 = KernelPCA(gamma=None, **kwargs).fit(X)
546
+ kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X)
547
+
548
+ assert kpca1.gamma_ == expected_gamma
549
+ assert kpca2.gamma_ == expected_gamma
550
+
551
+ X1_recon = kpca1.inverse_transform(kpca1.transform(X))
552
+ X2_recon = kpca2.inverse_transform(kpca1.transform(X))
553
+
554
+ assert_allclose(X1_recon, X2_recon)
555
+
556
+
557
+ def test_kernel_pca_pandas_output():
558
+ """Check that KernelPCA works with pandas output when the solver is arpack.
559
+
560
+ Non-regression test for:
561
+ https://github.com/scikit-learn/scikit-learn/issues/27579
562
+ """
563
+ pytest.importorskip("pandas")
564
+ X, _ = load_iris(as_frame=True, return_X_y=True)
565
+ with sklearn.config_context(transform_output="pandas"):
566
+ KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py ADDED
@@ -0,0 +1,1062 @@
1
+ import re
2
+ import sys
3
+ import warnings
4
+ from io import StringIO
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from scipy import linalg
9
+
10
+ from sklearn.base import clone
11
+ from sklearn.decomposition import NMF, MiniBatchNMF, non_negative_factorization
12
+ from sklearn.decomposition import _nmf as nmf # For testing internals
13
+ from sklearn.exceptions import ConvergenceWarning
14
+ from sklearn.utils._testing import (
15
+ assert_allclose,
16
+ assert_almost_equal,
17
+ assert_array_almost_equal,
18
+ assert_array_equal,
19
+ ignore_warnings,
20
+ )
21
+ from sklearn.utils.extmath import squared_norm
22
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
23
+
24
+
25
+ @pytest.mark.parametrize(
26
+ ["Estimator", "solver"],
27
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
28
+ )
29
+ def test_convergence_warning(Estimator, solver):
30
+ convergence_warning = (
31
+ "Maximum number of iterations 1 reached. Increase it to improve convergence."
32
+ )
33
+ A = np.ones((2, 2))
34
+ with pytest.warns(ConvergenceWarning, match=convergence_warning):
35
+ Estimator(max_iter=1, n_components="auto", **solver).fit(A)
36
+
37
+
38
+ def test_initialize_nn_output():
39
+ # Test that initialization does not return negative values
40
+ rng = np.random.mtrand.RandomState(42)
41
+ data = np.abs(rng.randn(10, 10))
42
+ for init in ("random", "nndsvd", "nndsvda", "nndsvdar"):
43
+ W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
44
+ assert not ((W < 0).any() or (H < 0).any())
45
+
46
+
47
+ # TODO(1.6): remove the warning filter for `n_components`
48
+ @pytest.mark.filterwarnings(
49
+ r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in"
50
+ r" the initialization",
51
+ "ignore:The default value of `n_components` will change",
52
+ )
53
+ def test_parameter_checking():
54
+ # Here we only check for invalid parameter values that are not already
55
+ # automatically tested in the common tests.
56
+
57
+ A = np.ones((2, 2))
58
+
59
+ msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0"
60
+ with pytest.raises(ValueError, match=msg):
61
+ NMF(solver="cd", beta_loss=1.0).fit(A)
62
+ msg = "Negative values in data passed to"
63
+ with pytest.raises(ValueError, match=msg):
64
+ NMF().fit(-A)
65
+ clf = NMF(2, tol=0.1).fit(A)
66
+ with pytest.raises(ValueError, match=msg):
67
+ clf.transform(-A)
68
+ with pytest.raises(ValueError, match=msg):
69
+ nmf._initialize_nmf(-A, 2, "nndsvd")
70
+
71
+ for init in ["nndsvd", "nndsvda", "nndsvdar"]:
72
+ msg = re.escape(
73
+ "init = '{}' can only be used when "
74
+ "n_components <= min(n_samples, n_features)".format(init)
75
+ )
76
+ with pytest.raises(ValueError, match=msg):
77
+ NMF(3, init=init).fit(A)
78
+ with pytest.raises(ValueError, match=msg):
79
+ MiniBatchNMF(3, init=init).fit(A)
80
+ with pytest.raises(ValueError, match=msg):
81
+ nmf._initialize_nmf(A, 3, init)
82
+
83
+
84
+ def test_initialize_close():
85
+ # Test NNDSVD error
86
+ # Test that _initialize_nmf error is less than the standard deviation of
87
+ # the entries in the matrix.
88
+ rng = np.random.mtrand.RandomState(42)
89
+ A = np.abs(rng.randn(10, 10))
90
+ W, H = nmf._initialize_nmf(A, 10, init="nndsvd")
91
+ error = linalg.norm(np.dot(W, H) - A)
92
+ sdev = linalg.norm(A - A.mean())
93
+ assert error <= sdev
94
+
95
+
96
+ def test_initialize_variants():
97
+ # Test NNDSVD variants correctness
98
+ # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
99
+ # 'nndsvd' only where the basic version has zeros.
100
+ rng = np.random.mtrand.RandomState(42)
101
+ data = np.abs(rng.randn(10, 10))
102
+ W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd")
103
+ Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda")
104
+ War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0)
105
+
106
+ for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
107
+ assert_almost_equal(evl[ref != 0], ref[ref != 0])
108
+
109
+
110
+ # ignore UserWarning raised when both solver='mu' and init='nndsvd'
111
+ @ignore_warnings(category=UserWarning)
112
+ @pytest.mark.parametrize(
113
+ ["Estimator", "solver"],
114
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
115
+ )
116
+ @pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random"))
117
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
118
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
119
+ def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H):
120
+ # Test that the decomposition does not contain negative values
121
+ A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)]
122
+ model = Estimator(
123
+ n_components=2,
124
+ init=init,
125
+ alpha_W=alpha_W,
126
+ alpha_H=alpha_H,
127
+ random_state=0,
128
+ **solver,
129
+ )
130
+ transf = model.fit_transform(A)
131
+ assert not ((model.components_ < 0).any() or (transf < 0).any())
132
+
133
+
134
+ @pytest.mark.parametrize(
135
+ ["Estimator", "solver"],
136
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
137
+ )
138
+ def test_nmf_fit_close(Estimator, solver):
139
+ rng = np.random.mtrand.RandomState(42)
140
+ # Test that the fit is not too far away
141
+ pnmf = Estimator(
142
+ 5,
143
+ init="nndsvdar",
144
+ random_state=0,
145
+ max_iter=600,
146
+ **solver,
147
+ )
148
+ X = np.abs(rng.randn(6, 5))
149
+ assert pnmf.fit(X).reconstruction_err_ < 0.1
150
+
151
+
152
+ def test_nmf_true_reconstruction():
153
+ # Test that the fit is not too far away from an exact solution
154
+ # (by construction)
155
+ n_samples = 15
156
+ n_features = 10
157
+ n_components = 5
158
+ beta_loss = 1
159
+ batch_size = 3
160
+ max_iter = 1000
161
+
162
+ rng = np.random.mtrand.RandomState(42)
163
+ W_true = np.zeros([n_samples, n_components])
164
+ W_array = np.abs(rng.randn(n_samples))
165
+ for j in range(n_components):
166
+ W_true[j % n_samples, j] = W_array[j % n_samples]
167
+ H_true = np.zeros([n_components, n_features])
168
+ H_array = np.abs(rng.randn(n_components))
169
+ for j in range(n_features):
170
+ H_true[j % n_components, j] = H_array[j % n_components]
171
+ X = np.dot(W_true, H_true)
172
+
173
+ model = NMF(
174
+ n_components=n_components,
175
+ solver="mu",
176
+ beta_loss=beta_loss,
177
+ max_iter=max_iter,
178
+ random_state=0,
179
+ )
180
+ transf = model.fit_transform(X)
181
+ X_calc = np.dot(transf, model.components_)
182
+
183
+ assert model.reconstruction_err_ < 0.1
184
+ assert_allclose(X, X_calc)
185
+
186
+ mbmodel = MiniBatchNMF(
187
+ n_components=n_components,
188
+ beta_loss=beta_loss,
189
+ batch_size=batch_size,
190
+ random_state=0,
191
+ max_iter=max_iter,
192
+ )
193
+ transf = mbmodel.fit_transform(X)
194
+ X_calc = np.dot(transf, mbmodel.components_)
195
+
196
+ assert mbmodel.reconstruction_err_ < 0.1
197
+ assert_allclose(X, X_calc, atol=1)
198
+
199
+
200
+ @pytest.mark.parametrize("solver", ["cd", "mu"])
201
+ def test_nmf_transform(solver):
202
+ # Test that fit_transform is equivalent to fit.transform for NMF
203
+ # Test that NMF.transform returns close values
204
+ rng = np.random.mtrand.RandomState(42)
205
+ A = np.abs(rng.randn(6, 5))
206
+ m = NMF(
207
+ solver=solver,
208
+ n_components=3,
209
+ init="random",
210
+ random_state=0,
211
+ tol=1e-6,
212
+ )
213
+ ft = m.fit_transform(A)
214
+ t = m.transform(A)
215
+ assert_allclose(ft, t, atol=1e-1)
216
+
217
+
218
+ def test_minibatch_nmf_transform():
219
+ # Test that fit_transform is equivalent to fit.transform for MiniBatchNMF
220
+ # Only guaranteed with fresh restarts
221
+ rng = np.random.mtrand.RandomState(42)
222
+ A = np.abs(rng.randn(6, 5))
223
+ m = MiniBatchNMF(
224
+ n_components=3,
225
+ random_state=0,
226
+ tol=1e-3,
227
+ fresh_restarts=True,
228
+ )
229
+ ft = m.fit_transform(A)
230
+ t = m.transform(A)
231
+ assert_allclose(ft, t)
232
+
233
+
234
+ @pytest.mark.parametrize(
235
+ ["Estimator", "solver"],
236
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
237
+ )
238
+ def test_nmf_transform_custom_init(Estimator, solver):
239
+ # Smoke test that checks if NMF.transform works with custom initialization
240
+ random_state = np.random.RandomState(0)
241
+ A = np.abs(random_state.randn(6, 5))
242
+ n_components = 4
243
+ avg = np.sqrt(A.mean() / n_components)
244
+ H_init = np.abs(avg * random_state.randn(n_components, 5))
245
+ W_init = np.abs(avg * random_state.randn(6, n_components))
246
+
247
+ m = Estimator(
248
+ n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver
249
+ )
250
+ m.fit_transform(A, W=W_init, H=H_init)
251
+ m.transform(A)
252
+
253
+
254
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
255
+ def test_nmf_inverse_transform(solver):
256
+ # Test that NMF.inverse_transform returns close values
257
+ random_state = np.random.RandomState(0)
258
+ A = np.abs(random_state.randn(6, 4))
259
+ m = NMF(
260
+ solver=solver,
261
+ n_components=4,
262
+ init="random",
263
+ random_state=0,
264
+ max_iter=1000,
265
+ )
266
+ ft = m.fit_transform(A)
267
+ A_new = m.inverse_transform(ft)
268
+ assert_array_almost_equal(A, A_new, decimal=2)
269
+
270
+
271
+ # TODO(1.6): remove the warning filter
272
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
273
+ def test_mbnmf_inverse_transform():
274
+ # Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform
275
+ # is close to the identity
276
+ rng = np.random.RandomState(0)
277
+ A = np.abs(rng.randn(6, 4))
278
+ nmf = MiniBatchNMF(
279
+ random_state=rng,
280
+ max_iter=500,
281
+ init="nndsvdar",
282
+ fresh_restarts=True,
283
+ )
284
+ ft = nmf.fit_transform(A)
285
+ A_new = nmf.inverse_transform(ft)
286
+ assert_allclose(A, A_new, rtol=1e-3, atol=1e-2)
287
+
288
+
289
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
290
+ def test_n_components_greater_n_features(Estimator):
291
+ # Smoke test for the case of more components than features.
292
+ rng = np.random.mtrand.RandomState(42)
293
+ A = np.abs(rng.randn(30, 10))
294
+ Estimator(n_components=15, random_state=0, tol=1e-2).fit(A)
295
+
296
+
297
+ @pytest.mark.parametrize(
298
+ ["Estimator", "solver"],
299
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
300
+ )
301
+ @pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
302
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
303
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
304
+ def test_nmf_sparse_input(Estimator, solver, sparse_container, alpha_W, alpha_H):
305
+ # Test that sparse matrices are accepted as input
306
+ rng = np.random.mtrand.RandomState(42)
307
+ A = np.abs(rng.randn(10, 10))
308
+ A[:, 2 * np.arange(5)] = 0
309
+ A_sparse = sparse_container(A)
310
+
311
+ est1 = Estimator(
312
+ n_components=5,
313
+ init="random",
314
+ alpha_W=alpha_W,
315
+ alpha_H=alpha_H,
316
+ random_state=0,
317
+ tol=0,
318
+ max_iter=100,
319
+ **solver,
320
+ )
321
+ est2 = clone(est1)
322
+
323
+ W1 = est1.fit_transform(A)
324
+ W2 = est2.fit_transform(A_sparse)
325
+ H1 = est1.components_
326
+ H2 = est2.components_
327
+
328
+ assert_allclose(W1, W2)
329
+ assert_allclose(H1, H2)
330
+
331
+
332
+ @pytest.mark.parametrize(
333
+ ["Estimator", "solver"],
334
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
335
+ )
336
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
337
+ def test_nmf_sparse_transform(Estimator, solver, csc_container):
338
+ # Test that transform works on sparse data. Issue #2124
339
+ rng = np.random.mtrand.RandomState(42)
340
+ A = np.abs(rng.randn(3, 2))
341
+ A[1, 1] = 0
342
+ A = csc_container(A)
343
+
344
+ model = Estimator(random_state=0, n_components=2, max_iter=400, **solver)
345
+ A_fit_tr = model.fit_transform(A)
346
+ A_tr = model.transform(A)
347
+ assert_allclose(A_fit_tr, A_tr, atol=1e-1)
348
+
349
+
350
+ # TODO(1.6): remove the warning filter
351
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
352
+ @pytest.mark.parametrize("init", ["random", "nndsvd"])
353
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
354
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
355
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
356
+ def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H):
357
+ # Test that the function is called in the same way, either directly
358
+ # or through the NMF class
359
+ max_iter = 500
360
+ rng = np.random.mtrand.RandomState(42)
361
+ A = np.abs(rng.randn(10, 10))
362
+ A[:, 2 * np.arange(5)] = 0
363
+
364
+ W_nmf, H, _ = non_negative_factorization(
365
+ A,
366
+ init=init,
367
+ solver=solver,
368
+ max_iter=max_iter,
369
+ alpha_W=alpha_W,
370
+ alpha_H=alpha_H,
371
+ random_state=1,
372
+ tol=1e-2,
373
+ )
374
+ W_nmf_2, H, _ = non_negative_factorization(
375
+ A,
376
+ H=H,
377
+ update_H=False,
378
+ init=init,
379
+ solver=solver,
380
+ max_iter=max_iter,
381
+ alpha_W=alpha_W,
382
+ alpha_H=alpha_H,
383
+ random_state=1,
384
+ tol=1e-2,
385
+ )
386
+
387
+ model_class = NMF(
388
+ init=init,
389
+ solver=solver,
390
+ max_iter=max_iter,
391
+ alpha_W=alpha_W,
392
+ alpha_H=alpha_H,
393
+ random_state=1,
394
+ tol=1e-2,
395
+ )
396
+ W_cls = model_class.fit_transform(A)
397
+ W_cls_2 = model_class.transform(A)
398
+
399
+ assert_allclose(W_nmf, W_cls)
400
+ assert_allclose(W_nmf_2, W_cls_2)
401
+
402
+
403
+ def test_non_negative_factorization_checking():
404
+ # Note that the validity of parameter types and range of possible values
405
+ # for scalar numerical or str parameters is already checked in the common
406
+ # tests. Here we only check for problems that cannot be captured by simple
407
+ # declarative constraints on the valid parameter values.
408
+
409
+ A = np.ones((2, 2))
410
+ # Test parameters checking in public function
411
+ nnmf = non_negative_factorization
412
+ msg = re.escape("Negative values in data passed to NMF (input H)")
413
+ with pytest.raises(ValueError, match=msg):
414
+ nnmf(A, A, -A, 2, init="custom")
415
+ msg = re.escape("Negative values in data passed to NMF (input W)")
416
+ with pytest.raises(ValueError, match=msg):
417
+ nnmf(A, -A, A, 2, init="custom")
418
+ msg = re.escape("Array passed to NMF (input H) is full of zeros")
419
+ with pytest.raises(ValueError, match=msg):
420
+ nnmf(A, A, 0 * A, 2, init="custom")
421
+
422
+
423
+ def _beta_divergence_dense(X, W, H, beta):
424
+ """Compute the beta-divergence of X and W.H for dense array only.
425
+
426
+ Used as a reference for testing nmf._beta_divergence.
427
+ """
428
+ WH = np.dot(W, H)
429
+
430
+ if beta == 2:
431
+ return squared_norm(X - WH) / 2
432
+
433
+ WH_Xnonzero = WH[X != 0]
434
+ X_nonzero = X[X != 0]
435
+ np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
436
+
437
+ if beta == 1:
438
+ res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
439
+ res += WH.sum() - X.sum()
440
+
441
+ elif beta == 0:
442
+ div = X_nonzero / WH_Xnonzero
443
+ res = np.sum(div) - X.size - np.sum(np.log(div))
444
+ else:
445
+ res = (X_nonzero**beta).sum()
446
+ res += (beta - 1) * (WH**beta).sum()
447
+ res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
448
+ res /= beta * (beta - 1)
449
+
450
+ return res
451
+
452
+
453
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
454
+ def test_beta_divergence(csr_container):
455
+ # Compare _beta_divergence with the reference _beta_divergence_dense
456
+ n_samples = 20
457
+ n_features = 10
458
+ n_components = 5
459
+ beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0]
460
+
461
+ # initialization
462
+ rng = np.random.mtrand.RandomState(42)
463
+ X = rng.randn(n_samples, n_features)
464
+ np.clip(X, 0, None, out=X)
465
+ X_csr = csr_container(X)
466
+ W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
467
+
468
+ for beta in beta_losses:
469
+ ref = _beta_divergence_dense(X, W, H, beta)
470
+ loss = nmf._beta_divergence(X, W, H, beta)
471
+ loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
472
+
473
+ assert_almost_equal(ref, loss, decimal=7)
474
+ assert_almost_equal(ref, loss_csr, decimal=7)
475
+
476
+
477
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
478
+ def test_special_sparse_dot(csr_container):
479
+ # Test the function that computes np.dot(W, H), only where X is non zero.
480
+ n_samples = 10
481
+ n_features = 5
482
+ n_components = 3
483
+ rng = np.random.mtrand.RandomState(42)
484
+ X = rng.randn(n_samples, n_features)
485
+ np.clip(X, 0, None, out=X)
486
+ X_csr = csr_container(X)
487
+
488
+ W = np.abs(rng.randn(n_samples, n_components))
489
+ H = np.abs(rng.randn(n_components, n_features))
490
+
491
+ WH_safe = nmf._special_sparse_dot(W, H, X_csr)
492
+ WH = nmf._special_sparse_dot(W, H, X)
493
+
494
+ # test that both results have same values, in X_csr nonzero elements
495
+ ii, jj = X_csr.nonzero()
496
+ WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
497
+ assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
498
+
499
+ # test that WH_safe and X_csr have the same sparse structure
500
+ assert_array_equal(WH_safe.indices, X_csr.indices)
501
+ assert_array_equal(WH_safe.indptr, X_csr.indptr)
502
+ assert_array_equal(WH_safe.shape, X_csr.shape)
503
+
504
+
505
+ @ignore_warnings(category=ConvergenceWarning)
506
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
507
+ def test_nmf_multiplicative_update_sparse(csr_container):
508
+ # Compare sparse and dense input in multiplicative update NMF
509
+ # Also test continuity of the results with respect to beta_loss parameter
510
+ n_samples = 20
511
+ n_features = 10
512
+ n_components = 5
513
+ alpha = 0.1
514
+ l1_ratio = 0.5
515
+ n_iter = 20
516
+
517
+ # initialization
518
+ rng = np.random.mtrand.RandomState(1337)
519
+ X = rng.randn(n_samples, n_features)
520
+ X = np.abs(X)
521
+ X_csr = csr_container(X)
522
+ W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
523
+
524
+ for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
525
+ # Reference with dense array X
526
+ W, H = W0.copy(), H0.copy()
527
+ W1, H1, _ = non_negative_factorization(
528
+ X,
529
+ W,
530
+ H,
531
+ n_components,
532
+ init="custom",
533
+ update_H=True,
534
+ solver="mu",
535
+ beta_loss=beta_loss,
536
+ max_iter=n_iter,
537
+ alpha_W=alpha,
538
+ l1_ratio=l1_ratio,
539
+ random_state=42,
540
+ )
541
+
542
+ # Compare with sparse X
543
+ W, H = W0.copy(), H0.copy()
544
+ W2, H2, _ = non_negative_factorization(
545
+ X_csr,
546
+ W,
547
+ H,
548
+ n_components,
549
+ init="custom",
550
+ update_H=True,
551
+ solver="mu",
552
+ beta_loss=beta_loss,
553
+ max_iter=n_iter,
554
+ alpha_W=alpha,
555
+ l1_ratio=l1_ratio,
556
+ random_state=42,
557
+ )
558
+
559
+ assert_allclose(W1, W2, atol=1e-7)
560
+ assert_allclose(H1, H2, atol=1e-7)
561
+
562
+ # Compare with almost same beta_loss, since some values have a specific
563
+ # behavior, but the results should be continuous w.r.t beta_loss
564
+ beta_loss -= 1.0e-5
565
+ W, H = W0.copy(), H0.copy()
566
+ W3, H3, _ = non_negative_factorization(
567
+ X_csr,
568
+ W,
569
+ H,
570
+ n_components,
571
+ init="custom",
572
+ update_H=True,
573
+ solver="mu",
574
+ beta_loss=beta_loss,
575
+ max_iter=n_iter,
576
+ alpha_W=alpha,
577
+ l1_ratio=l1_ratio,
578
+ random_state=42,
579
+ )
580
+
581
+ assert_allclose(W1, W3, atol=1e-4)
582
+ assert_allclose(H1, H3, atol=1e-4)
583
+
584
+
585
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
586
+ def test_nmf_negative_beta_loss(csr_container):
587
+ # Test that an error is raised if beta_loss < 0 and X contains zeros.
588
+ # Test that the output has no NaN values when the input contains zeros.
589
+ n_samples = 6
590
+ n_features = 5
591
+ n_components = 3
592
+
593
+ rng = np.random.mtrand.RandomState(42)
594
+ X = rng.randn(n_samples, n_features)
595
+ np.clip(X, 0, None, out=X)
596
+ X_csr = csr_container(X)
597
+
598
+ def _assert_nmf_no_nan(X, beta_loss):
599
+ W, H, _ = non_negative_factorization(
600
+ X,
601
+ init="random",
602
+ n_components=n_components,
603
+ solver="mu",
604
+ beta_loss=beta_loss,
605
+ random_state=0,
606
+ max_iter=1000,
607
+ )
608
+ assert not np.any(np.isnan(W))
609
+ assert not np.any(np.isnan(H))
610
+
611
+ msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
612
+ for beta_loss in (-0.6, 0.0):
613
+ with pytest.raises(ValueError, match=msg):
614
+ _assert_nmf_no_nan(X, beta_loss)
615
+ _assert_nmf_no_nan(X + 1e-9, beta_loss)
616
+
617
+ for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5):
618
+ _assert_nmf_no_nan(X, beta_loss)
619
+ _assert_nmf_no_nan(X_csr, beta_loss)
620
+
621
+
622
+ # TODO(1.6): remove the warning filter
623
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
624
+ @pytest.mark.parametrize("beta_loss", [-0.5, 0.0])
625
+ def test_minibatch_nmf_negative_beta_loss(beta_loss):
626
+ """Check that an error is raised if beta_loss < 0 and X contains zeros."""
627
+ rng = np.random.RandomState(0)
628
+ X = rng.normal(size=(6, 5))
629
+ X[X < 0] = 0
630
+
631
+ nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0)
632
+
633
+ msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
634
+ with pytest.raises(ValueError, match=msg):
635
+ nmf.fit(X)
636
+
637
+
638
+ @pytest.mark.parametrize(
639
+ ["Estimator", "solver"],
640
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
641
+ )
642
+ def test_nmf_regularization(Estimator, solver):
643
+ # Test the effect of L1 and L2 regularizations
644
+ n_samples = 6
645
+ n_features = 5
646
+ n_components = 3
647
+ rng = np.random.mtrand.RandomState(42)
648
+ X = np.abs(rng.randn(n_samples, n_features))
649
+
650
+ # L1 regularization should increase the number of zeros
651
+ l1_ratio = 1.0
652
+ regul = Estimator(
653
+ n_components=n_components,
654
+ alpha_W=0.5,
655
+ l1_ratio=l1_ratio,
656
+ random_state=42,
657
+ **solver,
658
+ )
659
+ model = Estimator(
660
+ n_components=n_components,
661
+ alpha_W=0.0,
662
+ l1_ratio=l1_ratio,
663
+ random_state=42,
664
+ **solver,
665
+ )
666
+
667
+ W_regul = regul.fit_transform(X)
668
+ W_model = model.fit_transform(X)
669
+
670
+ H_regul = regul.components_
671
+ H_model = model.components_
672
+
673
+ eps = np.finfo(np.float64).eps
674
+ W_regul_n_zeros = W_regul[W_regul <= eps].size
675
+ W_model_n_zeros = W_model[W_model <= eps].size
676
+ H_regul_n_zeros = H_regul[H_regul <= eps].size
677
+ H_model_n_zeros = H_model[H_model <= eps].size
678
+
679
+ assert W_regul_n_zeros > W_model_n_zeros
680
+ assert H_regul_n_zeros > H_model_n_zeros
681
+
682
+ # L2 regularization should decrease the sum of the squared norm
683
+ # of the matrices W and H
684
+ l1_ratio = 0.0
685
+ regul = Estimator(
686
+ n_components=n_components,
687
+ alpha_W=0.5,
688
+ l1_ratio=l1_ratio,
689
+ random_state=42,
690
+ **solver,
691
+ )
692
+ model = Estimator(
693
+ n_components=n_components,
694
+ alpha_W=0.0,
695
+ l1_ratio=l1_ratio,
696
+ random_state=42,
697
+ **solver,
698
+ )
699
+
700
+ W_regul = regul.fit_transform(X)
701
+ W_model = model.fit_transform(X)
702
+
703
+ H_regul = regul.components_
704
+ H_model = model.components_
705
+
706
+ assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > (
707
+ linalg.norm(W_regul)
708
+ ) ** 2.0 + (linalg.norm(H_regul)) ** 2.0
709
+
710
+
711
+ @ignore_warnings(category=ConvergenceWarning)
712
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
713
+ def test_nmf_decreasing(solver):
714
+ # test that the objective function is decreasing at each iteration
715
+ n_samples = 20
716
+ n_features = 15
717
+ n_components = 10
718
+ alpha = 0.1
719
+ l1_ratio = 0.5
720
+ tol = 0.0
721
+
722
+ # initialization
723
+ rng = np.random.mtrand.RandomState(42)
724
+ X = rng.randn(n_samples, n_features)
725
+ np.abs(X, X)
726
+ W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
727
+
728
+ for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
729
+ if solver != "mu" and beta_loss != 2:
730
+ # not implemented
731
+ continue
732
+ W, H = W0.copy(), H0.copy()
733
+ previous_loss = None
734
+ for _ in range(30):
735
+ # one more iteration starting from the previous results
736
+ W, H, _ = non_negative_factorization(
737
+ X,
738
+ W,
739
+ H,
740
+ beta_loss=beta_loss,
741
+ init="custom",
742
+ n_components=n_components,
743
+ max_iter=1,
744
+ alpha_W=alpha,
745
+ solver=solver,
746
+ tol=tol,
747
+ l1_ratio=l1_ratio,
748
+ verbose=0,
749
+ random_state=0,
750
+ update_H=True,
751
+ )
752
+
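+ # Full regularized objective: beta-divergence plus the L1 and L2 penalties
+ # on W (scaled by n_features) and on H (scaled by n_samples), using
+ # alpha_W=alpha and the default alpha_H="same".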
753
+ loss = (
754
+ nmf._beta_divergence(X, W, H, beta_loss)
755
+ + alpha * l1_ratio * n_features * W.sum()
756
+ + alpha * l1_ratio * n_samples * H.sum()
757
+ + alpha * (1 - l1_ratio) * n_features * (W**2).sum()
758
+ + alpha * (1 - l1_ratio) * n_samples * (H**2).sum()
759
+ )
760
+ if previous_loss is not None:
761
+ assert previous_loss > loss
762
+ previous_loss = loss
763
+
764
+
765
+ def test_nmf_underflow():
766
+ # Regression test for an underflow issue in _beta_divergence
767
+ rng = np.random.RandomState(0)
768
+ n_samples, n_features, n_components = 10, 2, 2
769
+ X = np.abs(rng.randn(n_samples, n_features)) * 10
770
+ W = np.abs(rng.randn(n_samples, n_components)) * 10
771
+ H = np.abs(rng.randn(n_components, n_features))
772
+
773
+ X[0, 0] = 0
774
+ ref = nmf._beta_divergence(X, W, H, beta=1.0)
775
+ X[0, 0] = 1e-323
776
+ res = nmf._beta_divergence(X, W, H, beta=1.0)
777
+ assert_almost_equal(res, ref)
778
+
779
+
780
+ # TODO(1.6): remove the warning filter
781
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
782
+ @pytest.mark.parametrize(
783
+ "dtype_in, dtype_out",
784
+ [
785
+ (np.float32, np.float32),
786
+ (np.float64, np.float64),
787
+ (np.int32, np.float64),
788
+ (np.int64, np.float64),
789
+ ],
790
+ )
791
+ @pytest.mark.parametrize(
792
+ ["Estimator", "solver"],
793
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
794
+ )
795
+ def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out):
796
+ # Check that NMF preserves dtype (float32 and float64)
797
+ X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False)
798
+ np.abs(X, out=X)
799
+
800
+ nmf = Estimator(
801
+ alpha_W=1.0,
802
+ alpha_H=1.0,
803
+ tol=1e-2,
804
+ random_state=0,
805
+ **solver,
806
+ )
807
+
808
+ assert nmf.fit(X).transform(X).dtype == dtype_out
809
+ assert nmf.fit_transform(X).dtype == dtype_out
810
+ assert nmf.components_.dtype == dtype_out
811
+
812
+
813
+ # TODO(1.6): remove the warning filter
814
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
815
+ @pytest.mark.parametrize(
816
+ ["Estimator", "solver"],
817
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
818
+ )
819
+ def test_nmf_float32_float64_consistency(Estimator, solver):
820
+ # Check that the result of NMF is the same between float32 and float64
821
+ X = np.random.RandomState(0).randn(50, 7)
822
+ np.abs(X, out=X)
823
+ nmf32 = Estimator(random_state=0, tol=1e-3, **solver)
824
+ W32 = nmf32.fit_transform(X.astype(np.float32))
825
+ nmf64 = Estimator(random_state=0, tol=1e-3, **solver)
826
+ W64 = nmf64.fit_transform(X)
827
+
828
+ assert_allclose(W32, W64, atol=1e-5)
829
+
830
+
831
+ # TODO(1.6): remove the warning filter
832
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
833
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
834
+ def test_nmf_custom_init_dtype_error(Estimator):
835
+ # Check that an error is raised if custom H and/or W don't have the same
836
+ # dtype as X.
837
+ rng = np.random.RandomState(0)
838
+ X = rng.random_sample((20, 15))
839
+ H = rng.random_sample((15, 15)).astype(np.float32)
840
+ W = rng.random_sample((20, 15))
841
+
842
+ with pytest.raises(TypeError, match="should have the same dtype as X"):
843
+ Estimator(init="custom").fit(X, H=H, W=W)
844
+
845
+ with pytest.raises(TypeError, match="should have the same dtype as X"):
846
+ non_negative_factorization(X, H=H, update_H=False)
847
+
848
+
849
+ @pytest.mark.parametrize("beta_loss", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5])
850
+ def test_nmf_minibatchnmf_equivalence(beta_loss):
851
+ # Test that MiniBatchNMF is equivalent to NMF when batch_size = n_samples and
852
+ # forget_factor 0.0 (stopping criterion put aside)
853
+ rng = np.random.mtrand.RandomState(42)
854
+ X = np.abs(rng.randn(48, 5))
855
+
856
+ nmf = NMF(
857
+ n_components=5,
858
+ beta_loss=beta_loss,
859
+ solver="mu",
860
+ random_state=0,
861
+ tol=0,
862
+ )
863
+ mbnmf = MiniBatchNMF(
864
+ n_components=5,
865
+ beta_loss=beta_loss,
866
+ random_state=0,
867
+ tol=0,
868
+ max_no_improvement=None,
869
+ batch_size=X.shape[0],
870
+ forget_factor=0.0,
871
+ )
872
+ W = nmf.fit_transform(X)
873
+ mbW = mbnmf.fit_transform(X)
874
+ assert_allclose(W, mbW)
875
+
876
+
877
+ def test_minibatch_nmf_partial_fit():
878
+ # Check fit / partial_fit equivalence. Applicable only with fresh restarts.
879
+ rng = np.random.mtrand.RandomState(42)
880
+ X = np.abs(rng.randn(100, 5))
881
+
882
+ n_components = 5
883
+ batch_size = 10
884
+ max_iter = 2
885
+
886
+ mbnmf1 = MiniBatchNMF(
887
+ n_components=n_components,
888
+ init="custom",
889
+ random_state=0,
890
+ max_iter=max_iter,
891
+ batch_size=batch_size,
892
+ tol=0,
893
+ max_no_improvement=None,
894
+ fresh_restarts=False,
895
+ )
896
+ mbnmf2 = MiniBatchNMF(n_components=n_components, init="custom", random_state=0)
897
+
898
+ # Force the same init of H (W is recomputed anyway) to be able to compare results.
899
+ W, H = nmf._initialize_nmf(
900
+ X, n_components=n_components, init="random", random_state=0
901
+ )
902
+
903
+ mbnmf1.fit(X, W=W, H=H)
904
+ for i in range(max_iter):
905
+ for j in range(batch_size):
906
+ mbnmf2.partial_fit(X[j : j + batch_size], W=W[:batch_size], H=H)
907
+
908
+ assert mbnmf1.n_steps_ == mbnmf2.n_steps_
909
+ assert_allclose(mbnmf1.components_, mbnmf2.components_)
910
+
911
+
912
+ def test_feature_names_out():
913
+ """Check feature names out for NMF."""
914
+ random_state = np.random.RandomState(0)
915
+ X = np.abs(random_state.randn(10, 4))
916
+ nmf = NMF(n_components=3).fit(X)
917
+
918
+ names = nmf.get_feature_names_out()
919
+ assert_array_equal([f"nmf{i}" for i in range(3)], names)
920
+
921
+
922
+ # TODO(1.6): remove the warning filter
923
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
924
+ def test_minibatch_nmf_verbose():
925
+ # Check verbose mode of MiniBatchNMF for better coverage.
926
+ A = np.random.RandomState(0).random_sample((100, 10))
927
+ nmf = MiniBatchNMF(tol=1e-2, random_state=0, verbose=1)
928
+ old_stdout = sys.stdout
929
+ sys.stdout = StringIO()
930
+ try:
931
+ nmf.fit(A)
932
+ finally:
933
+ sys.stdout = old_stdout
934
+
935
+
936
+ # TODO(1.5): remove this test
937
+ def test_NMF_inverse_transform_W_deprecation():
938
+ rng = np.random.mtrand.RandomState(42)
939
+ A = np.abs(rng.randn(6, 5))
940
+ est = NMF(
941
+ n_components=3,
942
+ init="random",
943
+ random_state=0,
944
+ tol=1e-6,
945
+ )
946
+ Xt = est.fit_transform(A)
947
+
948
+ with pytest.raises(TypeError, match="Missing required positional argument"):
949
+ est.inverse_transform()
950
+
951
+ with pytest.raises(ValueError, match="Please provide only"):
952
+ est.inverse_transform(Xt=Xt, W=Xt)
953
+
954
+ with warnings.catch_warnings(record=True):
955
+ warnings.simplefilter("error")
956
+ est.inverse_transform(Xt)
957
+
958
+ with pytest.warns(FutureWarning, match="Input argument `W` was renamed to `Xt`"):
959
+ est.inverse_transform(W=Xt)
960
+
961
+
962
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
963
+ def test_nmf_n_components_auto(Estimator):
964
+ # Check that n_components is correctly inferred
965
+ # from the provided custom initialization.
966
+ rng = np.random.RandomState(0)
967
+ X = rng.random_sample((6, 5))
968
+ W = rng.random_sample((6, 2))
969
+ H = rng.random_sample((2, 5))
970
+ est = Estimator(
971
+ n_components="auto",
972
+ init="custom",
973
+ random_state=0,
974
+ tol=1e-6,
975
+ )
976
+ est.fit_transform(X, W=W, H=H)
977
+ assert est._n_components == H.shape[0]
978
+
979
+
980
+ def test_nmf_non_negative_factorization_n_components_auto():
981
+ # Check that n_components is correctly inferred from the provided
982
+ # custom initialization.
983
+ rng = np.random.RandomState(0)
984
+ X = rng.random_sample((6, 5))
985
+ W_init = rng.random_sample((6, 2))
986
+ H_init = rng.random_sample((2, 5))
987
+ W, H, _ = non_negative_factorization(
988
+ X, W=W_init, H=H_init, init="custom", n_components="auto"
989
+ )
990
+ assert H.shape == H_init.shape
991
+ assert W.shape == W_init.shape
992
+
993
+
994
+ # TODO(1.6): remove
995
+ def test_nmf_n_components_default_value_warning():
996
+ rng = np.random.RandomState(0)
997
+ X = rng.random_sample((6, 5))
998
+ H = rng.random_sample((2, 5))
999
+ with pytest.warns(
1000
+ FutureWarning, match="The default value of `n_components` will change from"
1001
+ ):
1002
+ non_negative_factorization(X, H=H)
1003
+
1004
+
1005
+ def test_nmf_n_components_auto_no_h_update():
1006
+ # Tests that non_negative_factorization does not fail when setting
1007
+ # n_components="auto" also tests that the inferred n_component
1008
+ # value is the right one.
1009
+ rng = np.random.RandomState(0)
1010
+ X = rng.random_sample((6, 5))
1011
+ H_true = rng.random_sample((2, 5))
1012
+ W, H, _ = non_negative_factorization(
1013
+ X, H=H_true, n_components="auto", update_H=False
1014
+ ) # should not fail
1015
+ assert_allclose(H, H_true)
1016
+ assert W.shape == (X.shape[0], H_true.shape[0])
1017
+
1018
+
1019
+ def test_nmf_w_h_not_used_warning():
1020
+ # Check that warnings are raised if user-provided W and H are not used
1021
+ # and initialization overrides value of W or H
1022
+ rng = np.random.RandomState(0)
1023
+ X = rng.random_sample((6, 5))
1024
+ W_init = rng.random_sample((6, 2))
1025
+ H_init = rng.random_sample((2, 5))
1026
+ with pytest.warns(
1027
+ RuntimeWarning,
1028
+ match="When init!='custom', provided W or H are ignored",
1029
+ ):
1030
+ non_negative_factorization(X, H=H_init, update_H=True, n_components="auto")
1031
+
1032
+ with pytest.warns(
1033
+ RuntimeWarning,
1034
+ match="When init!='custom', provided W or H are ignored",
1035
+ ):
1036
+ non_negative_factorization(
1037
+ X, W=W_init, H=H_init, update_H=True, n_components="auto"
1038
+ )
1039
+
1040
+ with pytest.warns(
1041
+ RuntimeWarning, match="When update_H=False, the provided initial W is not used."
1042
+ ):
1043
+ # When update_H is False, W is ignored regardless of init
1044
+ # TODO: use the provided W when init="custom".
1045
+ non_negative_factorization(
1046
+ X, W=W_init, H=H_init, update_H=False, n_components="auto"
1047
+ )
1048
+
1049
+
1050
+ def test_nmf_custom_init_shape_error():
1051
+ # Check that an informative error is raised when custom initialization does not
1052
+ # have the right shape
1053
+ rng = np.random.RandomState(0)
1054
+ X = rng.random_sample((6, 5))
1055
+ H = rng.random_sample((2, 5))
1056
+ nmf = NMF(n_components=2, init="custom", random_state=0)
1057
+
1058
+ with pytest.raises(ValueError, match="Array with wrong first dimension passed"):
1059
+ nmf.fit(X, H=H, W=rng.random_sample((5, 2)))
1060
+
1061
+ with pytest.raises(ValueError, match="Array with wrong second dimension passed"):
1062
+ nmf.fit(X, H=H, W=rng.random_sample((6, 3)))
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py ADDED
@@ -0,0 +1,477 @@
1
+ import sys
2
+ from io import StringIO
3
+
4
+ import numpy as np
5
+ import pytest
6
+ from numpy.testing import assert_array_equal
7
+ from scipy.linalg import block_diag
8
+ from scipy.special import psi
9
+
10
+ from sklearn.decomposition import LatentDirichletAllocation
11
+ from sklearn.decomposition._online_lda_fast import (
12
+ _dirichlet_expectation_1d,
13
+ _dirichlet_expectation_2d,
14
+ )
15
+ from sklearn.exceptions import NotFittedError
16
+ from sklearn.utils._testing import (
17
+ assert_allclose,
18
+ assert_almost_equal,
19
+ assert_array_almost_equal,
20
+ if_safe_multiprocessing_with_blas,
21
+ )
22
+ from sklearn.utils.fixes import CSR_CONTAINERS
23
+
24
+
25
+ def _build_sparse_array(csr_container):
26
+ # Create 3 topics and each topic has 3 distinct words.
27
+ # (Each word only belongs to a single topic.)
28
+ n_components = 3
29
+ block = np.full((3, 3), n_components, dtype=int)
30
+ blocks = [block] * n_components
31
+ X = block_diag(*blocks)
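+ # X is a 9x9 block-diagonal document-word count matrix: documents 0-2 only
+ # contain words 0-2, documents 3-5 only words 3-5, and documents 6-8 only
+ # words 6-8 (each occurring word has count 3).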
32
+ X = csr_container(X)
33
+ return (n_components, X)
34
+
35
+
36
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
37
+ def test_lda_default_prior_params(csr_container):
38
+ # default prior parameters should be `1 / n_components`
39
+ # and verbose params should not affect result
40
+ n_components, X = _build_sparse_array(csr_container)
41
+ prior = 1.0 / n_components
42
+ lda_1 = LatentDirichletAllocation(
43
+ n_components=n_components,
44
+ doc_topic_prior=prior,
45
+ topic_word_prior=prior,
46
+ random_state=0,
47
+ )
48
+ lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0)
49
+ topic_distr_1 = lda_1.fit_transform(X)
50
+ topic_distr_2 = lda_2.fit_transform(X)
51
+ assert_almost_equal(topic_distr_1, topic_distr_2)
52
+
53
+
54
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
55
+ def test_lda_fit_batch(csr_container):
56
+ # Test LDA batch learning_offset (`fit` method with 'batch' learning)
57
+ rng = np.random.RandomState(0)
58
+ n_components, X = _build_sparse_array(csr_container)
59
+ lda = LatentDirichletAllocation(
60
+ n_components=n_components,
61
+ evaluate_every=1,
62
+ learning_method="batch",
63
+ random_state=rng,
64
+ )
65
+ lda.fit(X)
66
+
67
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
68
+ for component in lda.components_:
69
+ # Find top 3 words in each LDA component
70
+ top_idx = set(component.argsort()[-3:][::-1])
71
+ assert tuple(sorted(top_idx)) in correct_idx_grps
72
+
73
+
74
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
75
+ def test_lda_fit_online(csr_container):
76
+ # Test LDA online learning (`fit` method with 'online' learning)
77
+ rng = np.random.RandomState(0)
78
+ n_components, X = _build_sparse_array(csr_container)
79
+ lda = LatentDirichletAllocation(
80
+ n_components=n_components,
81
+ learning_offset=10.0,
82
+ evaluate_every=1,
83
+ learning_method="online",
84
+ random_state=rng,
85
+ )
86
+ lda.fit(X)
87
+
88
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
89
+ for component in lda.components_:
90
+ # Find top 3 words in each LDA component
91
+ top_idx = set(component.argsort()[-3:][::-1])
92
+ assert tuple(sorted(top_idx)) in correct_idx_grps
93
+
94
+
95
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
96
+ def test_lda_partial_fit(csr_container):
97
+ # Test LDA online learning (`partial_fit` method)
98
+ # (same as test_lda_batch)
99
+ rng = np.random.RandomState(0)
100
+ n_components, X = _build_sparse_array(csr_container)
101
+ lda = LatentDirichletAllocation(
102
+ n_components=n_components,
103
+ learning_offset=10.0,
104
+ total_samples=100,
105
+ random_state=rng,
106
+ )
107
+ for i in range(3):
108
+ lda.partial_fit(X)
109
+
110
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
111
+ for c in lda.components_:
112
+ top_idx = set(c.argsort()[-3:][::-1])
113
+ assert tuple(sorted(top_idx)) in correct_idx_grps
114
+
115
+
116
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
117
+ def test_lda_dense_input(csr_container):
118
+ # Test LDA with dense input.
119
+ rng = np.random.RandomState(0)
120
+ n_components, X = _build_sparse_array(csr_container)
121
+ lda = LatentDirichletAllocation(
122
+ n_components=n_components, learning_method="batch", random_state=rng
123
+ )
124
+ lda.fit(X.toarray())
125
+
126
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
127
+ for component in lda.components_:
128
+ # Find top 3 words in each LDA component
129
+ top_idx = set(component.argsort()[-3:][::-1])
130
+ assert tuple(sorted(top_idx)) in correct_idx_grps
131
+
132
+
133
+ def test_lda_transform():
134
+ # Test LDA transform.
135
+ # Transform result cannot be negative and should be normalized
136
+ rng = np.random.RandomState(0)
137
+ X = rng.randint(5, size=(20, 10))
138
+ n_components = 3
139
+ lda = LatentDirichletAllocation(n_components=n_components, random_state=rng)
140
+ X_trans = lda.fit_transform(X)
141
+ assert (X_trans > 0.0).any()
142
+ assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
143
+
144
+
145
+ @pytest.mark.parametrize("method", ("online", "batch"))
146
+ def test_lda_fit_transform(method):
147
+ # Test LDA fit_transform & transform
148
+ # fit_transform and transform result should be the same
149
+ rng = np.random.RandomState(0)
150
+ X = rng.randint(10, size=(50, 20))
151
+ lda = LatentDirichletAllocation(
152
+ n_components=5, learning_method=method, random_state=rng
153
+ )
154
+ X_fit = lda.fit_transform(X)
155
+ X_trans = lda.transform(X)
156
+ assert_array_almost_equal(X_fit, X_trans, 4)
157
+
158
+
159
+ def test_lda_negative_input():
160
+ # test that fitting on a dense matrix with negative values raises an error.
161
+ X = np.full((5, 10), -1.0)
162
+ lda = LatentDirichletAllocation()
163
+ regex = r"^Negative values in data passed"
164
+ with pytest.raises(ValueError, match=regex):
165
+ lda.fit(X)
166
+
167
+
168
+ def test_lda_no_component_error():
169
+ # test `perplexity` before `fit`
170
+ rng = np.random.RandomState(0)
171
+ X = rng.randint(4, size=(20, 10))
172
+ lda = LatentDirichletAllocation()
173
+ regex = (
174
+ "This LatentDirichletAllocation instance is not fitted yet. "
175
+ "Call 'fit' with appropriate arguments before using this "
176
+ "estimator."
177
+ )
178
+ with pytest.raises(NotFittedError, match=regex):
179
+ lda.perplexity(X)
180
+
181
+
182
+ @if_safe_multiprocessing_with_blas
183
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
184
+ @pytest.mark.parametrize("method", ("online", "batch"))
185
+ def test_lda_multi_jobs(method, csr_container):
186
+ n_components, X = _build_sparse_array(csr_container)
187
+ # Test LDA batch training with multi CPU
188
+ rng = np.random.RandomState(0)
189
+ lda = LatentDirichletAllocation(
190
+ n_components=n_components,
191
+ n_jobs=2,
192
+ learning_method=method,
193
+ evaluate_every=1,
194
+ random_state=rng,
195
+ )
196
+ lda.fit(X)
197
+
198
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
199
+ for c in lda.components_:
200
+ top_idx = set(c.argsort()[-3:][::-1])
201
+ assert tuple(sorted(top_idx)) in correct_idx_grps
202
+
203
+
204
+ @if_safe_multiprocessing_with_blas
205
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
206
+ def test_lda_partial_fit_multi_jobs(csr_container):
207
+ # Test LDA online training with multi CPU
208
+ rng = np.random.RandomState(0)
209
+ n_components, X = _build_sparse_array(csr_container)
210
+ lda = LatentDirichletAllocation(
211
+ n_components=n_components,
212
+ n_jobs=2,
213
+ learning_offset=5.0,
214
+ total_samples=30,
215
+ random_state=rng,
216
+ )
217
+ for i in range(2):
218
+ lda.partial_fit(X)
219
+
220
+ correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
221
+ for c in lda.components_:
222
+ top_idx = set(c.argsort()[-3:][::-1])
223
+ assert tuple(sorted(top_idx)) in correct_idx_grps
224
+
225
+
226
+ def test_lda_preplexity_mismatch():
227
+ # test dimension mismatch in `perplexity` method
228
+ rng = np.random.RandomState(0)
229
+ n_components = rng.randint(3, 6)
230
+ n_samples = rng.randint(6, 10)
231
+ X = np.random.randint(4, size=(n_samples, 10))
232
+ lda = LatentDirichletAllocation(
233
+ n_components=n_components,
234
+ learning_offset=5.0,
235
+ total_samples=20,
236
+ random_state=rng,
237
+ )
238
+ lda.fit(X)
239
+ # invalid samples
240
+ invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
241
+ with pytest.raises(ValueError, match=r"Number of samples"):
242
+ lda._perplexity_precomp_distr(X, invalid_n_samples)
243
+ # invalid topic number
244
+ invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
245
+ with pytest.raises(ValueError, match=r"Number of topics"):
246
+ lda._perplexity_precomp_distr(X, invalid_n_components)
247
+
248
+
249
+ @pytest.mark.parametrize("method", ("online", "batch"))
250
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
251
+ def test_lda_perplexity(method, csr_container):
252
+ # Test LDA perplexity for batch training
253
+ # perplexity should be lower after each iteration
254
+ n_components, X = _build_sparse_array(csr_container)
255
+ lda_1 = LatentDirichletAllocation(
256
+ n_components=n_components,
257
+ max_iter=1,
258
+ learning_method=method,
259
+ total_samples=100,
260
+ random_state=0,
261
+ )
262
+ lda_2 = LatentDirichletAllocation(
263
+ n_components=n_components,
264
+ max_iter=10,
265
+ learning_method=method,
266
+ total_samples=100,
267
+ random_state=0,
268
+ )
269
+ lda_1.fit(X)
270
+ perp_1 = lda_1.perplexity(X, sub_sampling=False)
271
+
272
+ lda_2.fit(X)
273
+ perp_2 = lda_2.perplexity(X, sub_sampling=False)
274
+ assert perp_1 >= perp_2
275
+
276
+ perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
277
+ perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
278
+ assert perp_1_subsampling >= perp_2_subsampling
279
+
280
+
281
+ @pytest.mark.parametrize("method", ("online", "batch"))
282
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
283
+ def test_lda_score(method, csr_container):
284
+ # Test LDA score for batch training
285
+ # score should be higher after each iteration
286
+ n_components, X = _build_sparse_array(csr_container)
287
+ lda_1 = LatentDirichletAllocation(
288
+ n_components=n_components,
289
+ max_iter=1,
290
+ learning_method=method,
291
+ total_samples=100,
292
+ random_state=0,
293
+ )
294
+ lda_2 = LatentDirichletAllocation(
295
+ n_components=n_components,
296
+ max_iter=10,
297
+ learning_method=method,
298
+ total_samples=100,
299
+ random_state=0,
300
+ )
301
+ lda_1.fit_transform(X)
302
+ score_1 = lda_1.score(X)
303
+
304
+ lda_2.fit_transform(X)
305
+ score_2 = lda_2.score(X)
306
+ assert score_2 >= score_1
307
+
308
+
309
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
310
+ def test_perplexity_input_format(csr_container):
311
+ # Test LDA perplexity for sparse and dense input
312
+ # score should be the same for both dense and sparse input
313
+ n_components, X = _build_sparse_array(csr_container)
314
+ lda = LatentDirichletAllocation(
315
+ n_components=n_components,
316
+ max_iter=1,
317
+ learning_method="batch",
318
+ total_samples=100,
319
+ random_state=0,
320
+ )
321
+ lda.fit(X)
322
+ perp_1 = lda.perplexity(X)
323
+ perp_2 = lda.perplexity(X.toarray())
324
+ assert_almost_equal(perp_1, perp_2)
325
+
326
+
327
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
328
+ def test_lda_score_perplexity(csr_container):
329
+ # Test the relationship between LDA score and perplexity
330
+ n_components, X = _build_sparse_array(csr_container)
331
+ lda = LatentDirichletAllocation(
332
+ n_components=n_components, max_iter=10, random_state=0
333
+ )
334
+ lda.fit(X)
335
+ perplexity_1 = lda.perplexity(X, sub_sampling=False)
336
+
337
+ score = lda.score(X)
338
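+ # score returns an approximate (variational) lower bound on the log-likelihood
+ # of X, and perplexity is defined as exp(-log-likelihood per word), hence the
+ # division by the total word count np.sum(X.data).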
+ perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data)))
339
+ assert_almost_equal(perplexity_1, perplexity_2)
340
+
341
+
342
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
343
+ def test_lda_fit_perplexity(csr_container):
344
+ # Test that the perplexity computed during fit is consistent with what is
345
+ # returned by the perplexity method
346
+ n_components, X = _build_sparse_array(csr_container)
347
+ lda = LatentDirichletAllocation(
348
+ n_components=n_components,
349
+ max_iter=1,
350
+ learning_method="batch",
351
+ random_state=0,
352
+ evaluate_every=1,
353
+ )
354
+ lda.fit(X)
355
+
356
+ # Perplexity computed at end of fit method
357
+ perplexity1 = lda.bound_
358
+
359
+ # Result of perplexity method on the train set
360
+ perplexity2 = lda.perplexity(X)
361
+
362
+ assert_almost_equal(perplexity1, perplexity2)
363
+
364
+
365
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
366
+ def test_lda_empty_docs(csr_container):
367
+ """Test LDA on empty document (all-zero rows)."""
368
+ Z = np.zeros((5, 4))
369
+ for X in [Z, csr_container(Z)]:
370
+ lda = LatentDirichletAllocation(max_iter=750).fit(X)
371
+ assert_almost_equal(
372
+ lda.components_.sum(axis=0), np.ones(lda.components_.shape[1])
373
+ )
374
+
375
+
376
+ def test_dirichlet_expectation():
377
+ """Test Cython version of Dirichlet expectation calculation."""
378
+ x = np.logspace(-100, 10, 10000)
379
+ expectation = np.empty_like(x)
380
+ _dirichlet_expectation_1d(x, 0, expectation)
381
+ assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19)
382
+
383
+ x = x.reshape(100, 100)
384
+ assert_allclose(
385
+ _dirichlet_expectation_2d(x),
386
+ psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
387
+ rtol=1e-11,
388
+ atol=3e-9,
389
+ )
390
+
391
+
392
+ def check_verbosity(
393
+ verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
394
+ ):
395
+ n_components, X = _build_sparse_array(csr_container)
396
+ lda = LatentDirichletAllocation(
397
+ n_components=n_components,
398
+ max_iter=3,
399
+ learning_method="batch",
400
+ verbose=verbose,
401
+ evaluate_every=evaluate_every,
402
+ random_state=0,
403
+ )
404
+ out = StringIO()
405
+ old_out, sys.stdout = sys.stdout, out
406
+ try:
407
+ lda.fit(X)
408
+ finally:
409
+ sys.stdout = old_out
410
+
411
+ n_lines = out.getvalue().count("\n")
412
+ n_perplexity = out.getvalue().count("perplexity")
413
+ assert expected_lines == n_lines
414
+ assert expected_perplexities == n_perplexity
415
+
416
+
417
+ @pytest.mark.parametrize(
418
+ "verbose,evaluate_every,expected_lines,expected_perplexities",
419
+ [
420
+ (False, 1, 0, 0),
421
+ (False, 0, 0, 0),
422
+ (True, 0, 3, 0),
423
+ (True, 1, 3, 3),
424
+ (True, 2, 3, 1),
425
+ ],
426
+ )
427
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
428
+ def test_verbosity(
429
+ verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
430
+ ):
431
+ check_verbosity(
432
+ verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
433
+ )
434
+
435
+
436
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
437
+ def test_lda_feature_names_out(csr_container):
438
+ """Check feature names out for LatentDirichletAllocation."""
439
+ n_components, X = _build_sparse_array(csr_container)
440
+ lda = LatentDirichletAllocation(n_components=n_components).fit(X)
441
+
442
+ names = lda.get_feature_names_out()
443
+ assert_array_equal(
444
+ [f"latentdirichletallocation{i}" for i in range(n_components)], names
445
+ )
446
+
447
+
448
+ @pytest.mark.parametrize("learning_method", ("batch", "online"))
449
+ def test_lda_dtype_match(learning_method, global_dtype):
450
+ """Check data type preservation of fitted attributes."""
451
+ rng = np.random.RandomState(0)
452
+ X = rng.uniform(size=(20, 10)).astype(global_dtype, copy=False)
453
+
454
+ lda = LatentDirichletAllocation(
455
+ n_components=5, random_state=0, learning_method=learning_method
456
+ )
457
+ lda.fit(X)
458
+ assert lda.components_.dtype == global_dtype
459
+ assert lda.exp_dirichlet_component_.dtype == global_dtype
460
+
461
+
462
+ @pytest.mark.parametrize("learning_method", ("batch", "online"))
463
+ def test_lda_numerical_consistency(learning_method, global_random_seed):
464
+ """Check numerical consistency between np.float32 and np.float64."""
465
+ rng = np.random.RandomState(global_random_seed)
466
+ X64 = rng.uniform(size=(20, 10))
467
+ X32 = X64.astype(np.float32)
468
+
469
+ lda_64 = LatentDirichletAllocation(
470
+ n_components=5, random_state=global_random_seed, learning_method=learning_method
471
+ ).fit(X64)
472
+ lda_32 = LatentDirichletAllocation(
473
+ n_components=5, random_state=global_random_seed, learning_method=learning_method
474
+ ).fit(X32)
475
+
476
+ assert_allclose(lda_32.components_, lda_64.components_)
477
+ assert_allclose(lda_32.transform(X32), lda_64.transform(X64))
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py ADDED
@@ -0,0 +1,987 @@
1
+ import re
2
+ import warnings
3
+
4
+ import numpy as np
5
+ import pytest
6
+ import scipy as sp
7
+ from numpy.testing import assert_array_equal
8
+
9
+ from sklearn import config_context, datasets
10
+ from sklearn.base import clone
11
+ from sklearn.datasets import load_iris, make_classification
12
+ from sklearn.decomposition import PCA
13
+ from sklearn.decomposition._pca import _assess_dimension, _infer_dimension
14
+ from sklearn.utils._array_api import (
15
+ _atol_for_type,
16
+ _convert_to_numpy,
17
+ yield_namespace_device_dtype_combinations,
18
+ )
19
+ from sklearn.utils._array_api import device as array_device
20
+ from sklearn.utils._testing import _array_api_for_tests, assert_allclose
21
+ from sklearn.utils.estimator_checks import (
22
+ _get_check_estimator_ids,
23
+ check_array_api_input_and_values,
24
+ )
25
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
26
+
27
+ iris = datasets.load_iris()
28
+ PCA_SOLVERS = ["full", "arpack", "randomized", "auto"]
29
+
30
+ # `SPARSE_M` and `SPARSE_N` could be larger, but be aware:
31
+ # * SciPy's generation of random sparse matrix can be costly
32
+ # * A (SPARSE_M, SPARSE_N) dense array is allocated to compare against
33
+ SPARSE_M, SPARSE_N = 1000, 300 # arbitrary
34
+ SPARSE_MAX_COMPONENTS = min(SPARSE_M, SPARSE_N)
35
+
36
+
37
+ def _check_fitted_pca_close(pca1, pca2, rtol):
38
+ assert_allclose(pca1.components_, pca2.components_, rtol=rtol)
39
+ assert_allclose(pca1.explained_variance_, pca2.explained_variance_, rtol=rtol)
40
+ assert_allclose(pca1.singular_values_, pca2.singular_values_, rtol=rtol)
41
+ assert_allclose(pca1.mean_, pca2.mean_, rtol=rtol)
42
+ assert_allclose(pca1.n_components_, pca2.n_components_, rtol=rtol)
43
+ assert_allclose(pca1.n_samples_, pca2.n_samples_, rtol=rtol)
44
+ assert_allclose(pca1.noise_variance_, pca2.noise_variance_, rtol=rtol)
45
+ assert_allclose(pca1.n_features_in_, pca2.n_features_in_, rtol=rtol)
46
+
47
+
48
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
49
+ @pytest.mark.parametrize("n_components", range(1, iris.data.shape[1]))
50
+ def test_pca(svd_solver, n_components):
51
+ X = iris.data
52
+ pca = PCA(n_components=n_components, svd_solver=svd_solver)
53
+
54
+ # check the shape of fit.transform
55
+ X_r = pca.fit(X).transform(X)
56
+ assert X_r.shape[1] == n_components
57
+
58
+ # check the equivalence of fit.transform and fit_transform
59
+ X_r2 = pca.fit_transform(X)
60
+ assert_allclose(X_r, X_r2)
61
+ X_r = pca.transform(X)
62
+ assert_allclose(X_r, X_r2)
63
+
64
+ # Test get_covariance and get_precision
65
+ cov = pca.get_covariance()
66
+ precision = pca.get_precision()
67
+ assert_allclose(np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-12)
68
+
69
+
70
+ @pytest.mark.parametrize("density", [0.01, 0.1, 0.30])
71
+ @pytest.mark.parametrize("n_components", [1, 2, 10])
72
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
73
+ @pytest.mark.parametrize("svd_solver", ["arpack"])
74
+ @pytest.mark.parametrize("scale", [1, 10, 100])
75
+ def test_pca_sparse(
76
+ global_random_seed, svd_solver, sparse_container, n_components, density, scale
77
+ ):
78
+ # Make sure any tolerance changes pass with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all"
79
+ rtol = 5e-07
80
+ transform_rtol = 3e-05
81
+
82
+ random_state = np.random.default_rng(global_random_seed)
83
+ X = sparse_container(
84
+ sp.sparse.random(
85
+ SPARSE_M,
86
+ SPARSE_N,
87
+ random_state=random_state,
88
+ density=density,
89
+ )
90
+ )
91
+ # Scale the data + vary the column means
92
+ scale_vector = random_state.random(X.shape[1]) * scale
93
+ X = X.multiply(scale_vector)
94
+
95
+ pca = PCA(
96
+ n_components=n_components,
97
+ svd_solver=svd_solver,
98
+ random_state=global_random_seed,
99
+ )
100
+ pca.fit(X)
101
+
102
+ Xd = X.toarray()
103
+ pcad = PCA(
104
+ n_components=n_components,
105
+ svd_solver=svd_solver,
106
+ random_state=global_random_seed,
107
+ )
108
+ pcad.fit(Xd)
109
+
110
+ # Fitted attributes equality
111
+ _check_fitted_pca_close(pca, pcad, rtol=rtol)
112
+
113
+ # Test transform
114
+ X2 = sparse_container(
115
+ sp.sparse.random(
116
+ SPARSE_M,
117
+ SPARSE_N,
118
+ random_state=random_state,
119
+ density=density,
120
+ )
121
+ )
122
+ X2d = X2.toarray()
123
+
124
+ assert_allclose(pca.transform(X2), pca.transform(X2d), rtol=transform_rtol)
125
+ assert_allclose(pca.transform(X2), pcad.transform(X2d), rtol=transform_rtol)
126
+
127
+
128
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
129
+ def test_pca_sparse_fit_transform(global_random_seed, sparse_container):
130
+ random_state = np.random.default_rng(global_random_seed)
131
+ X = sparse_container(
132
+ sp.sparse.random(
133
+ SPARSE_M,
134
+ SPARSE_N,
135
+ random_state=random_state,
136
+ density=0.01,
137
+ )
138
+ )
139
+ X2 = sparse_container(
140
+ sp.sparse.random(
141
+ SPARSE_M,
142
+ SPARSE_N,
143
+ random_state=random_state,
144
+ density=0.01,
145
+ )
146
+ )
147
+
148
+ pca_fit = PCA(n_components=10, svd_solver="arpack", random_state=global_random_seed)
149
+ pca_fit_transform = PCA(
150
+ n_components=10, svd_solver="arpack", random_state=global_random_seed
151
+ )
152
+
153
+ pca_fit.fit(X)
154
+ transformed_X = pca_fit_transform.fit_transform(X)
155
+
156
+ _check_fitted_pca_close(pca_fit, pca_fit_transform, rtol=1e-10)
157
+ assert_allclose(transformed_X, pca_fit_transform.transform(X), rtol=2e-9)
158
+ assert_allclose(transformed_X, pca_fit.transform(X), rtol=2e-9)
159
+ assert_allclose(pca_fit.transform(X2), pca_fit_transform.transform(X2), rtol=2e-9)
160
+
161
+
162
+ @pytest.mark.parametrize("svd_solver", ["randomized", "full", "auto"])
163
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
164
+ def test_sparse_pca_solver_error(global_random_seed, svd_solver, sparse_container):
165
+ random_state = np.random.RandomState(global_random_seed)
166
+ X = sparse_container(
167
+ sp.sparse.random(
168
+ SPARSE_M,
169
+ SPARSE_N,
170
+ random_state=random_state,
171
+ )
172
+ )
173
+ pca = PCA(n_components=30, svd_solver=svd_solver)
174
+ error_msg_pattern = (
175
+ f'PCA only support sparse inputs with the "arpack" solver, while "{svd_solver}"'
176
+ " was passed"
177
+ )
178
+ with pytest.raises(TypeError, match=error_msg_pattern):
179
+ pca.fit(X)
180
+
181
+
182
+ def test_no_empty_slice_warning():
183
+ # test if we avoid numpy warnings for computing over empty arrays
184
+ n_components = 10
185
+ n_features = n_components + 2 # anything > n_comps triggered it in 0.16
186
+ X = np.random.uniform(-1, 1, size=(n_components, n_features))
187
+ pca = PCA(n_components=n_components)
188
+ with warnings.catch_warnings():
189
+ warnings.simplefilter("error", RuntimeWarning)
190
+ pca.fit(X)
191
+
192
+
193
+ @pytest.mark.parametrize("copy", [True, False])
194
+ @pytest.mark.parametrize("solver", PCA_SOLVERS)
195
+ def test_whitening(solver, copy):
196
+ # Check that PCA output has unit-variance
197
+ rng = np.random.RandomState(0)
198
+ n_samples = 100
199
+ n_features = 80
200
+ n_components = 30
201
+ rank = 50
202
+
203
+ # some low rank data with correlated features
204
+ X = np.dot(
205
+ rng.randn(n_samples, rank),
206
+ np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features)),
207
+ )
208
+ # the component-wise variance of the first 50 features is 3 times the
209
+ # mean component-wise variance of the remaining 30 features
210
+ X[:, :50] *= 3
211
+
212
+ assert X.shape == (n_samples, n_features)
213
+
214
+ # the component-wise variance is thus highly varying:
215
+ assert X.std(axis=0).std() > 43.8
216
+
217
+ # whiten the data while projecting to the lower dim subspace
218
+ X_ = X.copy() # make sure we keep an original across iterations.
219
+ pca = PCA(
220
+ n_components=n_components,
221
+ whiten=True,
222
+ copy=copy,
223
+ svd_solver=solver,
224
+ random_state=0,
225
+ iterated_power=7,
226
+ )
227
+ # test fit_transform
228
+ X_whitened = pca.fit_transform(X_.copy())
229
+ assert X_whitened.shape == (n_samples, n_components)
230
+ X_whitened2 = pca.transform(X_)
231
+ assert_allclose(X_whitened, X_whitened2, rtol=5e-4)
232
+
233
+ assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(n_components))
234
+ assert_allclose(X_whitened.mean(axis=0), np.zeros(n_components), atol=1e-12)
235
+
236
+ X_ = X.copy()
237
+ pca = PCA(
238
+ n_components=n_components, whiten=False, copy=copy, svd_solver=solver
239
+ ).fit(X_.copy())
240
+ X_unwhitened = pca.transform(X_)
241
+ assert X_unwhitened.shape == (n_samples, n_components)
242
+
243
+ # in that case the output components still have varying variances
244
+ assert X_unwhitened.std(axis=0).std() == pytest.approx(74.1, rel=1e-1)
245
+ # we always center, so no test for non-centering.
246
+
247
+
248
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
249
+ def test_pca_explained_variance_equivalence_solver(svd_solver):
250
+ rng = np.random.RandomState(0)
251
+ n_samples, n_features = 100, 80
252
+ X = rng.randn(n_samples, n_features)
253
+
254
+ pca_full = PCA(n_components=2, svd_solver="full")
255
+ pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=0)
256
+
257
+ pca_full.fit(X)
258
+ pca_other.fit(X)
259
+
260
+ assert_allclose(
261
+ pca_full.explained_variance_, pca_other.explained_variance_, rtol=5e-2
262
+ )
263
+ assert_allclose(
264
+ pca_full.explained_variance_ratio_,
265
+ pca_other.explained_variance_ratio_,
266
+ rtol=5e-2,
267
+ )
268
+
269
+
270
+ @pytest.mark.parametrize(
271
+ "X",
272
+ [
273
+ np.random.RandomState(0).randn(100, 80),
274
+ datasets.make_classification(100, 80, n_informative=78, random_state=0)[0],
275
+ ],
276
+ ids=["random-data", "correlated-data"],
277
+ )
278
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
279
+ def test_pca_explained_variance_empirical(X, svd_solver):
280
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=0)
281
+ X_pca = pca.fit_transform(X)
282
+ assert_allclose(pca.explained_variance_, np.var(X_pca, ddof=1, axis=0))
283
+
284
+ expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
285
+ expected_result = sorted(expected_result, reverse=True)[:2]
286
+ assert_allclose(pca.explained_variance_, expected_result, rtol=5e-3)
287
+
288
+
289
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
290
+ def test_pca_singular_values_consistency(svd_solver):
291
+ rng = np.random.RandomState(0)
292
+ n_samples, n_features = 100, 80
293
+ X = rng.randn(n_samples, n_features)
294
+
295
+ pca_full = PCA(n_components=2, svd_solver="full", random_state=rng)
296
+ pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
297
+
298
+ pca_full.fit(X)
299
+ pca_other.fit(X)
300
+
301
+ assert_allclose(pca_full.singular_values_, pca_other.singular_values_, rtol=5e-3)
302
+
303
+
304
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
305
+ def test_pca_singular_values(svd_solver):
306
+ rng = np.random.RandomState(0)
307
+ n_samples, n_features = 100, 80
308
+ X = rng.randn(n_samples, n_features)
309
+
310
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
311
+ X_trans = pca.fit_transform(X)
312
+
313
+ # compare to the Frobenius norm
314
+ assert_allclose(
315
+ np.sum(pca.singular_values_**2), np.linalg.norm(X_trans, "fro") ** 2
316
+ )
317
+ # Compare to the 2-norms of the score vectors
318
+ assert_allclose(pca.singular_values_, np.sqrt(np.sum(X_trans**2, axis=0)))
319
+
320
+ # set the singular values and see what we get back
321
+ n_samples, n_features = 100, 110
322
+ X = rng.randn(n_samples, n_features)
323
+
324
+ pca = PCA(n_components=3, svd_solver=svd_solver, random_state=rng)
325
+ X_trans = pca.fit_transform(X)
326
+ X_trans /= np.sqrt(np.sum(X_trans**2, axis=0))
327
+ X_trans[:, 0] *= 3.142
328
+ X_trans[:, 1] *= 2.718
329
+ X_hat = np.dot(X_trans, pca.components_)
330
+ pca.fit(X_hat)
331
+ assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0])
332
+
333
+
334
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
335
+ def test_pca_check_projection(svd_solver):
336
+ # Test that the projection of data is correct
337
+ rng = np.random.RandomState(0)
338
+ n, p = 100, 3
339
+ X = rng.randn(n, p) * 0.1
340
+ X[:10] += np.array([3, 4, 5])
341
+ Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
342
+
343
+ Yt = PCA(n_components=2, svd_solver=svd_solver).fit(X).transform(Xt)
344
+ Yt /= np.sqrt((Yt**2).sum())
345
+
346
+ assert_allclose(np.abs(Yt[0][0]), 1.0, rtol=5e-3)
347
+
348
+
349
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
350
+ def test_pca_check_projection_list(svd_solver):
351
+ # Test that the projection of data is correct
352
+ X = [[1.0, 0.0], [0.0, 1.0]]
353
+ pca = PCA(n_components=1, svd_solver=svd_solver, random_state=0)
354
+ X_trans = pca.fit_transform(X)
355
+ assert X_trans.shape == (2, 1)
356
+ assert_allclose(X_trans.mean(), 0.00, atol=1e-12)
357
+ assert_allclose(X_trans.std(), 0.71, rtol=5e-3)
358
+
359
+
360
+ @pytest.mark.parametrize("svd_solver", ["full", "arpack", "randomized"])
361
+ @pytest.mark.parametrize("whiten", [False, True])
362
+ def test_pca_inverse(svd_solver, whiten):
363
+ # Test that the projection of data can be inverted
364
+ rng = np.random.RandomState(0)
365
+ n, p = 50, 3
366
+ X = rng.randn(n, p) # spherical data
367
+ X[:, 1] *= 0.00001 # make middle component relatively small
368
+ X += [5, 4, 3] # make a large mean
369
+
370
+ # same check that we can find the original data from the transformed
371
+ # signal (since the data is almost of rank n_components)
372
+ pca = PCA(n_components=2, svd_solver=svd_solver, whiten=whiten).fit(X)
373
+ Y = pca.transform(X)
374
+ Y_inverse = pca.inverse_transform(Y)
375
+ assert_allclose(X, Y_inverse, rtol=5e-6)
376
+
377
+
378
+ @pytest.mark.parametrize(
379
+ "data", [np.array([[0, 1, 0], [1, 0, 0]]), np.array([[0, 1, 0], [1, 0, 0]]).T]
380
+ )
381
+ @pytest.mark.parametrize(
382
+ "svd_solver, n_components, err_msg",
383
+ [
384
+ ("arpack", 0, r"must be between 1 and min\(n_samples, n_features\)"),
385
+ ("randomized", 0, r"must be between 1 and min\(n_samples, n_features\)"),
386
+ ("arpack", 2, r"must be strictly less than min"),
387
+ (
388
+ "auto",
389
+ 3,
390
+ (
391
+ r"n_components=3 must be between 0 and min\(n_samples, "
392
+ r"n_features\)=2 with svd_solver='full'"
393
+ ),
394
+ ),
395
+ ],
396
+ )
397
+ def test_pca_validation(svd_solver, data, n_components, err_msg):
398
+ # Ensures that solver-specific extreme inputs for the n_components
399
+ # parameter raise errors
400
+ smallest_d = 2 # The smallest dimension
401
+ pca_fitted = PCA(n_components, svd_solver=svd_solver)
402
+
403
+ with pytest.raises(ValueError, match=err_msg):
404
+ pca_fitted.fit(data)
405
+
406
+ # Additional case for arpack
407
+ if svd_solver == "arpack":
408
+ n_components = smallest_d
409
+
410
+ err_msg = (
411
+ "n_components={}L? must be strictly less than "
412
+ r"min\(n_samples, n_features\)={}L? with "
413
+ "svd_solver='arpack'".format(n_components, smallest_d)
414
+ )
415
+ with pytest.raises(ValueError, match=err_msg):
416
+ PCA(n_components, svd_solver=svd_solver).fit(data)
417
+
418
+
419
+ @pytest.mark.parametrize(
420
+ "solver, n_components_",
421
+ [
422
+ ("full", min(iris.data.shape)),
423
+ ("arpack", min(iris.data.shape) - 1),
424
+ ("randomized", min(iris.data.shape)),
425
+ ],
426
+ )
427
+ @pytest.mark.parametrize("data", [iris.data, iris.data.T])
428
+ def test_n_components_none(data, solver, n_components_):
429
+ pca = PCA(svd_solver=solver)
430
+ pca.fit(data)
431
+ assert pca.n_components_ == n_components_
432
+
433
+
434
+ @pytest.mark.parametrize("svd_solver", ["auto", "full"])
435
+ def test_n_components_mle(svd_solver):
436
+ # Ensure that n_components == 'mle' doesn't raise error for auto/full
437
+ rng = np.random.RandomState(0)
438
+ n_samples, n_features = 600, 10
439
+ X = rng.randn(n_samples, n_features)
440
+ pca = PCA(n_components="mle", svd_solver=svd_solver)
441
+ pca.fit(X)
442
+ assert pca.n_components_ == 1
443
+
444
+
445
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
446
+ def test_n_components_mle_error(svd_solver):
447
+ # Ensure that n_components == 'mle' will raise an error for unsupported
448
+ # solvers
449
+ rng = np.random.RandomState(0)
450
+ n_samples, n_features = 600, 10
451
+ X = rng.randn(n_samples, n_features)
452
+ pca = PCA(n_components="mle", svd_solver=svd_solver)
453
+ err_msg = "n_components='mle' cannot be a string with svd_solver='{}'".format(
454
+ svd_solver
455
+ )
456
+ with pytest.raises(ValueError, match=err_msg):
457
+ pca.fit(X)
458
+
459
+
460
+ def test_pca_dim():
461
+ # Check automated dimensionality setting
462
+ rng = np.random.RandomState(0)
463
+ n, p = 100, 5
464
+ X = rng.randn(n, p) * 0.1
465
+ X[:10] += np.array([3, 4, 5, 1, 2])
466
+ pca = PCA(n_components="mle", svd_solver="full").fit(X)
467
+ assert pca.n_components == "mle"
468
+ assert pca.n_components_ == 1
469
+
470
+
471
+ def test_infer_dim_1():
472
+ # TODO: explain what this is testing
473
+ # Or at least use explicit variable names...
474
+ n, p = 1000, 5
475
+ rng = np.random.RandomState(0)
476
+ X = (
477
+ rng.randn(n, p) * 0.1
478
+ + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
479
+ + np.array([1, 0, 7, 4, 6])
480
+ )
481
+ pca = PCA(n_components=p, svd_solver="full")
482
+ pca.fit(X)
483
+ spect = pca.explained_variance_
484
+ ll = np.array([_assess_dimension(spect, k, n) for k in range(1, p)])
485
+ assert ll[1] > ll.max() - 0.01 * n
486
+
487
+
488
+ def test_infer_dim_2():
489
+ # TODO: explain what this is testing
490
+ # Or at least use explicit variable names...
491
+ n, p = 1000, 5
492
+ rng = np.random.RandomState(0)
493
+ X = rng.randn(n, p) * 0.1
494
+ X[:10] += np.array([3, 4, 5, 1, 2])
495
+ X[10:20] += np.array([6, 0, 7, 2, -1])
496
+ pca = PCA(n_components=p, svd_solver="full")
497
+ pca.fit(X)
498
+ spect = pca.explained_variance_
499
+ assert _infer_dimension(spect, n) > 1
500
+
501
+
502
+ def test_infer_dim_3():
503
+ n, p = 100, 5
504
+ rng = np.random.RandomState(0)
505
+ X = rng.randn(n, p) * 0.1
506
+ X[:10] += np.array([3, 4, 5, 1, 2])
507
+ X[10:20] += np.array([6, 0, 7, 2, -1])
508
+ X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
509
+ pca = PCA(n_components=p, svd_solver="full")
510
+ pca.fit(X)
511
+ spect = pca.explained_variance_
512
+ assert _infer_dimension(spect, n) > 2
513
+
514
+
515
+ @pytest.mark.parametrize(
516
+ "X, n_components, n_components_validated",
517
+ [
518
+ (iris.data, 0.95, 2), # row > col
519
+ (iris.data, 0.01, 1), # row > col
520
+ (np.random.RandomState(0).rand(5, 20), 0.5, 2),
521
+ ], # row < col
522
+ )
523
+ def test_infer_dim_by_explained_variance(X, n_components, n_components_validated):
524
+ pca = PCA(n_components=n_components, svd_solver="full")
525
+ pca.fit(X)
526
+ assert pca.n_components == pytest.approx(n_components)
527
+ assert pca.n_components_ == n_components_validated
528
+
529
+
530
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
531
+ def test_pca_score(svd_solver):
532
+ # Test that probabilistic PCA scoring yields a reasonable score
533
+ n, p = 1000, 3
534
+ rng = np.random.RandomState(0)
535
+ X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
536
+ pca = PCA(n_components=2, svd_solver=svd_solver)
537
+ pca.fit(X)
538
+
539
+ ll1 = pca.score(X)
540
+ h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1**2) * p
541
+ assert_allclose(ll1 / h, 1, rtol=5e-2)
542
+
543
+ ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5]))
544
+ assert ll1 > ll2
545
+
546
+ pca = PCA(n_components=2, whiten=True, svd_solver=svd_solver)
547
+ pca.fit(X)
548
+ ll2 = pca.score(X)
549
+ assert ll1 > ll2
550
+
551
+
552
+ def test_pca_score3():
553
+ # Check that probabilistic PCA selects the right model
554
+ n, p = 200, 3
555
+ rng = np.random.RandomState(0)
556
+ Xl = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
557
+ Xt = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
558
+ ll = np.zeros(p)
559
+ for k in range(p):
560
+ pca = PCA(n_components=k, svd_solver="full")
561
+ pca.fit(Xl)
562
+ ll[k] = pca.score(Xt)
563
+
564
+ assert ll.argmax() == 1
565
+
566
+
567
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
568
+ def test_pca_sanity_noise_variance(svd_solver):
569
+ # Sanity check for the noise_variance_. For more details see
570
+ # https://github.com/scikit-learn/scikit-learn/issues/7568
571
+ # https://github.com/scikit-learn/scikit-learn/issues/8541
572
+ # https://github.com/scikit-learn/scikit-learn/issues/8544
573
+ X, _ = datasets.load_digits(return_X_y=True)
574
+ pca = PCA(n_components=30, svd_solver=svd_solver, random_state=0)
575
+ pca.fit(X)
576
+ assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0)
577
+
578
+
579
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
580
+ def test_pca_score_consistency_solvers(svd_solver):
581
+ # Check the consistency of score between solvers
582
+ X, _ = datasets.load_digits(return_X_y=True)
583
+ pca_full = PCA(n_components=30, svd_solver="full", random_state=0)
584
+ pca_other = PCA(n_components=30, svd_solver=svd_solver, random_state=0)
585
+ pca_full.fit(X)
586
+ pca_other.fit(X)
587
+ assert_allclose(pca_full.score(X), pca_other.score(X), rtol=5e-6)
588
+
589
+
590
+ # arpack raises ValueError for n_components == min(n_samples, n_features)
591
+ @pytest.mark.parametrize("svd_solver", ["full", "randomized"])
592
+ def test_pca_zero_noise_variance_edge_cases(svd_solver):
593
+ # ensure that noise_variance_ is 0 in edge cases
594
+ # when n_components == min(n_samples, n_features)
595
+ n, p = 100, 3
596
+ rng = np.random.RandomState(0)
597
+ X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
598
+
599
+ pca = PCA(n_components=p, svd_solver=svd_solver)
600
+ pca.fit(X)
601
+ assert pca.noise_variance_ == 0
602
+ # Non-regression test for gh-12489
603
+ # ensure no divide-by-zero error for n_components == n_features < n_samples
604
+ pca.score(X)
605
+
606
+ pca.fit(X.T)
607
+ assert pca.noise_variance_ == 0
608
+ # Non-regression test for gh-12489
609
+ # ensure no divide-by-zero error for n_components == n_samples < n_features
610
+ pca.score(X.T)
611
+
612
+
613
+ @pytest.mark.parametrize(
614
+ "data, n_components, expected_solver",
615
+ [ # case: n_components in (0,1) => 'full'
616
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 0.5, "full"),
617
+ # case: max(X.shape) <= 500 => 'full'
618
+ (np.random.RandomState(0).uniform(size=(10, 50)), 5, "full"),
619
+ # case: n_components >= .8 * min(X.shape) => 'full'
620
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 50, "full"),
621
+ # n_components >= 1 and n_components < .8*min(X.shape) => 'randomized'
622
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 10, "randomized"),
623
+ ],
624
+ )
625
+ def test_pca_svd_solver_auto(data, n_components, expected_solver):
626
+ pca_auto = PCA(n_components=n_components, random_state=0)
627
+ pca_test = PCA(
628
+ n_components=n_components, svd_solver=expected_solver, random_state=0
629
+ )
630
+ pca_auto.fit(data)
631
+ pca_test.fit(data)
632
+ assert_allclose(pca_auto.components_, pca_test.components_)
633
+
634
+
635
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
636
+ def test_pca_deterministic_output(svd_solver):
637
+ rng = np.random.RandomState(0)
638
+ X = rng.rand(10, 10)
639
+
640
+ transformed_X = np.zeros((20, 2))
641
+ for i in range(20):
642
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
643
+ transformed_X[i, :] = pca.fit_transform(X)[0]
644
+ assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
645
+
646
+
647
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
648
+ def test_pca_dtype_preservation(svd_solver):
649
+ check_pca_float_dtype_preservation(svd_solver)
650
+ check_pca_int_dtype_upcast_to_double(svd_solver)
651
+
652
+
653
+ def check_pca_float_dtype_preservation(svd_solver):
654
+ # Ensure that PCA does not upscale the dtype when input is float32
655
+ X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False)
656
+ X_32 = X_64.astype(np.float32)
657
+
658
+ pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64)
659
+ pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_32)
660
+
661
+ assert pca_64.components_.dtype == np.float64
662
+ assert pca_32.components_.dtype == np.float32
663
+ assert pca_64.transform(X_64).dtype == np.float64
664
+ assert pca_32.transform(X_32).dtype == np.float32
665
+
666
+ # the rtol is set such that the test passes on all platforms tested on
667
+ # conda-forge: PR#15775
668
+ # see: https://github.com/conda-forge/scikit-learn-feedstock/pull/113
669
+ assert_allclose(pca_64.components_, pca_32.components_, rtol=2e-4)
670
+
671
+
672
+ def check_pca_int_dtype_upcast_to_double(svd_solver):
673
+ # Ensure that all int types will be upcast to float64
674
+ X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
675
+ X_i64 = X_i64.astype(np.int64, copy=False)
676
+ X_i32 = X_i64.astype(np.int32, copy=False)
677
+
678
+ pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64)
679
+ pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32)
680
+
681
+ assert pca_64.components_.dtype == np.float64
682
+ assert pca_32.components_.dtype == np.float64
683
+ assert pca_64.transform(X_i64).dtype == np.float64
684
+ assert pca_32.transform(X_i32).dtype == np.float64
685
+
686
+ assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-4)
687
+
688
+
689
+ def test_pca_n_components_mostly_explained_variance_ratio():
690
+ # when n_components is the second highest cumulative sum of the
691
+ # explained_variance_ratio_, then n_components_ should equal the
692
+ # number of features in the dataset #15669
693
+ X, y = load_iris(return_X_y=True)
694
+ pca1 = PCA().fit(X, y)
695
+
696
+ n_components = pca1.explained_variance_ratio_.cumsum()[-2]
697
+ pca2 = PCA(n_components=n_components).fit(X, y)
698
+ assert pca2.n_components_ == X.shape[1]
699
+
700
+
701
+ def test_assess_dimension_bad_rank():
702
+ # Test error when tested rank not in [1, n_features - 1]
703
+ spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
704
+ n_samples = 10
705
+ for rank in (0, 5):
706
+ with pytest.raises(ValueError, match=r"should be in \[1, n_features - 1\]"):
707
+ _assess_dimension(spectrum, rank, n_samples)
708
+
709
+
710
+ def test_small_eigenvalues_mle():
711
+ # Test rank associated with tiny eigenvalues are given a log-likelihood of
712
+ # -inf. The inferred rank will be 1
713
+ spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
714
+
715
+ assert _assess_dimension(spectrum, rank=1, n_samples=10) > -np.inf
716
+
717
+ for rank in (2, 3):
718
+ assert _assess_dimension(spectrum, rank, 10) == -np.inf
719
+
720
+ assert _infer_dimension(spectrum, 10) == 1
721
+
722
+
723
+ def test_mle_redundant_data():
724
+ # Test 'mle' with pathological X: only one relevant feature should give a
725
+ # rank of 1
726
+ X, _ = datasets.make_classification(
727
+ n_features=20,
728
+ n_informative=1,
729
+ n_repeated=18,
730
+ n_redundant=1,
731
+ n_clusters_per_class=1,
732
+ random_state=42,
733
+ )
734
+ pca = PCA(n_components="mle").fit(X)
735
+ assert pca.n_components_ == 1
736
+
737
+
738
+ def test_fit_mle_too_few_samples():
739
+ # Tests that an error is raised when the number of samples is smaller
740
+ # than the number of features during an mle fit
741
+ X, _ = datasets.make_classification(n_samples=20, n_features=21, random_state=42)
742
+
743
+ pca = PCA(n_components="mle", svd_solver="full")
744
+ with pytest.raises(
745
+ ValueError,
746
+ match="n_components='mle' is only supported if n_samples >= n_features",
747
+ ):
748
+ pca.fit(X)
749
+
750
+
751
+ def test_mle_simple_case():
752
+ # non-regression test for issue
753
+ # https://github.com/scikit-learn/scikit-learn/issues/16730
754
+ n_samples, n_dim = 1000, 10
755
+ X = np.random.RandomState(0).randn(n_samples, n_dim)
756
+ X[:, -1] = np.mean(X[:, :-1], axis=-1) # true X dim is ndim - 1
757
+ pca_skl = PCA("mle", svd_solver="full")
758
+ pca_skl.fit(X)
759
+ assert pca_skl.n_components_ == n_dim - 1
760
+
761
+
762
+ def test_assess_dimesion_rank_one():
763
+ # Make sure assess_dimension works properly on a matrix of rank 1
764
+ n_samples, n_features = 9, 6
765
+ X = np.ones((n_samples, n_features)) # rank 1 matrix
766
+ _, s, _ = np.linalg.svd(X, full_matrices=True)
767
+ # except for rank 1, all eigenvalues are 0 or close to 0 (floating point)
768
+ assert_allclose(s[1:], np.zeros(n_features - 1), atol=1e-12)
769
+
770
+ assert np.isfinite(_assess_dimension(s, rank=1, n_samples=n_samples))
771
+ for rank in range(2, n_features):
772
+ assert _assess_dimension(s, rank, n_samples) == -np.inf
773
+
774
+
775
+ def test_pca_randomized_svd_n_oversamples():
776
+ """Check that exposing and setting `n_oversamples` will provide accurate results
777
+ even when `X` has a large number of features.
778
+
779
+ Non-regression test for:
780
+ https://github.com/scikit-learn/scikit-learn/issues/20589
781
+ """
782
+ rng = np.random.RandomState(0)
783
+ n_features = 100
784
+ X = rng.randn(1_000, n_features)
785
+
786
+ # The default value of `n_oversamples` will lead to inaccurate results
787
+ # We force it to the number of features.
788
+ pca_randomized = PCA(
789
+ n_components=1,
790
+ svd_solver="randomized",
791
+ n_oversamples=n_features,
792
+ random_state=0,
793
+ ).fit(X)
794
+ pca_full = PCA(n_components=1, svd_solver="full").fit(X)
795
+ pca_arpack = PCA(n_components=1, svd_solver="arpack", random_state=0).fit(X)
796
+
797
+ assert_allclose(np.abs(pca_full.components_), np.abs(pca_arpack.components_))
798
+ assert_allclose(np.abs(pca_randomized.components_), np.abs(pca_arpack.components_))
799
+
800
+
801
+ def test_feature_names_out():
802
+ """Check feature names out for PCA."""
803
+ pca = PCA(n_components=2).fit(iris.data)
804
+
805
+ names = pca.get_feature_names_out()
806
+ assert_array_equal([f"pca{i}" for i in range(2)], names)
807
+
808
+
809
+ @pytest.mark.parametrize("copy", [True, False])
810
+ def test_variance_correctness(copy):
811
+ """Check the accuracy of PCA's internal variance calculation"""
812
+ rng = np.random.RandomState(0)
813
+ X = rng.randn(1000, 200)
814
+ pca = PCA().fit(X)
815
+ pca_var = pca.explained_variance_ / pca.explained_variance_ratio_
816
+ true_var = np.var(X, ddof=1, axis=0).sum()
817
+ np.testing.assert_allclose(pca_var, true_var)
818
+
819
+
820
+ def check_array_api_get_precision(name, estimator, array_namespace, device, dtype_name):
821
+ xp = _array_api_for_tests(array_namespace, device)
822
+ iris_np = iris.data.astype(dtype_name)
823
+ iris_xp = xp.asarray(iris_np, device=device)
824
+
825
+ estimator.fit(iris_np)
826
+ precision_np = estimator.get_precision()
827
+ covariance_np = estimator.get_covariance()
828
+
829
+ with config_context(array_api_dispatch=True):
830
+ estimator_xp = clone(estimator).fit(iris_xp)
831
+ precision_xp = estimator_xp.get_precision()
832
+ assert precision_xp.shape == (4, 4)
833
+ assert precision_xp.dtype == iris_xp.dtype
834
+
835
+ assert_allclose(
836
+ _convert_to_numpy(precision_xp, xp=xp),
837
+ precision_np,
838
+ atol=_atol_for_type(dtype_name),
839
+ )
840
+ covariance_xp = estimator_xp.get_covariance()
841
+ assert covariance_xp.shape == (4, 4)
842
+ assert covariance_xp.dtype == iris_xp.dtype
843
+
844
+ assert_allclose(
845
+ _convert_to_numpy(covariance_xp, xp=xp),
846
+ covariance_np,
847
+ atol=_atol_for_type(dtype_name),
848
+ )
849
+
850
+
851
+ @pytest.mark.parametrize(
852
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
853
+ )
854
+ @pytest.mark.parametrize(
855
+ "check",
856
+ [check_array_api_input_and_values, check_array_api_get_precision],
857
+ ids=_get_check_estimator_ids,
858
+ )
859
+ @pytest.mark.parametrize(
860
+ "estimator",
861
+ [
862
+ PCA(n_components=2, svd_solver="full"),
863
+ PCA(n_components=0.1, svd_solver="full", whiten=True),
864
+ PCA(
865
+ n_components=2,
866
+ svd_solver="randomized",
867
+ power_iteration_normalizer="QR",
868
+ random_state=0, # how to use global_random_seed here?
869
+ ),
870
+ ],
871
+ ids=_get_check_estimator_ids,
872
+ )
873
+ def test_pca_array_api_compliance(
874
+ estimator, check, array_namespace, device, dtype_name
875
+ ):
876
+ name = estimator.__class__.__name__
877
+ check(name, estimator, array_namespace, device=device, dtype_name=dtype_name)
878
+
879
+
880
+ @pytest.mark.parametrize(
881
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
882
+ )
883
+ @pytest.mark.parametrize(
884
+ "check",
885
+ [check_array_api_get_precision],
886
+ ids=_get_check_estimator_ids,
887
+ )
888
+ @pytest.mark.parametrize(
889
+ "estimator",
890
+ [
891
+ # PCA with mle cannot use check_array_api_input_and_values because of
892
+ # rounding errors in the noisy (low variance) components. Even checking
893
+ # the shape of the `components_` is problematic because the number of
894
+ # components depends on trimming threshold of the mle algorithm which
895
+ # can depend on device-specific rounding errors.
896
+ PCA(n_components="mle", svd_solver="full"),
897
+ ],
898
+ ids=_get_check_estimator_ids,
899
+ )
900
+ def test_pca_mle_array_api_compliance(
901
+ estimator, check, array_namespace, device, dtype_name
902
+ ):
903
+ name = estimator.__class__.__name__
904
+ check(name, estimator, array_namespace, device=device, dtype_name=dtype_name)
905
+
906
+ # Simpler variant of the generic check_array_api_input checker tailored for
907
+ # the specific case of PCA with mle-trimmed components.
908
+ xp = _array_api_for_tests(array_namespace, device)
909
+
910
+ X, y = make_classification(random_state=42)
911
+ X = X.astype(dtype_name, copy=False)
912
+ atol = _atol_for_type(X.dtype)
913
+
914
+ est = clone(estimator)
915
+
916
+ X_xp = xp.asarray(X, device=device)
917
+ y_xp = xp.asarray(y, device=device)
918
+
919
+ est.fit(X, y)
920
+
921
+ components_np = est.components_
922
+ explained_variance_np = est.explained_variance_
923
+
924
+ est_xp = clone(est)
925
+ with config_context(array_api_dispatch=True):
926
+ est_xp.fit(X_xp, y_xp)
927
+ components_xp = est_xp.components_
928
+ assert array_device(components_xp) == array_device(X_xp)
929
+ components_xp_np = _convert_to_numpy(components_xp, xp=xp)
930
+
931
+ explained_variance_xp = est_xp.explained_variance_
932
+ assert array_device(explained_variance_xp) == array_device(X_xp)
933
+ explained_variance_xp_np = _convert_to_numpy(explained_variance_xp, xp=xp)
934
+
935
+ assert components_xp_np.dtype == components_np.dtype
936
+ assert components_xp_np.shape[1] == components_np.shape[1]
937
+ assert explained_variance_xp_np.dtype == explained_variance_np.dtype
938
+
939
+ # Check that the explained variance values match for the
940
+ # common components:
941
+ min_components = min(components_xp_np.shape[0], components_np.shape[0])
942
+ assert_allclose(
943
+ explained_variance_xp_np[:min_components],
944
+ explained_variance_np[:min_components],
945
+ atol=atol,
946
+ )
947
+
948
+ # If the number of components differ, check that the explained variance of
949
+ # the trimmed components is very small.
950
+ if components_xp_np.shape[0] != components_np.shape[0]:
951
+ reference_variance = explained_variance_np[-1]
952
+ extra_variance_np = explained_variance_np[min_components:]
953
+ extra_variance_xp_np = explained_variance_xp_np[min_components:]
954
+ assert all(np.abs(extra_variance_np - reference_variance) < atol)
955
+ assert all(np.abs(extra_variance_xp_np - reference_variance) < atol)
956
+
957
+
958
+ def test_array_api_error_and_warnings_on_unsupported_params():
959
+ pytest.importorskip("array_api_compat")
960
+ xp = pytest.importorskip("numpy.array_api")
961
+ iris_xp = xp.asarray(iris.data)
962
+
963
+ pca = PCA(n_components=2, svd_solver="arpack", random_state=0)
964
+ expected_msg = re.escape(
965
+ "PCA with svd_solver='arpack' is not supported for Array API inputs."
966
+ )
967
+ with pytest.raises(ValueError, match=expected_msg):
968
+ with config_context(array_api_dispatch=True):
969
+ pca.fit(iris_xp)
970
+
971
+ pca.set_params(svd_solver="randomized", power_iteration_normalizer="LU")
972
+ expected_msg = re.escape(
973
+ "Array API does not support LU factorization. Set"
974
+ " `power_iteration_normalizer='QR'` instead."
975
+ )
976
+ with pytest.raises(ValueError, match=expected_msg):
977
+ with config_context(array_api_dispatch=True):
978
+ pca.fit(iris_xp)
979
+
980
+ pca.set_params(svd_solver="randomized", power_iteration_normalizer="auto")
981
+ expected_msg = re.escape(
982
+ "Array API does not support LU factorization, falling back to QR instead. Set"
983
+ " `power_iteration_normalizer='QR'` explicitly to silence this warning."
984
+ )
985
+ with pytest.warns(UserWarning, match=expected_msg):
986
+ with config_context(array_api_dispatch=True):
987
+ pca.fit(iris_xp)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py ADDED
@@ -0,0 +1,367 @@
1
+ # Author: Vlad Niculae
2
+ # License: BSD 3 clause
3
+
4
+ import sys
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from numpy.testing import assert_array_equal
9
+
10
+ from sklearn.decomposition import PCA, MiniBatchSparsePCA, SparsePCA
11
+ from sklearn.utils import check_random_state
12
+ from sklearn.utils._testing import (
13
+ assert_allclose,
14
+ assert_array_almost_equal,
15
+ if_safe_multiprocessing_with_blas,
16
+ )
17
+
18
+
19
+ def generate_toy_data(n_components, n_samples, image_size, random_state=None):
20
+ n_features = image_size[0] * image_size[1]
21
+
22
+ rng = check_random_state(random_state)
23
+ U = rng.randn(n_samples, n_components)
24
+ V = rng.randn(n_components, n_features)
25
+
26
+ centers = [(3, 3), (6, 7), (8, 1)]
27
+ sz = [1, 2, 1]
28
+ for k in range(n_components):
29
+ img = np.zeros(image_size)
30
+ xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
31
+ ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
32
+ img[xmin:xmax][:, ymin:ymax] = 1.0
33
+ V[k, :] = img.ravel()
34
+
35
+ # Y is defined by : Y = UV + noise
36
+ Y = np.dot(U, V)
37
+ Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
38
+ return Y, U, V
39
+
40
+
41
+ # SparsePCA can be a bit slow. To avoid having test times go up, we
42
+ # test different aspects of the code in the same test
43
+
44
+
45
+ def test_correct_shapes():
46
+ rng = np.random.RandomState(0)
47
+ X = rng.randn(12, 10)
48
+ spca = SparsePCA(n_components=8, random_state=rng)
49
+ U = spca.fit_transform(X)
50
+ assert spca.components_.shape == (8, 10)
51
+ assert U.shape == (12, 8)
52
+ # test overcomplete decomposition
53
+ spca = SparsePCA(n_components=13, random_state=rng)
54
+ U = spca.fit_transform(X)
55
+ assert spca.components_.shape == (13, 10)
56
+ assert U.shape == (12, 13)
57
+
58
+
59
+ def test_fit_transform():
60
+ alpha = 1
61
+ rng = np.random.RandomState(0)
62
+ Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
63
+ spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0)
64
+ spca_lars.fit(Y)
65
+
66
+ # Test that CD gives similar results
67
+ spca_lasso = SparsePCA(n_components=3, method="cd", random_state=0, alpha=alpha)
68
+ spca_lasso.fit(Y)
69
+ assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
70
+
71
+
72
+ @if_safe_multiprocessing_with_blas
73
+ def test_fit_transform_parallel():
74
+ alpha = 1
75
+ rng = np.random.RandomState(0)
76
+ Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
77
+ spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0)
78
+ spca_lars.fit(Y)
79
+ U1 = spca_lars.transform(Y)
80
+ # Test multiple CPUs
81
+ spca = SparsePCA(
82
+ n_components=3, n_jobs=2, method="lars", alpha=alpha, random_state=0
83
+ ).fit(Y)
84
+ U2 = spca.transform(Y)
85
+ assert not np.all(spca_lars.components_ == 0)
86
+ assert_array_almost_equal(U1, U2)
87
+
88
+
89
+ def test_transform_nan():
90
+ # Test that SparsePCA won't return NaN when a feature is zero in all
91
+ # samples.
92
+ rng = np.random.RandomState(0)
93
+ Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
94
+ Y[:, 0] = 0
95
+ estimator = SparsePCA(n_components=8)
96
+ assert not np.any(np.isnan(estimator.fit_transform(Y)))
97
+
98
+
99
+ def test_fit_transform_tall():
100
+ rng = np.random.RandomState(0)
101
+ Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
102
+ spca_lars = SparsePCA(n_components=3, method="lars", random_state=rng)
103
+ U1 = spca_lars.fit_transform(Y)
104
+ spca_lasso = SparsePCA(n_components=3, method="cd", random_state=rng)
105
+ U2 = spca_lasso.fit(Y).transform(Y)
106
+ assert_array_almost_equal(U1, U2)
107
+
108
+
109
+ def test_initialization():
110
+ rng = np.random.RandomState(0)
111
+ U_init = rng.randn(5, 3)
112
+ V_init = rng.randn(3, 4)
113
+ model = SparsePCA(
114
+ n_components=3, U_init=U_init, V_init=V_init, max_iter=0, random_state=rng
115
+ )
116
+ model.fit(rng.randn(5, 4))
117
+ assert_allclose(model.components_, V_init / np.linalg.norm(V_init, axis=1)[:, None])
118
+
119
+
120
+ def test_mini_batch_correct_shapes():
121
+ rng = np.random.RandomState(0)
122
+ X = rng.randn(12, 10)
123
+ pca = MiniBatchSparsePCA(n_components=8, max_iter=1, random_state=rng)
124
+ U = pca.fit_transform(X)
125
+ assert pca.components_.shape == (8, 10)
126
+ assert U.shape == (12, 8)
127
+ # test overcomplete decomposition
128
+ pca = MiniBatchSparsePCA(n_components=13, max_iter=1, random_state=rng)
129
+ U = pca.fit_transform(X)
130
+ assert pca.components_.shape == (13, 10)
131
+ assert U.shape == (12, 13)
132
+
133
+
134
+ # XXX: test always skipped
135
+ @pytest.mark.skipif(True, reason="skipping mini_batch_fit_transform.")
136
+ def test_mini_batch_fit_transform():
137
+ alpha = 1
138
+ rng = np.random.RandomState(0)
139
+ Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
140
+ spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y)
141
+ U1 = spca_lars.transform(Y)
142
+ # Test multiple CPUs
143
+ if sys.platform == "win32": # fake parallelism for win32
144
+ import joblib
145
+
146
+ _mp = joblib.parallel.multiprocessing
147
+ joblib.parallel.multiprocessing = None
148
+ try:
149
+ spca = MiniBatchSparsePCA(
150
+ n_components=3, n_jobs=2, alpha=alpha, random_state=0
151
+ )
152
+ U2 = spca.fit(Y).transform(Y)
153
+ finally:
154
+ joblib.parallel.multiprocessing = _mp
155
+ else: # we can efficiently use parallelism
156
+ spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0)
157
+ U2 = spca.fit(Y).transform(Y)
158
+ assert not np.all(spca_lars.components_ == 0)
159
+ assert_array_almost_equal(U1, U2)
160
+ # Test that CD gives similar results
161
+ spca_lasso = MiniBatchSparsePCA(
162
+ n_components=3, method="cd", alpha=alpha, random_state=0
163
+ ).fit(Y)
164
+ assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
165
+
166
+
167
+ def test_scaling_fit_transform():
168
+ alpha = 1
169
+ rng = np.random.RandomState(0)
170
+ Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
171
+ spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=rng)
172
+ results_train = spca_lars.fit_transform(Y)
173
+ results_test = spca_lars.transform(Y[:10])
174
+ assert_allclose(results_train[0], results_test[0])
175
+
176
+
177
+ def test_pca_vs_spca():
178
+ rng = np.random.RandomState(0)
179
+ Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
180
+ Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
181
+ spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2)
182
+ pca = PCA(n_components=2)
183
+ pca.fit(Y)
184
+ spca.fit(Y)
185
+ results_test_pca = pca.transform(Z)
186
+ results_test_spca = spca.transform(Z)
187
+ assert_allclose(
188
+ np.abs(spca.components_.dot(pca.components_.T)), np.eye(2), atol=1e-5
189
+ )
190
+ results_test_pca *= np.sign(results_test_pca[0, :])
191
+ results_test_spca *= np.sign(results_test_spca[0, :])
192
+ assert_allclose(results_test_pca, results_test_spca)
193
+
194
+
195
+ @pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
196
+ @pytest.mark.parametrize("n_components", [None, 3])
197
+ def test_spca_n_components_(SPCA, n_components):
198
+ rng = np.random.RandomState(0)
199
+ n_samples, n_features = 12, 10
200
+ X = rng.randn(n_samples, n_features)
201
+
202
+ model = SPCA(n_components=n_components).fit(X)
203
+
204
+ if n_components is not None:
205
+ assert model.n_components_ == n_components
206
+ else:
207
+ assert model.n_components_ == n_features
208
+
209
+
210
+ @pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA))
211
+ @pytest.mark.parametrize("method", ("lars", "cd"))
212
+ @pytest.mark.parametrize(
213
+ "data_type, expected_type",
214
+ (
215
+ (np.float32, np.float32),
216
+ (np.float64, np.float64),
217
+ (np.int32, np.float64),
218
+ (np.int64, np.float64),
219
+ ),
220
+ )
221
+ def test_sparse_pca_dtype_match(SPCA, method, data_type, expected_type):
222
+ # Verify output matrix dtype
223
+ n_samples, n_features, n_components = 12, 10, 3
224
+ rng = np.random.RandomState(0)
225
+ input_array = rng.randn(n_samples, n_features).astype(data_type)
226
+ model = SPCA(n_components=n_components, method=method)
227
+ transformed = model.fit_transform(input_array)
228
+
229
+ assert transformed.dtype == expected_type
230
+ assert model.components_.dtype == expected_type
231
+
232
+
233
+ @pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA))
234
+ @pytest.mark.parametrize("method", ("lars", "cd"))
235
+ def test_sparse_pca_numerical_consistency(SPCA, method):
236
+ # Verify numerical consistency between np.float32 and np.float64
237
+ rtol = 1e-3
238
+ alpha = 2
239
+ n_samples, n_features, n_components = 12, 10, 3
240
+ rng = np.random.RandomState(0)
241
+ input_array = rng.randn(n_samples, n_features)
242
+
243
+ model_32 = SPCA(
244
+ n_components=n_components, alpha=alpha, method=method, random_state=0
245
+ )
246
+ transformed_32 = model_32.fit_transform(input_array.astype(np.float32))
247
+
248
+ model_64 = SPCA(
249
+ n_components=n_components, alpha=alpha, method=method, random_state=0
250
+ )
251
+ transformed_64 = model_64.fit_transform(input_array.astype(np.float64))
252
+
253
+ assert_allclose(transformed_64, transformed_32, rtol=rtol)
254
+ assert_allclose(model_64.components_, model_32.components_, rtol=rtol)
255
+
256
+
257
+ @pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
258
+ def test_spca_feature_names_out(SPCA):
259
+ """Check feature names out for *SparsePCA."""
260
+ rng = np.random.RandomState(0)
261
+ n_samples, n_features = 12, 10
262
+ X = rng.randn(n_samples, n_features)
263
+
264
+ model = SPCA(n_components=4).fit(X)
265
+ names = model.get_feature_names_out()
266
+
267
+ estimator_name = SPCA.__name__.lower()
268
+ assert_array_equal([f"{estimator_name}{i}" for i in range(4)], names)
269
+
270
+
271
+ # TODO(1.6): remove in 1.6
272
+ def test_spca_max_iter_None_deprecation():
273
+ """Check that we raise a warning for the deprecation of `max_iter=None`."""
274
+ rng = np.random.RandomState(0)
275
+ n_samples, n_features = 12, 10
276
+ X = rng.randn(n_samples, n_features)
277
+
278
+ warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed"
279
+ with pytest.warns(FutureWarning, match=warn_msg):
280
+ MiniBatchSparsePCA(max_iter=None).fit(X)
281
+
282
+
283
+ def test_spca_early_stopping(global_random_seed):
284
+ """Check that `tol` and `max_no_improvement` act as early stopping."""
285
+ rng = np.random.RandomState(global_random_seed)
286
+ n_samples, n_features = 50, 10
287
+ X = rng.randn(n_samples, n_features)
288
+
289
+ # vary the tolerance to force the early stopping of one of the model
290
+ model_early_stopped = MiniBatchSparsePCA(
291
+ max_iter=100, tol=0.5, random_state=global_random_seed
292
+ ).fit(X)
293
+ model_not_early_stopped = MiniBatchSparsePCA(
294
+ max_iter=100, tol=1e-3, random_state=global_random_seed
295
+ ).fit(X)
296
+ assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_
297
+
298
+ # force the max number of no improvement to a large value to check that
299
+ # it does help to early stop
300
+ model_early_stopped = MiniBatchSparsePCA(
301
+ max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed
302
+ ).fit(X)
303
+ model_not_early_stopped = MiniBatchSparsePCA(
304
+ max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed
305
+ ).fit(X)
306
+ assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_
307
+
308
+
309
+ def test_equivalence_components_pca_spca(global_random_seed):
310
+ """Check the equivalence of the components found by PCA and SparsePCA.
311
+
312
+ Non-regression test for:
313
+ https://github.com/scikit-learn/scikit-learn/issues/23932
314
+ """
315
+ rng = np.random.RandomState(global_random_seed)
316
+ X = rng.randn(50, 4)
317
+
318
+ n_components = 2
319
+ pca = PCA(
320
+ n_components=n_components,
321
+ svd_solver="randomized",
322
+ random_state=0,
323
+ ).fit(X)
324
+ spca = SparsePCA(
325
+ n_components=n_components,
326
+ method="lars",
327
+ ridge_alpha=0,
328
+ alpha=0,
329
+ random_state=0,
330
+ ).fit(X)
331
+
332
+ assert_allclose(pca.components_, spca.components_)
333
+
334
+
335
+ def test_sparse_pca_inverse_transform():
336
+ """Check that `inverse_transform` in `SparsePCA` and `PCA` are similar."""
337
+ rng = np.random.RandomState(0)
338
+ n_samples, n_features = 10, 5
339
+ X = rng.randn(n_samples, n_features)
340
+
341
+ n_components = 2
342
+ spca = SparsePCA(
343
+ n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0
344
+ )
345
+ pca = PCA(n_components=n_components, random_state=0)
346
+ X_trans_spca = spca.fit_transform(X)
347
+ X_trans_pca = pca.fit_transform(X)
348
+ assert_allclose(
349
+ spca.inverse_transform(X_trans_spca), pca.inverse_transform(X_trans_pca)
350
+ )
351
+
352
+
353
+ @pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
354
+ def test_transform_inverse_transform_round_trip(SPCA):
355
+ """Check the `transform` and `inverse_transform` round trip with no loss of
356
+ information.
357
+ """
358
+ rng = np.random.RandomState(0)
359
+ n_samples, n_features = 10, 5
360
+ X = rng.randn(n_samples, n_features)
361
+
362
+ n_components = n_features
363
+ spca = SPCA(
364
+ n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0
365
+ )
366
+ X_trans_spca = spca.fit_transform(X)
367
+ assert_allclose(spca.inverse_transform(X_trans_spca), X)
llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py ADDED
@@ -0,0 +1,212 @@
+"""Test truncated SVD transformer."""
+
+import numpy as np
+import pytest
+import scipy.sparse as sp
+
+from sklearn.decomposition import PCA, TruncatedSVD
+from sklearn.utils import check_random_state
+from sklearn.utils._testing import assert_allclose, assert_array_less
+
+SVD_SOLVERS = ["arpack", "randomized"]
+
+
+@pytest.fixture(scope="module")
+def X_sparse():
+    # Make an X that looks somewhat like a small tf-idf matrix.
+    rng = check_random_state(42)
+    X = sp.random(60, 55, density=0.2, format="csr", random_state=rng)
+    X.data[:] = 1 + np.log(X.data)
+    return X
+
+
+@pytest.mark.parametrize("solver", ["randomized"])
+@pytest.mark.parametrize("kind", ("dense", "sparse"))
+def test_solvers(X_sparse, solver, kind):
+    X = X_sparse if kind == "sparse" else X_sparse.toarray()
+    svd_a = TruncatedSVD(30, algorithm="arpack")
+    svd = TruncatedSVD(30, algorithm=solver, random_state=42, n_oversamples=100)
+
+    Xa = svd_a.fit_transform(X)[:, :6]
+    Xr = svd.fit_transform(X)[:, :6]
+    assert_allclose(Xa, Xr, rtol=2e-3)
+
+    comp_a = np.abs(svd_a.components_)
+    comp = np.abs(svd.components_)
+    # All elements are equal, but some elements are more equal than others.
+    assert_allclose(comp_a[:9], comp[:9], rtol=1e-3)
+    assert_allclose(comp_a[9:], comp[9:], atol=1e-2)
+
+
+@pytest.mark.parametrize("n_components", (10, 25, 41, 55))
+def test_attributes(n_components, X_sparse):
+    n_features = X_sparse.shape[1]
+    tsvd = TruncatedSVD(n_components).fit(X_sparse)
+    assert tsvd.n_components == n_components
+    assert tsvd.components_.shape == (n_components, n_features)
+
+
+@pytest.mark.parametrize(
+    "algorithm, n_components",
+    [
+        ("arpack", 55),
+        ("arpack", 56),
+        ("randomized", 56),
+    ],
+)
+def test_too_many_components(X_sparse, algorithm, n_components):
+    tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
+    with pytest.raises(ValueError):
+        tsvd.fit(X_sparse)
+
+
+@pytest.mark.parametrize("fmt", ("array", "csr", "csc", "coo", "lil"))
+def test_sparse_formats(fmt, X_sparse):
+    n_samples = X_sparse.shape[0]
+    Xfmt = X_sparse.toarray() if fmt == "dense" else getattr(X_sparse, "to" + fmt)()
+    tsvd = TruncatedSVD(n_components=11)
+    Xtrans = tsvd.fit_transform(Xfmt)
+    assert Xtrans.shape == (n_samples, 11)
+    Xtrans = tsvd.transform(Xfmt)
+    assert Xtrans.shape == (n_samples, 11)
+
+
+@pytest.mark.parametrize("algo", SVD_SOLVERS)
+def test_inverse_transform(algo, X_sparse):
+    # We need a lot of components for the reconstruction to be "almost
+    # equal" in all positions. XXX Test means or sums instead?
+    tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
+    Xt = tsvd.fit_transform(X_sparse)
+    Xinv = tsvd.inverse_transform(Xt)
+    assert_allclose(Xinv, X_sparse.toarray(), rtol=1e-1, atol=2e-1)
+
+
+def test_integers(X_sparse):
+    n_samples = X_sparse.shape[0]
+    Xint = X_sparse.astype(np.int64)
+    tsvd = TruncatedSVD(n_components=6)
+    Xtrans = tsvd.fit_transform(Xint)
+    assert Xtrans.shape == (n_samples, tsvd.n_components)
+
+
+@pytest.mark.parametrize("kind", ("dense", "sparse"))
+@pytest.mark.parametrize("n_components", [10, 20])
+@pytest.mark.parametrize("solver", SVD_SOLVERS)
+def test_explained_variance(X_sparse, kind, n_components, solver):
+    X = X_sparse if kind == "sparse" else X_sparse.toarray()
+    svd = TruncatedSVD(n_components, algorithm=solver)
+    X_tr = svd.fit_transform(X)
+    # Assert that all the values are greater than 0
+    assert_array_less(0.0, svd.explained_variance_ratio_)
+
+    # Assert that total explained variance is less than 1
+    assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
+
+    # Test that explained_variance is correct
+    total_variance = np.var(X_sparse.toarray(), axis=0).sum()
+    variances = np.var(X_tr, axis=0)
+    true_explained_variance_ratio = variances / total_variance
+
+    assert_allclose(
+        svd.explained_variance_ratio_,
+        true_explained_variance_ratio,
+    )
+
+
+@pytest.mark.parametrize("kind", ("dense", "sparse"))
+@pytest.mark.parametrize("solver", SVD_SOLVERS)
+def test_explained_variance_components_10_20(X_sparse, kind, solver):
+    X = X_sparse if kind == "sparse" else X_sparse.toarray()
+    svd_10 = TruncatedSVD(10, algorithm=solver, n_iter=10).fit(X)
+    svd_20 = TruncatedSVD(20, algorithm=solver, n_iter=10).fit(X)
+
+    # Assert the 1st component is equal
+    assert_allclose(
+        svd_10.explained_variance_ratio_,
+        svd_20.explained_variance_ratio_[:10],
+        rtol=5e-3,
+    )
+
+    # Assert that 20 components has higher explained variance than 10
+    assert (
+        svd_20.explained_variance_ratio_.sum() > svd_10.explained_variance_ratio_.sum()
+    )
+
+
+@pytest.mark.parametrize("solver", SVD_SOLVERS)
+def test_singular_values_consistency(solver):
+    # Check that the TruncatedSVD output has the correct singular values
+    rng = np.random.RandomState(0)
+    n_samples, n_features = 100, 80
+    X = rng.randn(n_samples, n_features)
+
+    pca = TruncatedSVD(n_components=2, algorithm=solver, random_state=rng).fit(X)
+
+    # Compare to the Frobenius norm
+    X_pca = pca.transform(X)
+    assert_allclose(
+        np.sum(pca.singular_values_**2.0),
+        np.linalg.norm(X_pca, "fro") ** 2.0,
+        rtol=1e-2,
+    )
+
+    # Compare to the 2-norms of the score vectors
+    assert_allclose(
+        pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), rtol=1e-2
+    )
+
+
+@pytest.mark.parametrize("solver", SVD_SOLVERS)
+def test_singular_values_expected(solver):
+    # Set the singular values and see what we get back
+    rng = np.random.RandomState(0)
+    n_samples = 100
+    n_features = 110
+
+    X = rng.randn(n_samples, n_features)
+
+    pca = TruncatedSVD(n_components=3, algorithm=solver, random_state=rng)
+    X_pca = pca.fit_transform(X)
+
+    X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
+    X_pca[:, 0] *= 3.142
+    X_pca[:, 1] *= 2.718
+
+    X_hat_pca = np.dot(X_pca, pca.components_)
+    pca.fit(X_hat_pca)
+    assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0], rtol=1e-14)
+
+
+def test_truncated_svd_eq_pca(X_sparse):
+    # TruncatedSVD should be equal to PCA on centered data
+
+    X_dense = X_sparse.toarray()
+
+    X_c = X_dense - X_dense.mean(axis=0)
+
+    params = dict(n_components=10, random_state=42)
+
+    svd = TruncatedSVD(algorithm="arpack", **params)
+    pca = PCA(svd_solver="arpack", **params)
+
+    Xt_svd = svd.fit_transform(X_c)
+    Xt_pca = pca.fit_transform(X_c)
+
+    assert_allclose(Xt_svd, Xt_pca, rtol=1e-9)
+    assert_allclose(pca.mean_, 0, atol=1e-9)
+    assert_allclose(svd.components_, pca.components_)
+
+
+@pytest.mark.parametrize(
+    "algorithm, tol", [("randomized", 0.0), ("arpack", 1e-6), ("arpack", 0.0)]
+)
+@pytest.mark.parametrize("kind", ("dense", "sparse"))
+def test_fit_transform(X_sparse, algorithm, tol, kind):
+    # fit_transform(X) should equal fit(X).transform(X)
+    X = X_sparse if kind == "sparse" else X_sparse.toarray()
+    svd = TruncatedSVD(
+        n_components=5, n_iter=7, random_state=42, algorithm=algorithm, tol=tol
+    )
+    X_transformed_1 = svd.fit_transform(X)
+    X_transformed_2 = svd.fit(X).transform(X)
+    assert_allclose(X_transformed_1, X_transformed_2)
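A minimal sketch (not from the diff above; the toy data and tolerance are illustrative assumptions) of the property that test_truncated_svd_eq_pca checks: once the data has been centered, TruncatedSVD and PCA compute the same factorization.

# Sketch only: any dense, centered matrix works; here a random 60x55 array.
import numpy as np
from sklearn.decomposition import PCA, TruncatedSVD

rng = np.random.RandomState(0)
X = rng.randn(60, 55)
X_centered = X - X.mean(axis=0)  # PCA centers internally; TruncatedSVD does not

svd = TruncatedSVD(n_components=10, algorithm="arpack", random_state=42).fit(X_centered)
pca = PCA(n_components=10, svd_solver="arpack", random_state=42).fit(X_centered)

# On centered data the singular values (and hence explained variance) agree.
np.testing.assert_allclose(svd.singular_values_, pca.singular_values_, rtol=1e-6)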
llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__init__.py ADDED
@@ -0,0 +1,88 @@
+import typing
+
+from ._plot import LearningCurveDisplay, ValidationCurveDisplay
+from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV
+from ._split import (
+    BaseCrossValidator,
+    BaseShuffleSplit,
+    GroupKFold,
+    GroupShuffleSplit,
+    KFold,
+    LeaveOneGroupOut,
+    LeaveOneOut,
+    LeavePGroupsOut,
+    LeavePOut,
+    PredefinedSplit,
+    RepeatedKFold,
+    RepeatedStratifiedKFold,
+    ShuffleSplit,
+    StratifiedGroupKFold,
+    StratifiedKFold,
+    StratifiedShuffleSplit,
+    TimeSeriesSplit,
+    check_cv,
+    train_test_split,
+)
+from ._validation import (
+    cross_val_predict,
+    cross_val_score,
+    cross_validate,
+    learning_curve,
+    permutation_test_score,
+    validation_curve,
+)
+
+if typing.TYPE_CHECKING:
+    # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
+    # TODO: remove this check once the estimator is no longer experimental.
+    from ._search_successive_halving import (  # noqa
+        HalvingGridSearchCV,
+        HalvingRandomSearchCV,
+    )
+
+
+__all__ = [
+    "BaseCrossValidator",
+    "BaseShuffleSplit",
+    "GridSearchCV",
+    "TimeSeriesSplit",
+    "KFold",
+    "GroupKFold",
+    "GroupShuffleSplit",
+    "LeaveOneGroupOut",
+    "LeaveOneOut",
+    "LeavePGroupsOut",
+    "LeavePOut",
+    "RepeatedKFold",
+    "RepeatedStratifiedKFold",
+    "ParameterGrid",
+    "ParameterSampler",
+    "PredefinedSplit",
+    "RandomizedSearchCV",
+    "ShuffleSplit",
+    "StratifiedKFold",
+    "StratifiedGroupKFold",
+    "StratifiedShuffleSplit",
+    "check_cv",
+    "cross_val_predict",
+    "cross_val_score",
+    "cross_validate",
+    "learning_curve",
+    "LearningCurveDisplay",
+    "permutation_test_score",
+    "train_test_split",
+    "validation_curve",
+    "ValidationCurveDisplay",
+]
+
+
+# TODO: remove this check once the estimator is no longer experimental.
+def __getattr__(name):
+    if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}:
+        raise ImportError(
+            f"{name} is experimental and the API might change without any "
+            "deprecation cycle. To use it, you need to explicitly import "
+            "enable_halving_search_cv:\n"
+            "from sklearn.experimental import enable_halving_search_cv"
+        )
+    raise AttributeError(f"module {__name__} has no attribute {name}")
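A minimal sketch (not part of the uploaded file) of the lazy-import guard implemented by __getattr__ above: the halving search estimators only become importable after the explicit experimental opt-in, exactly as the ImportError message instructs.

# Opting in first makes the experimental estimators importable.
from sklearn.experimental import enable_halving_search_cv  # noqa: F401
from sklearn.model_selection import HalvingGridSearchCV, HalvingRandomSearchCV

# Without the opt-in import above, accessing either name raises the ImportError
# constructed in __getattr__.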
llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.86 kB)
 
llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc ADDED
Binary file (30.9 kB)
 
llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc ADDED
Binary file (64.7 kB)