applied-ai-018 commited on
Commit
c4282f5
·
verified ·
1 Parent(s): e34f5cb

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz +3 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz +3 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz +3 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz +3 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1.json.gz +3 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-s-act-.json.gz +3 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz +3 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz +3 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz +3 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz +3 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/data-v1-dl-3.arff.gz +3 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz +3 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz +3 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz +3 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz +3 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz +3 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz +3 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz +3 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz +3 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__init__.py +52 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_base.py +193 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py +2301 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py +458 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py +795 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py +409 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py +572 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_lda.py +929 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py +2443 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_pca.py +747 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py +551 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py +319 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py +451 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py +1062 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py +987 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py +19 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py +452 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py +197 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py +325 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/image.py +671 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py +0 -0
.gitattributes CHANGED
@@ -213,3 +213,4 @@ llmeval-env/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_6
213
  llmeval-env/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
214
  llmeval-env/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
215
  llmeval-env/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
213
  llmeval-env/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
214
  llmeval-env/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
215
  llmeval-env/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
216
+ env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e6a38d79d8f9e53a2ce11b68b4153062d4e96ec0b368d02b2e64f1b33c51693
3
+ size 551
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190
3
+ size 306
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190
3
+ size 306
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ef6025425fdfc5f736555ea385252af5bcbf62383615db82489366d4f96a0a7
3
+ size 327
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9da09e9a6031d060ec416f639a6bf34989e6c88ce641d10621eb906ba1d8c293
3
+ size 99
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-s-act-.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35890d08165c804526b48aad462d7ccc09e808bd7975ba604bd612b9608797ac
3
+ size 319
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7ee24adabd4aaed6419b43fe9d3f86d55fcf4bee0f1698ae21d86c2701314e3
3
+ size 2532
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:066a216679b197cc51946e17ee9a2e28215425991b0ceb7f10988c14f7f3f869
3
+ size 2473
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec4f2d6bc4df3882b08bba01571e0792a56f79e0a922d984897773acd284b426
3
+ size 535
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09ef19cfad25c5de487ddbaef3c4d068ca3063777730a288dfd6f5096a0c6f46
3
+ size 1407
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/data-v1-dl-3.arff.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c63fdf8861761f1ca70509f7d2d169a7cc053988c7b7c09c09a6db6124e208be
3
+ size 19485
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59d1aa6b02d2358c16fa9e4fbeff523a3bd10ebd38c7c371911fa8335e7bdcbf
3
+ size 598
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0f7973193eb35d19e99d1d8bca3c7f3a8b8d0410508af34ad571aee8ec5ab05
3
+ size 913
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36c63c3ac8c9db59910acbf4c772cd53040ccd0eac0b0452611dd7ad8da50474
3
+ size 1660
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8adac8e2f8cbcbfa9677acdd4927a961430465d2c99401832160be455cfaced8
3
+ size 3690
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0c203b4627175cebbf527d81917a499911af915f6f2f46ee7248428a948d603
3
+ size 325
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:301396b4a42c814b1a15038ddfcbcf5c8590501231747d0dc2a500b84b2fd0df
3
+ size 328
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3dee83987fffa8ec20e23b3cabc00d42beb7a469af6bd803909998c1687fa634
3
+ size 934
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c5fd93ffec7deb63a940fd698534dd7ebb7db349fc183930041cbf17e60e2cc
3
+ size 6471
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__init__.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The :mod:`sklearn.decomposition` module includes matrix decomposition
3
+ algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
4
+ this module can be regarded as dimensionality reduction techniques.
5
+ """
6
+
7
+
8
+ from ..utils.extmath import randomized_svd
9
+ from ._dict_learning import (
10
+ DictionaryLearning,
11
+ MiniBatchDictionaryLearning,
12
+ SparseCoder,
13
+ dict_learning,
14
+ dict_learning_online,
15
+ sparse_encode,
16
+ )
17
+ from ._factor_analysis import FactorAnalysis
18
+ from ._fastica import FastICA, fastica
19
+ from ._incremental_pca import IncrementalPCA
20
+ from ._kernel_pca import KernelPCA
21
+ from ._lda import LatentDirichletAllocation
22
+ from ._nmf import (
23
+ NMF,
24
+ MiniBatchNMF,
25
+ non_negative_factorization,
26
+ )
27
+ from ._pca import PCA
28
+ from ._sparse_pca import MiniBatchSparsePCA, SparsePCA
29
+ from ._truncated_svd import TruncatedSVD
30
+
31
+ __all__ = [
32
+ "DictionaryLearning",
33
+ "FastICA",
34
+ "IncrementalPCA",
35
+ "KernelPCA",
36
+ "MiniBatchDictionaryLearning",
37
+ "MiniBatchNMF",
38
+ "MiniBatchSparsePCA",
39
+ "NMF",
40
+ "PCA",
41
+ "SparseCoder",
42
+ "SparsePCA",
43
+ "dict_learning",
44
+ "dict_learning_online",
45
+ "fastica",
46
+ "non_negative_factorization",
47
+ "randomized_svd",
48
+ "sparse_encode",
49
+ "FactorAnalysis",
50
+ "TruncatedSVD",
51
+ "LatentDirichletAllocation",
52
+ ]
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_base.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Principal Component Analysis Base Classes"""
2
+
3
+ # Author: Alexandre Gramfort <[email protected]>
4
+ # Olivier Grisel <[email protected]>
5
+ # Mathieu Blondel <[email protected]>
6
+ # Denis A. Engemann <[email protected]>
7
+ # Kyle Kastner <[email protected]>
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ from abc import ABCMeta, abstractmethod
12
+
13
+ import numpy as np
14
+ from scipy import linalg
15
+ from scipy.sparse import issparse
16
+
17
+ from ..base import BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin
18
+ from ..utils._array_api import _add_to_diagonal, device, get_namespace
19
+ from ..utils.sparsefuncs import _implicit_column_offset
20
+ from ..utils.validation import check_is_fitted
21
+
22
+
23
+ class _BasePCA(
24
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
25
+ ):
26
+ """Base class for PCA methods.
27
+
28
+ Warning: This class should not be used directly.
29
+ Use derived classes instead.
30
+ """
31
+
32
+ def get_covariance(self):
33
+ """Compute data covariance with the generative model.
34
+
35
+ ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
36
+ where S**2 contains the explained variances, and sigma2 contains the
37
+ noise variances.
38
+
39
+ Returns
40
+ -------
41
+ cov : array of shape=(n_features, n_features)
42
+ Estimated covariance of data.
43
+ """
44
+ xp, _ = get_namespace(self.components_)
45
+
46
+ components_ = self.components_
47
+ exp_var = self.explained_variance_
48
+ if self.whiten:
49
+ components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
50
+ exp_var_diff = exp_var - self.noise_variance_
51
+ exp_var_diff = xp.where(
52
+ exp_var > self.noise_variance_,
53
+ exp_var_diff,
54
+ xp.asarray(0.0, device=device(exp_var)),
55
+ )
56
+ cov = (components_.T * exp_var_diff) @ components_
57
+ _add_to_diagonal(cov, self.noise_variance_, xp)
58
+ return cov
59
+
60
+ def get_precision(self):
61
+ """Compute data precision matrix with the generative model.
62
+
63
+ Equals the inverse of the covariance but computed with
64
+ the matrix inversion lemma for efficiency.
65
+
66
+ Returns
67
+ -------
68
+ precision : array, shape=(n_features, n_features)
69
+ Estimated precision of data.
70
+ """
71
+ xp, is_array_api_compliant = get_namespace(self.components_)
72
+
73
+ n_features = self.components_.shape[1]
74
+
75
+ # handle corner cases first
76
+ if self.n_components_ == 0:
77
+ return xp.eye(n_features) / self.noise_variance_
78
+
79
+ if is_array_api_compliant:
80
+ linalg_inv = xp.linalg.inv
81
+ else:
82
+ linalg_inv = linalg.inv
83
+
84
+ if self.noise_variance_ == 0.0:
85
+ return linalg_inv(self.get_covariance())
86
+
87
+ # Get precision using matrix inversion lemma
88
+ components_ = self.components_
89
+ exp_var = self.explained_variance_
90
+ if self.whiten:
91
+ components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
92
+ exp_var_diff = exp_var - self.noise_variance_
93
+ exp_var_diff = xp.where(
94
+ exp_var > self.noise_variance_,
95
+ exp_var_diff,
96
+ xp.asarray(0.0, device=device(exp_var)),
97
+ )
98
+ precision = components_ @ components_.T / self.noise_variance_
99
+ _add_to_diagonal(precision, 1.0 / exp_var_diff, xp)
100
+ precision = components_.T @ linalg_inv(precision) @ components_
101
+ precision /= -(self.noise_variance_**2)
102
+ _add_to_diagonal(precision, 1.0 / self.noise_variance_, xp)
103
+ return precision
104
+
105
+ @abstractmethod
106
+ def fit(self, X, y=None):
107
+ """Placeholder for fit. Subclasses should implement this method!
108
+
109
+ Fit the model with X.
110
+
111
+ Parameters
112
+ ----------
113
+ X : array-like of shape (n_samples, n_features)
114
+ Training data, where `n_samples` is the number of samples and
115
+ `n_features` is the number of features.
116
+
117
+ Returns
118
+ -------
119
+ self : object
120
+ Returns the instance itself.
121
+ """
122
+
123
+ def transform(self, X):
124
+ """Apply dimensionality reduction to X.
125
+
126
+ X is projected on the first principal components previously extracted
127
+ from a training set.
128
+
129
+ Parameters
130
+ ----------
131
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
132
+ New data, where `n_samples` is the number of samples
133
+ and `n_features` is the number of features.
134
+
135
+ Returns
136
+ -------
137
+ X_new : array-like of shape (n_samples, n_components)
138
+ Projection of X in the first principal components, where `n_samples`
139
+ is the number of samples and `n_components` is the number of the components.
140
+ """
141
+ xp, _ = get_namespace(X)
142
+
143
+ check_is_fitted(self)
144
+
145
+ X = self._validate_data(
146
+ X, accept_sparse=("csr", "csc"), dtype=[xp.float64, xp.float32], reset=False
147
+ )
148
+ if self.mean_ is not None:
149
+ if issparse(X):
150
+ X = _implicit_column_offset(X, self.mean_)
151
+ else:
152
+ X = X - self.mean_
153
+ X_transformed = X @ self.components_.T
154
+ if self.whiten:
155
+ X_transformed /= xp.sqrt(self.explained_variance_)
156
+ return X_transformed
157
+
158
+ def inverse_transform(self, X):
159
+ """Transform data back to its original space.
160
+
161
+ In other words, return an input `X_original` whose transform would be X.
162
+
163
+ Parameters
164
+ ----------
165
+ X : array-like of shape (n_samples, n_components)
166
+ New data, where `n_samples` is the number of samples
167
+ and `n_components` is the number of components.
168
+
169
+ Returns
170
+ -------
171
+ X_original array-like of shape (n_samples, n_features)
172
+ Original data, where `n_samples` is the number of samples
173
+ and `n_features` is the number of features.
174
+
175
+ Notes
176
+ -----
177
+ If whitening is enabled, inverse_transform will compute the
178
+ exact inverse operation, which includes reversing whitening.
179
+ """
180
+ xp, _ = get_namespace(X)
181
+
182
+ if self.whiten:
183
+ scaled_components = (
184
+ xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_
185
+ )
186
+ return X @ scaled_components + self.mean_
187
+ else:
188
+ return X @ self.components_ + self.mean_
189
+
190
+ @property
191
+ def _n_features_out(self):
192
+ """Number of transformed output features."""
193
+ return self.components_.shape[0]
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (246 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py ADDED
@@ -0,0 +1,2301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Dictionary learning.
2
+ """
3
+ # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
4
+ # License: BSD 3 clause
5
+
6
+ import itertools
7
+ import sys
8
+ import time
9
+ from numbers import Integral, Real
10
+ from warnings import warn
11
+
12
+ import numpy as np
13
+ from joblib import effective_n_jobs
14
+ from scipy import linalg
15
+
16
+ from ..base import (
17
+ BaseEstimator,
18
+ ClassNamePrefixFeaturesOutMixin,
19
+ TransformerMixin,
20
+ _fit_context,
21
+ )
22
+ from ..linear_model import Lars, Lasso, LassoLars, orthogonal_mp_gram
23
+ from ..utils import check_array, check_random_state, gen_batches, gen_even_slices
24
+ from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
25
+ from ..utils.extmath import randomized_svd, row_norms, svd_flip
26
+ from ..utils.parallel import Parallel, delayed
27
+ from ..utils.validation import check_is_fitted
28
+
29
+
30
+ def _check_positive_coding(method, positive):
31
+ if positive and method in ["omp", "lars"]:
32
+ raise ValueError(
33
+ "Positive constraint not supported for '{}' coding method.".format(method)
34
+ )
35
+
36
+
37
+ def _sparse_encode_precomputed(
38
+ X,
39
+ dictionary,
40
+ *,
41
+ gram=None,
42
+ cov=None,
43
+ algorithm="lasso_lars",
44
+ regularization=None,
45
+ copy_cov=True,
46
+ init=None,
47
+ max_iter=1000,
48
+ verbose=0,
49
+ positive=False,
50
+ ):
51
+ """Generic sparse coding with precomputed Gram and/or covariance matrices.
52
+
53
+ Each row of the result is the solution to a Lasso problem.
54
+
55
+ Parameters
56
+ ----------
57
+ X : ndarray of shape (n_samples, n_features)
58
+ Data matrix.
59
+
60
+ dictionary : ndarray of shape (n_components, n_features)
61
+ The dictionary matrix against which to solve the sparse coding of
62
+ the data. Some of the algorithms assume normalized rows.
63
+
64
+ gram : ndarray of shape (n_components, n_components), default=None
65
+ Precomputed Gram matrix, `dictionary * dictionary'`
66
+ gram can be `None` if method is 'threshold'.
67
+
68
+ cov : ndarray of shape (n_components, n_samples), default=None
69
+ Precomputed covariance, `dictionary * X'`.
70
+
71
+ algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
72
+ default='lasso_lars'
73
+ The algorithm used:
74
+
75
+ * `'lars'`: uses the least angle regression method
76
+ (`linear_model.lars_path`);
77
+ * `'lasso_lars'`: uses Lars to compute the Lasso solution;
78
+ * `'lasso_cd'`: uses the coordinate descent method to compute the
79
+ Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
80
+ the estimated components are sparse;
81
+ * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
82
+ solution;
83
+ * `'threshold'`: squashes to zero all coefficients less than
84
+ regularization from the projection `dictionary * data'`.
85
+
86
+ regularization : int or float, default=None
87
+ The regularization parameter. It corresponds to alpha when
88
+ algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
89
+ Otherwise it corresponds to `n_nonzero_coefs`.
90
+
91
+ init : ndarray of shape (n_samples, n_components), default=None
92
+ Initialization value of the sparse code. Only used if
93
+ `algorithm='lasso_cd'`.
94
+
95
+ max_iter : int, default=1000
96
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
97
+ `'lasso_lars'`.
98
+
99
+ copy_cov : bool, default=True
100
+ Whether to copy the precomputed covariance matrix; if `False`, it may
101
+ be overwritten.
102
+
103
+ verbose : int, default=0
104
+ Controls the verbosity; the higher, the more messages.
105
+
106
+ positive: bool, default=False
107
+ Whether to enforce a positivity constraint on the sparse code.
108
+
109
+ .. versionadded:: 0.20
110
+
111
+ Returns
112
+ -------
113
+ code : ndarray of shape (n_components, n_features)
114
+ The sparse codes.
115
+ """
116
+ n_samples, n_features = X.shape
117
+ n_components = dictionary.shape[0]
118
+
119
+ if algorithm == "lasso_lars":
120
+ alpha = float(regularization) / n_features # account for scaling
121
+ try:
122
+ err_mgt = np.seterr(all="ignore")
123
+
124
+ # Not passing in verbose=max(0, verbose-1) because Lars.fit already
125
+ # corrects the verbosity level.
126
+ lasso_lars = LassoLars(
127
+ alpha=alpha,
128
+ fit_intercept=False,
129
+ verbose=verbose,
130
+ precompute=gram,
131
+ fit_path=False,
132
+ positive=positive,
133
+ max_iter=max_iter,
134
+ )
135
+ lasso_lars.fit(dictionary.T, X.T, Xy=cov)
136
+ new_code = lasso_lars.coef_
137
+ finally:
138
+ np.seterr(**err_mgt)
139
+
140
+ elif algorithm == "lasso_cd":
141
+ alpha = float(regularization) / n_features # account for scaling
142
+
143
+ # TODO: Make verbosity argument for Lasso?
144
+ # sklearn.linear_model.coordinate_descent.enet_path has a verbosity
145
+ # argument that we could pass in from Lasso.
146
+ clf = Lasso(
147
+ alpha=alpha,
148
+ fit_intercept=False,
149
+ precompute=gram,
150
+ max_iter=max_iter,
151
+ warm_start=True,
152
+ positive=positive,
153
+ )
154
+
155
+ if init is not None:
156
+ # In some workflows using coordinate descent algorithms:
157
+ # - users might provide NumPy arrays with read-only buffers
158
+ # - `joblib` might memmap arrays making their buffer read-only
159
+ # TODO: move this handling (which is currently too broad)
160
+ # closer to the actual private function which need buffers to be writable.
161
+ if not init.flags["WRITEABLE"]:
162
+ init = np.array(init)
163
+ clf.coef_ = init
164
+
165
+ clf.fit(dictionary.T, X.T, check_input=False)
166
+ new_code = clf.coef_
167
+
168
+ elif algorithm == "lars":
169
+ try:
170
+ err_mgt = np.seterr(all="ignore")
171
+
172
+ # Not passing in verbose=max(0, verbose-1) because Lars.fit already
173
+ # corrects the verbosity level.
174
+ lars = Lars(
175
+ fit_intercept=False,
176
+ verbose=verbose,
177
+ precompute=gram,
178
+ n_nonzero_coefs=int(regularization),
179
+ fit_path=False,
180
+ )
181
+ lars.fit(dictionary.T, X.T, Xy=cov)
182
+ new_code = lars.coef_
183
+ finally:
184
+ np.seterr(**err_mgt)
185
+
186
+ elif algorithm == "threshold":
187
+ new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T
188
+ if positive:
189
+ np.clip(new_code, 0, None, out=new_code)
190
+
191
+ elif algorithm == "omp":
192
+ new_code = orthogonal_mp_gram(
193
+ Gram=gram,
194
+ Xy=cov,
195
+ n_nonzero_coefs=int(regularization),
196
+ tol=None,
197
+ norms_squared=row_norms(X, squared=True),
198
+ copy_Xy=copy_cov,
199
+ ).T
200
+
201
+ return new_code.reshape(n_samples, n_components)
202
+
203
+
204
+ @validate_params(
205
+ {
206
+ "X": ["array-like"],
207
+ "dictionary": ["array-like"],
208
+ "gram": ["array-like", None],
209
+ "cov": ["array-like", None],
210
+ "algorithm": [
211
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
212
+ ],
213
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
214
+ "alpha": [Interval(Real, 0, None, closed="left"), None],
215
+ "copy_cov": ["boolean"],
216
+ "init": ["array-like", None],
217
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
218
+ "n_jobs": [Integral, None],
219
+ "check_input": ["boolean"],
220
+ "verbose": ["verbose"],
221
+ "positive": ["boolean"],
222
+ },
223
+ prefer_skip_nested_validation=True,
224
+ )
225
+ # XXX : could be moved to the linear_model module
226
+ def sparse_encode(
227
+ X,
228
+ dictionary,
229
+ *,
230
+ gram=None,
231
+ cov=None,
232
+ algorithm="lasso_lars",
233
+ n_nonzero_coefs=None,
234
+ alpha=None,
235
+ copy_cov=True,
236
+ init=None,
237
+ max_iter=1000,
238
+ n_jobs=None,
239
+ check_input=True,
240
+ verbose=0,
241
+ positive=False,
242
+ ):
243
+ """Sparse coding.
244
+
245
+ Each row of the result is the solution to a sparse coding problem.
246
+ The goal is to find a sparse array `code` such that::
247
+
248
+ X ~= code * dictionary
249
+
250
+ Read more in the :ref:`User Guide <SparseCoder>`.
251
+
252
+ Parameters
253
+ ----------
254
+ X : array-like of shape (n_samples, n_features)
255
+ Data matrix.
256
+
257
+ dictionary : array-like of shape (n_components, n_features)
258
+ The dictionary matrix against which to solve the sparse coding of
259
+ the data. Some of the algorithms assume normalized rows for meaningful
260
+ output.
261
+
262
+ gram : array-like of shape (n_components, n_components), default=None
263
+ Precomputed Gram matrix, `dictionary * dictionary'`.
264
+
265
+ cov : array-like of shape (n_components, n_samples), default=None
266
+ Precomputed covariance, `dictionary' * X`.
267
+
268
+ algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
269
+ default='lasso_lars'
270
+ The algorithm used:
271
+
272
+ * `'lars'`: uses the least angle regression method
273
+ (`linear_model.lars_path`);
274
+ * `'lasso_lars'`: uses Lars to compute the Lasso solution;
275
+ * `'lasso_cd'`: uses the coordinate descent method to compute the
276
+ Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
277
+ the estimated components are sparse;
278
+ * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
279
+ solution;
280
+ * `'threshold'`: squashes to zero all coefficients less than
281
+ regularization from the projection `dictionary * data'`.
282
+
283
+ n_nonzero_coefs : int, default=None
284
+ Number of nonzero coefficients to target in each column of the
285
+ solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
286
+ and is overridden by `alpha` in the `omp` case. If `None`, then
287
+ `n_nonzero_coefs=int(n_features / 10)`.
288
+
289
+ alpha : float, default=None
290
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
291
+ penalty applied to the L1 norm.
292
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
293
+ threshold below which coefficients will be squashed to zero.
294
+ If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
295
+ the reconstruction error targeted. In this case, it overrides
296
+ `n_nonzero_coefs`.
297
+ If `None`, default to 1.
298
+
299
+ copy_cov : bool, default=True
300
+ Whether to copy the precomputed covariance matrix; if `False`, it may
301
+ be overwritten.
302
+
303
+ init : ndarray of shape (n_samples, n_components), default=None
304
+ Initialization value of the sparse codes. Only used if
305
+ `algorithm='lasso_cd'`.
306
+
307
+ max_iter : int, default=1000
308
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
309
+ `'lasso_lars'`.
310
+
311
+ n_jobs : int, default=None
312
+ Number of parallel jobs to run.
313
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
314
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
315
+ for more details.
316
+
317
+ check_input : bool, default=True
318
+ If `False`, the input arrays X and dictionary will not be checked.
319
+
320
+ verbose : int, default=0
321
+ Controls the verbosity; the higher, the more messages.
322
+
323
+ positive : bool, default=False
324
+ Whether to enforce positivity when finding the encoding.
325
+
326
+ .. versionadded:: 0.20
327
+
328
+ Returns
329
+ -------
330
+ code : ndarray of shape (n_samples, n_components)
331
+ The sparse codes.
332
+
333
+ See Also
334
+ --------
335
+ sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso
336
+ path using LARS algorithm.
337
+ sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems.
338
+ sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
339
+ SparseCoder : Find a sparse representation of data from a fixed precomputed
340
+ dictionary.
341
+
342
+ Examples
343
+ --------
344
+ >>> import numpy as np
345
+ >>> from sklearn.decomposition import sparse_encode
346
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
347
+ >>> dictionary = np.array(
348
+ ... [[0, 1, 0],
349
+ ... [-1, -1, 2],
350
+ ... [1, 1, 1],
351
+ ... [0, 1, 1],
352
+ ... [0, 2, 1]],
353
+ ... dtype=np.float64
354
+ ... )
355
+ >>> sparse_encode(X, dictionary, alpha=1e-10)
356
+ array([[ 0., 0., -1., 0., 0.],
357
+ [ 0., 1., 1., 0., 0.]])
358
+ """
359
+ if check_input:
360
+ if algorithm == "lasso_cd":
361
+ dictionary = check_array(
362
+ dictionary, order="C", dtype=[np.float64, np.float32]
363
+ )
364
+ X = check_array(X, order="C", dtype=[np.float64, np.float32])
365
+ else:
366
+ dictionary = check_array(dictionary)
367
+ X = check_array(X)
368
+
369
+ if dictionary.shape[1] != X.shape[1]:
370
+ raise ValueError(
371
+ "Dictionary and X have different numbers of features:"
372
+ "dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape)
373
+ )
374
+
375
+ _check_positive_coding(algorithm, positive)
376
+
377
+ return _sparse_encode(
378
+ X,
379
+ dictionary,
380
+ gram=gram,
381
+ cov=cov,
382
+ algorithm=algorithm,
383
+ n_nonzero_coefs=n_nonzero_coefs,
384
+ alpha=alpha,
385
+ copy_cov=copy_cov,
386
+ init=init,
387
+ max_iter=max_iter,
388
+ n_jobs=n_jobs,
389
+ verbose=verbose,
390
+ positive=positive,
391
+ )
392
+
393
+
394
+ def _sparse_encode(
395
+ X,
396
+ dictionary,
397
+ *,
398
+ gram=None,
399
+ cov=None,
400
+ algorithm="lasso_lars",
401
+ n_nonzero_coefs=None,
402
+ alpha=None,
403
+ copy_cov=True,
404
+ init=None,
405
+ max_iter=1000,
406
+ n_jobs=None,
407
+ verbose=0,
408
+ positive=False,
409
+ ):
410
+ """Sparse coding without input/parameter validation."""
411
+
412
+ n_samples, n_features = X.shape
413
+ n_components = dictionary.shape[0]
414
+
415
+ if algorithm in ("lars", "omp"):
416
+ regularization = n_nonzero_coefs
417
+ if regularization is None:
418
+ regularization = min(max(n_features / 10, 1), n_components)
419
+ else:
420
+ regularization = alpha
421
+ if regularization is None:
422
+ regularization = 1.0
423
+
424
+ if gram is None and algorithm != "threshold":
425
+ gram = np.dot(dictionary, dictionary.T)
426
+
427
+ if cov is None and algorithm != "lasso_cd":
428
+ copy_cov = False
429
+ cov = np.dot(dictionary, X.T)
430
+
431
+ if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold":
432
+ code = _sparse_encode_precomputed(
433
+ X,
434
+ dictionary,
435
+ gram=gram,
436
+ cov=cov,
437
+ algorithm=algorithm,
438
+ regularization=regularization,
439
+ copy_cov=copy_cov,
440
+ init=init,
441
+ max_iter=max_iter,
442
+ verbose=verbose,
443
+ positive=positive,
444
+ )
445
+ return code
446
+
447
+ # Enter parallel code block
448
+ n_samples = X.shape[0]
449
+ n_components = dictionary.shape[0]
450
+ code = np.empty((n_samples, n_components))
451
+ slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
452
+
453
+ code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
454
+ delayed(_sparse_encode_precomputed)(
455
+ X[this_slice],
456
+ dictionary,
457
+ gram=gram,
458
+ cov=cov[:, this_slice] if cov is not None else None,
459
+ algorithm=algorithm,
460
+ regularization=regularization,
461
+ copy_cov=copy_cov,
462
+ init=init[this_slice] if init is not None else None,
463
+ max_iter=max_iter,
464
+ verbose=verbose,
465
+ positive=positive,
466
+ )
467
+ for this_slice in slices
468
+ )
469
+ for this_slice, this_view in zip(slices, code_views):
470
+ code[this_slice] = this_view
471
+ return code
472
+
473
+
474
+ def _update_dict(
475
+ dictionary,
476
+ Y,
477
+ code,
478
+ A=None,
479
+ B=None,
480
+ verbose=False,
481
+ random_state=None,
482
+ positive=False,
483
+ ):
484
+ """Update the dense dictionary factor in place.
485
+
486
+ Parameters
487
+ ----------
488
+ dictionary : ndarray of shape (n_components, n_features)
489
+ Value of the dictionary at the previous iteration.
490
+
491
+ Y : ndarray of shape (n_samples, n_features)
492
+ Data matrix.
493
+
494
+ code : ndarray of shape (n_samples, n_components)
495
+ Sparse coding of the data against which to optimize the dictionary.
496
+
497
+ A : ndarray of shape (n_components, n_components), default=None
498
+ Together with `B`, sufficient stats of the online model to update the
499
+ dictionary.
500
+
501
+ B : ndarray of shape (n_features, n_components), default=None
502
+ Together with `A`, sufficient stats of the online model to update the
503
+ dictionary.
504
+
505
+ verbose: bool, default=False
506
+ Degree of output the procedure will print.
507
+
508
+ random_state : int, RandomState instance or None, default=None
509
+ Used for randomly initializing the dictionary. Pass an int for
510
+ reproducible results across multiple function calls.
511
+ See :term:`Glossary <random_state>`.
512
+
513
+ positive : bool, default=False
514
+ Whether to enforce positivity when finding the dictionary.
515
+
516
+ .. versionadded:: 0.20
517
+ """
518
+ n_samples, n_components = code.shape
519
+ random_state = check_random_state(random_state)
520
+
521
+ if A is None:
522
+ A = code.T @ code
523
+ if B is None:
524
+ B = Y.T @ code
525
+
526
+ n_unused = 0
527
+
528
+ for k in range(n_components):
529
+ if A[k, k] > 1e-6:
530
+ # 1e-6 is arbitrary but consistent with the spams implementation
531
+ dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
532
+ else:
533
+ # kth atom is almost never used -> sample a new one from the data
534
+ newd = Y[random_state.choice(n_samples)]
535
+
536
+ # add small noise to avoid making the sparse coding ill conditioned
537
+ noise_level = 0.01 * (newd.std() or 1) # avoid 0 std
538
+ noise = random_state.normal(0, noise_level, size=len(newd))
539
+
540
+ dictionary[k] = newd + noise
541
+ code[:, k] = 0
542
+ n_unused += 1
543
+
544
+ if positive:
545
+ np.clip(dictionary[k], 0, None, out=dictionary[k])
546
+
547
+ # Projection on the constraint set ||V_k|| <= 1
548
+ dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
549
+
550
+ if verbose and n_unused > 0:
551
+ print(f"{n_unused} unused atoms resampled.")
552
+
553
+
554
+ def _dict_learning(
555
+ X,
556
+ n_components,
557
+ *,
558
+ alpha,
559
+ max_iter,
560
+ tol,
561
+ method,
562
+ n_jobs,
563
+ dict_init,
564
+ code_init,
565
+ callback,
566
+ verbose,
567
+ random_state,
568
+ return_n_iter,
569
+ positive_dict,
570
+ positive_code,
571
+ method_max_iter,
572
+ ):
573
+ """Main dictionary learning algorithm"""
574
+ t0 = time.time()
575
+ # Init the code and the dictionary with SVD of Y
576
+ if code_init is not None and dict_init is not None:
577
+ code = np.array(code_init, order="F")
578
+ # Don't copy V, it will happen below
579
+ dictionary = dict_init
580
+ else:
581
+ code, S, dictionary = linalg.svd(X, full_matrices=False)
582
+ # flip the initial code's sign to enforce deterministic output
583
+ code, dictionary = svd_flip(code, dictionary)
584
+ dictionary = S[:, np.newaxis] * dictionary
585
+ r = len(dictionary)
586
+ if n_components <= r: # True even if n_components=None
587
+ code = code[:, :n_components]
588
+ dictionary = dictionary[:n_components, :]
589
+ else:
590
+ code = np.c_[code, np.zeros((len(code), n_components - r))]
591
+ dictionary = np.r_[
592
+ dictionary, np.zeros((n_components - r, dictionary.shape[1]))
593
+ ]
594
+
595
+ # Fortran-order dict better suited for the sparse coding which is the
596
+ # bottleneck of this algorithm.
597
+ dictionary = np.asfortranarray(dictionary)
598
+
599
+ errors = []
600
+ current_cost = np.nan
601
+
602
+ if verbose == 1:
603
+ print("[dict_learning]", end=" ")
604
+
605
+ # If max_iter is 0, number of iterations returned should be zero
606
+ ii = -1
607
+
608
+ for ii in range(max_iter):
609
+ dt = time.time() - t0
610
+ if verbose == 1:
611
+ sys.stdout.write(".")
612
+ sys.stdout.flush()
613
+ elif verbose:
614
+ print(
615
+ "Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
616
+ % (ii, dt, dt / 60, current_cost)
617
+ )
618
+
619
+ # Update code
620
+ code = sparse_encode(
621
+ X,
622
+ dictionary,
623
+ algorithm=method,
624
+ alpha=alpha,
625
+ init=code,
626
+ n_jobs=n_jobs,
627
+ positive=positive_code,
628
+ max_iter=method_max_iter,
629
+ verbose=verbose,
630
+ )
631
+
632
+ # Update dictionary in place
633
+ _update_dict(
634
+ dictionary,
635
+ X,
636
+ code,
637
+ verbose=verbose,
638
+ random_state=random_state,
639
+ positive=positive_dict,
640
+ )
641
+
642
+ # Cost function
643
+ current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum(
644
+ np.abs(code)
645
+ )
646
+ errors.append(current_cost)
647
+
648
+ if ii > 0:
649
+ dE = errors[-2] - errors[-1]
650
+ # assert(dE >= -tol * errors[-1])
651
+ if dE < tol * errors[-1]:
652
+ if verbose == 1:
653
+ # A line return
654
+ print("")
655
+ elif verbose:
656
+ print("--- Convergence reached after %d iterations" % ii)
657
+ break
658
+ if ii % 5 == 0 and callback is not None:
659
+ callback(locals())
660
+
661
+ if return_n_iter:
662
+ return code, dictionary, errors, ii + 1
663
+ else:
664
+ return code, dictionary, errors
665
+
666
+
667
+ def dict_learning_online(
668
+ X,
669
+ n_components=2,
670
+ *,
671
+ alpha=1,
672
+ max_iter=100,
673
+ return_code=True,
674
+ dict_init=None,
675
+ callback=None,
676
+ batch_size=256,
677
+ verbose=False,
678
+ shuffle=True,
679
+ n_jobs=None,
680
+ method="lars",
681
+ random_state=None,
682
+ positive_dict=False,
683
+ positive_code=False,
684
+ method_max_iter=1000,
685
+ tol=1e-3,
686
+ max_no_improvement=10,
687
+ ):
688
+ """Solve a dictionary learning matrix factorization problem online.
689
+
690
+ Finds the best dictionary and the corresponding sparse code for
691
+ approximating the data matrix X by solving::
692
+
693
+ (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
694
+ (U,V)
695
+ with || V_k ||_2 = 1 for all 0 <= k < n_components
696
+
697
+ where V is the dictionary and U is the sparse code. ||.||_Fro stands for
698
+ the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
699
+ which is the sum of the absolute values of all the entries in the matrix.
700
+ This is accomplished by repeatedly iterating over mini-batches by slicing
701
+ the input data.
702
+
703
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
704
+
705
+ Parameters
706
+ ----------
707
+ X : ndarray of shape (n_samples, n_features)
708
+ Data matrix.
709
+
710
+ n_components : int or None, default=2
711
+ Number of dictionary atoms to extract. If None, then ``n_components``
712
+ is set to ``n_features``.
713
+
714
+ alpha : float, default=1
715
+ Sparsity controlling parameter.
716
+
717
+ max_iter : int, default=100
718
+ Maximum number of iterations over the complete dataset before
719
+ stopping independently of any early stopping criterion heuristics.
720
+
721
+ .. versionadded:: 1.1
722
+
723
+ .. deprecated:: 1.4
724
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
725
+ Use the default value (i.e. `100`) instead.
726
+
727
+ return_code : bool, default=True
728
+ Whether to also return the code U or just the dictionary `V`.
729
+
730
+ dict_init : ndarray of shape (n_components, n_features), default=None
731
+ Initial values for the dictionary for warm restart scenarios.
732
+ If `None`, the initial values for the dictionary are created
733
+ with an SVD decomposition of the data via
734
+ :func:`~sklearn.utils.extmath.randomized_svd`.
735
+
736
+ callback : callable, default=None
737
+ A callable that gets invoked at the end of each iteration.
738
+
739
+ batch_size : int, default=256
740
+ The number of samples to take in each batch.
741
+
742
+ .. versionchanged:: 1.3
743
+ The default value of `batch_size` changed from 3 to 256 in version 1.3.
744
+
745
+ verbose : bool, default=False
746
+ To control the verbosity of the procedure.
747
+
748
+ shuffle : bool, default=True
749
+ Whether to shuffle the data before splitting it in batches.
750
+
751
+ n_jobs : int, default=None
752
+ Number of parallel jobs to run.
753
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
754
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
755
+ for more details.
756
+
757
+ method : {'lars', 'cd'}, default='lars'
758
+ * `'lars'`: uses the least angle regression method to solve the lasso
759
+ problem (`linear_model.lars_path`);
760
+ * `'cd'`: uses the coordinate descent method to compute the
761
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
762
+ the estimated components are sparse.
763
+
764
+ random_state : int, RandomState instance or None, default=None
765
+ Used for initializing the dictionary when ``dict_init`` is not
766
+ specified, randomly shuffling the data when ``shuffle`` is set to
767
+ ``True``, and updating the dictionary. Pass an int for reproducible
768
+ results across multiple function calls.
769
+ See :term:`Glossary <random_state>`.
770
+
771
+ positive_dict : bool, default=False
772
+ Whether to enforce positivity when finding the dictionary.
773
+
774
+ .. versionadded:: 0.20
775
+
776
+ positive_code : bool, default=False
777
+ Whether to enforce positivity when finding the code.
778
+
779
+ .. versionadded:: 0.20
780
+
781
+ method_max_iter : int, default=1000
782
+ Maximum number of iterations to perform when solving the lasso problem.
783
+
784
+ .. versionadded:: 0.22
785
+
786
+ tol : float, default=1e-3
787
+ Control early stopping based on the norm of the differences in the
788
+ dictionary between 2 steps.
789
+
790
+ To disable early stopping based on changes in the dictionary, set
791
+ `tol` to 0.0.
792
+
793
+ .. versionadded:: 1.1
794
+
795
+ max_no_improvement : int, default=10
796
+ Control early stopping based on the consecutive number of mini batches
797
+ that does not yield an improvement on the smoothed cost function.
798
+
799
+ To disable convergence detection based on cost function, set
800
+ `max_no_improvement` to None.
801
+
802
+ .. versionadded:: 1.1
803
+
804
+ Returns
805
+ -------
806
+ code : ndarray of shape (n_samples, n_components),
807
+ The sparse code (only returned if `return_code=True`).
808
+
809
+ dictionary : ndarray of shape (n_components, n_features),
810
+ The solutions to the dictionary learning problem.
811
+
812
+ n_iter : int
813
+ Number of iterations run. Returned only if `return_n_iter` is
814
+ set to `True`.
815
+
816
+ See Also
817
+ --------
818
+ dict_learning : Solve a dictionary learning matrix factorization problem.
819
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
820
+ MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary
821
+ learning algorithm.
822
+ SparsePCA : Sparse Principal Components Analysis.
823
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
824
+
825
+ Examples
826
+ --------
827
+ >>> import numpy as np
828
+ >>> from sklearn.datasets import make_sparse_coded_signal
829
+ >>> from sklearn.decomposition import dict_learning_online
830
+ >>> X, _, _ = make_sparse_coded_signal(
831
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
832
+ ... random_state=42,
833
+ ... )
834
+ >>> U, V = dict_learning_online(
835
+ ... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
836
+ ... )
837
+
838
+ We can check the level of sparsity of `U`:
839
+
840
+ >>> np.mean(U == 0)
841
+ 0.53...
842
+
843
+ We can compare the average squared euclidean norm of the reconstruction
844
+ error of the sparse coded signal relative to the squared euclidean norm of
845
+ the original signal:
846
+
847
+ >>> X_hat = U @ V
848
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
849
+ 0.05...
850
+ """
851
+ # TODO(1.6): remove in 1.6
852
+ if max_iter is None:
853
+ warn(
854
+ (
855
+ "`max_iter=None` is deprecated in version 1.4 and will be removed in "
856
+ "version 1.6. Use the default value (i.e. `100`) instead."
857
+ ),
858
+ FutureWarning,
859
+ )
860
+ max_iter = 100
861
+
862
+ transform_algorithm = "lasso_" + method
863
+
864
+ est = MiniBatchDictionaryLearning(
865
+ n_components=n_components,
866
+ alpha=alpha,
867
+ max_iter=max_iter,
868
+ n_jobs=n_jobs,
869
+ fit_algorithm=method,
870
+ batch_size=batch_size,
871
+ shuffle=shuffle,
872
+ dict_init=dict_init,
873
+ random_state=random_state,
874
+ transform_algorithm=transform_algorithm,
875
+ transform_alpha=alpha,
876
+ positive_code=positive_code,
877
+ positive_dict=positive_dict,
878
+ transform_max_iter=method_max_iter,
879
+ verbose=verbose,
880
+ callback=callback,
881
+ tol=tol,
882
+ max_no_improvement=max_no_improvement,
883
+ ).fit(X)
884
+
885
+ if not return_code:
886
+ return est.components_
887
+ else:
888
+ code = est.transform(X)
889
+ return code, est.components_
890
+
891
+
892
+ @validate_params(
893
+ {
894
+ "X": ["array-like"],
895
+ "method": [StrOptions({"lars", "cd"})],
896
+ "return_n_iter": ["boolean"],
897
+ "method_max_iter": [Interval(Integral, 0, None, closed="left")],
898
+ },
899
+ prefer_skip_nested_validation=False,
900
+ )
901
+ def dict_learning(
902
+ X,
903
+ n_components,
904
+ *,
905
+ alpha,
906
+ max_iter=100,
907
+ tol=1e-8,
908
+ method="lars",
909
+ n_jobs=None,
910
+ dict_init=None,
911
+ code_init=None,
912
+ callback=None,
913
+ verbose=False,
914
+ random_state=None,
915
+ return_n_iter=False,
916
+ positive_dict=False,
917
+ positive_code=False,
918
+ method_max_iter=1000,
919
+ ):
920
+ """Solve a dictionary learning matrix factorization problem.
921
+
922
+ Finds the best dictionary and the corresponding sparse code for
923
+ approximating the data matrix X by solving::
924
+
925
+ (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
926
+ (U,V)
927
+ with || V_k ||_2 = 1 for all 0 <= k < n_components
928
+
929
+ where V is the dictionary and U is the sparse code. ||.||_Fro stands for
930
+ the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
931
+ which is the sum of the absolute values of all the entries in the matrix.
932
+
933
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
934
+
935
+ Parameters
936
+ ----------
937
+ X : array-like of shape (n_samples, n_features)
938
+ Data matrix.
939
+
940
+ n_components : int
941
+ Number of dictionary atoms to extract.
942
+
943
+ alpha : int or float
944
+ Sparsity controlling parameter.
945
+
946
+ max_iter : int, default=100
947
+ Maximum number of iterations to perform.
948
+
949
+ tol : float, default=1e-8
950
+ Tolerance for the stopping condition.
951
+
952
+ method : {'lars', 'cd'}, default='lars'
953
+ The method used:
954
+
955
+ * `'lars'`: uses the least angle regression method to solve the lasso
956
+ problem (`linear_model.lars_path`);
957
+ * `'cd'`: uses the coordinate descent method to compute the
958
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
959
+ the estimated components are sparse.
960
+
961
+ n_jobs : int, default=None
962
+ Number of parallel jobs to run.
963
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
964
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
965
+ for more details.
966
+
967
+ dict_init : ndarray of shape (n_components, n_features), default=None
968
+ Initial value for the dictionary for warm restart scenarios. Only used
969
+ if `code_init` and `dict_init` are not None.
970
+
971
+ code_init : ndarray of shape (n_samples, n_components), default=None
972
+ Initial value for the sparse code for warm restart scenarios. Only used
973
+ if `code_init` and `dict_init` are not None.
974
+
975
+ callback : callable, default=None
976
+ Callable that gets invoked every five iterations.
977
+
978
+ verbose : bool, default=False
979
+ To control the verbosity of the procedure.
980
+
981
+ random_state : int, RandomState instance or None, default=None
982
+ Used for randomly initializing the dictionary. Pass an int for
983
+ reproducible results across multiple function calls.
984
+ See :term:`Glossary <random_state>`.
985
+
986
+ return_n_iter : bool, default=False
987
+ Whether or not to return the number of iterations.
988
+
989
+ positive_dict : bool, default=False
990
+ Whether to enforce positivity when finding the dictionary.
991
+
992
+ .. versionadded:: 0.20
993
+
994
+ positive_code : bool, default=False
995
+ Whether to enforce positivity when finding the code.
996
+
997
+ .. versionadded:: 0.20
998
+
999
+ method_max_iter : int, default=1000
1000
+ Maximum number of iterations to perform.
1001
+
1002
+ .. versionadded:: 0.22
1003
+
1004
+ Returns
1005
+ -------
1006
+ code : ndarray of shape (n_samples, n_components)
1007
+ The sparse code factor in the matrix factorization.
1008
+
1009
+ dictionary : ndarray of shape (n_components, n_features)
1010
+ The dictionary factor in the matrix factorization.
1011
+
1012
+ errors : array
1013
+ Vector of errors at each iteration.
1014
+
1015
+ n_iter : int
1016
+ Number of iterations run. Returned only if `return_n_iter` is
1017
+ set to True.
1018
+
1019
+ See Also
1020
+ --------
1021
+ dict_learning_online : Solve a dictionary learning matrix factorization
1022
+ problem online.
1023
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1024
+ MiniBatchDictionaryLearning : A faster, less accurate version
1025
+ of the dictionary learning algorithm.
1026
+ SparsePCA : Sparse Principal Components Analysis.
1027
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1028
+
1029
+ Examples
1030
+ --------
1031
+ >>> import numpy as np
1032
+ >>> from sklearn.datasets import make_sparse_coded_signal
1033
+ >>> from sklearn.decomposition import dict_learning
1034
+ >>> X, _, _ = make_sparse_coded_signal(
1035
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1036
+ ... random_state=42,
1037
+ ... )
1038
+ >>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)
1039
+
1040
+ We can check the level of sparsity of `U`:
1041
+
1042
+ >>> np.mean(U == 0)
1043
+ 0.6...
1044
+
1045
+ We can compare the average squared euclidean norm of the reconstruction
1046
+ error of the sparse coded signal relative to the squared euclidean norm of
1047
+ the original signal:
1048
+
1049
+ >>> X_hat = U @ V
1050
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1051
+ 0.01...
1052
+ """
1053
+ estimator = DictionaryLearning(
1054
+ n_components=n_components,
1055
+ alpha=alpha,
1056
+ max_iter=max_iter,
1057
+ tol=tol,
1058
+ fit_algorithm=method,
1059
+ n_jobs=n_jobs,
1060
+ dict_init=dict_init,
1061
+ callback=callback,
1062
+ code_init=code_init,
1063
+ verbose=verbose,
1064
+ random_state=random_state,
1065
+ positive_code=positive_code,
1066
+ positive_dict=positive_dict,
1067
+ transform_max_iter=method_max_iter,
1068
+ ).set_output(transform="default")
1069
+ code = estimator.fit_transform(X)
1070
+ if return_n_iter:
1071
+ return (
1072
+ code,
1073
+ estimator.components_,
1074
+ estimator.error_,
1075
+ estimator.n_iter_,
1076
+ )
1077
+ return code, estimator.components_, estimator.error_
1078
+
1079
+
1080
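Editor's note: a minimal usage sketch, not part of the diff, illustrating that the `dict_learning` function above is a thin functional wrapper around the `DictionaryLearning` estimator defined later in this file (the function builds and fits that estimator internally). Only shapes are asserted; the data-generation call mirrors the docstring example.

    import numpy as np
    from sklearn.datasets import make_sparse_coded_signal
    from sklearn.decomposition import DictionaryLearning, dict_learning

    X, _, _ = make_sparse_coded_signal(
        n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=0
    )
    # Functional API: returns the sparse code, the dictionary and the error trace.
    code, dictionary, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=0)
    # Estimator API: the code comes from fit_transform, the dictionary from components_.
    est = DictionaryLearning(n_components=15, alpha=0.1, random_state=0)
    code_est = est.fit_transform(X)
    assert code.shape == code_est.shape == (30, 15)
    assert dictionary.shape == est.components_.shape == (15, 20)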
+ class _BaseSparseCoding(ClassNamePrefixFeaturesOutMixin, TransformerMixin):
1081
+ """Base class from SparseCoder and DictionaryLearning algorithms."""
1082
+
1083
+ def __init__(
1084
+ self,
1085
+ transform_algorithm,
1086
+ transform_n_nonzero_coefs,
1087
+ transform_alpha,
1088
+ split_sign,
1089
+ n_jobs,
1090
+ positive_code,
1091
+ transform_max_iter,
1092
+ ):
1093
+ self.transform_algorithm = transform_algorithm
1094
+ self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
1095
+ self.transform_alpha = transform_alpha
1096
+ self.transform_max_iter = transform_max_iter
1097
+ self.split_sign = split_sign
1098
+ self.n_jobs = n_jobs
1099
+ self.positive_code = positive_code
1100
+
1101
+ def _transform(self, X, dictionary):
1102
+ """Private method allowing to accommodate both DictionaryLearning and
1103
+ SparseCoder."""
1104
+ X = self._validate_data(X, reset=False)
1105
+
1106
+ if hasattr(self, "alpha") and self.transform_alpha is None:
1107
+ transform_alpha = self.alpha
1108
+ else:
1109
+ transform_alpha = self.transform_alpha
1110
+
1111
+ code = sparse_encode(
1112
+ X,
1113
+ dictionary,
1114
+ algorithm=self.transform_algorithm,
1115
+ n_nonzero_coefs=self.transform_n_nonzero_coefs,
1116
+ alpha=transform_alpha,
1117
+ max_iter=self.transform_max_iter,
1118
+ n_jobs=self.n_jobs,
1119
+ positive=self.positive_code,
1120
+ )
1121
+
1122
+ if self.split_sign:
1123
+ # feature vector is split into a positive and negative side
1124
+ n_samples, n_features = code.shape
1125
+ split_code = np.empty((n_samples, 2 * n_features))
1126
+ split_code[:, :n_features] = np.maximum(code, 0)
1127
+ split_code[:, n_features:] = -np.minimum(code, 0)
1128
+ code = split_code
1129
+
1130
+ return code
1131
+
1132
+ def transform(self, X):
1133
+ """Encode the data as a sparse combination of the dictionary atoms.
1134
+
1135
+ Coding method is determined by the object parameter
1136
+ `transform_algorithm`.
1137
+
1138
+ Parameters
1139
+ ----------
1140
+ X : ndarray of shape (n_samples, n_features)
1141
+ Test data to be transformed, must have the same number of
1142
+ features as the data used to train the model.
1143
+
1144
+ Returns
1145
+ -------
1146
+ X_new : ndarray of shape (n_samples, n_components)
1147
+ Transformed data.
1148
+ """
1149
+ check_is_fitted(self)
1150
+ return self._transform(X, self.components_)
1151
+
1152
+
1153
+ class SparseCoder(_BaseSparseCoding, BaseEstimator):
1154
+ """Sparse coding.
1155
+
1156
+ Finds a sparse representation of data against a fixed, precomputed
1157
+ dictionary.
1158
+
1159
+ Each row of the result is the solution to a sparse coding problem.
1160
+ The goal is to find a sparse array `code` such that::
1161
+
1162
+ X ~= code * dictionary
1163
+
1164
+ Read more in the :ref:`User Guide <SparseCoder>`.
1165
+
1166
+ Parameters
1167
+ ----------
1168
+ dictionary : ndarray of shape (n_components, n_features)
1169
+ The dictionary atoms used for sparse coding. Rows are assumed to be
1170
+ normalized to unit norm.
1171
+
1172
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1173
+ 'threshold'}, default='omp'
1174
+ Algorithm used to transform the data:
1175
+
1176
+ - `'lars'`: uses the least angle regression method
1177
+ (`linear_model.lars_path`);
1178
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution;
1179
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1180
+ Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if
1181
+ the estimated components are sparse;
1182
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1183
+ solution;
1184
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1185
+ the projection ``dictionary * X'``.
1186
+
1187
+ transform_n_nonzero_coefs : int, default=None
1188
+ Number of nonzero coefficients to target in each column of the
1189
+ solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
1190
+ and is overridden by `alpha` in the `omp` case. If `None`, then
1191
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1192
+
1193
+ transform_alpha : float, default=None
1194
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1195
+ penalty applied to the L1 norm.
1196
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1197
+ threshold below which coefficients will be squashed to zero.
1198
+ If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
1199
+ the reconstruction error targeted. In this case, it overrides
1200
+ `n_nonzero_coefs`.
1201
+ If `None`, default to 1.
1202
+
1203
+ split_sign : bool, default=False
1204
+ Whether to split the sparse feature vector into the concatenation of
1205
+ its negative part and its positive part. This can improve the
1206
+ performance of downstream classifiers.
1207
+
1208
+ n_jobs : int, default=None
1209
+ Number of parallel jobs to run.
1210
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1211
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1212
+ for more details.
1213
+
1214
+ positive_code : bool, default=False
1215
+ Whether to enforce positivity when finding the code.
1216
+
1217
+ .. versionadded:: 0.20
1218
+
1219
+ transform_max_iter : int, default=1000
1220
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1221
+ `lasso_lars`.
1222
+
1223
+ .. versionadded:: 0.22
1224
+
1225
+ Attributes
1226
+ ----------
1227
+ n_components_ : int
1228
+ Number of atoms.
1229
+
1230
+ n_features_in_ : int
1231
+ Number of features seen during :term:`fit`.
1232
+
1233
+ .. versionadded:: 0.24
1234
+
1235
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1236
+ Names of features seen during :term:`fit`. Defined only when `X`
1237
+ has feature names that are all strings.
1238
+
1239
+ .. versionadded:: 1.0
1240
+
1241
+ See Also
1242
+ --------
1243
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1244
+ MiniBatchDictionaryLearning : A faster, less accurate, version of the
1245
+ dictionary learning algorithm.
1246
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1247
+ SparsePCA : Sparse Principal Components Analysis.
1248
+ sparse_encode : Sparse coding where each row of the result is the solution
1249
+ to a sparse coding problem.
1250
+
1251
+ Examples
1252
+ --------
1253
+ >>> import numpy as np
1254
+ >>> from sklearn.decomposition import SparseCoder
1255
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
1256
+ >>> dictionary = np.array(
1257
+ ... [[0, 1, 0],
1258
+ ... [-1, -1, 2],
1259
+ ... [1, 1, 1],
1260
+ ... [0, 1, 1],
1261
+ ... [0, 2, 1]],
1262
+ ... dtype=np.float64
1263
+ ... )
1264
+ >>> coder = SparseCoder(
1265
+ ... dictionary=dictionary, transform_algorithm='lasso_lars',
1266
+ ... transform_alpha=1e-10,
1267
+ ... )
1268
+ >>> coder.transform(X)
1269
+ array([[ 0., 0., -1., 0., 0.],
1270
+ [ 0., 1., 1., 0., 0.]])
1271
+ """
1272
+
1273
+ _required_parameters = ["dictionary"]
1274
+
1275
+ def __init__(
1276
+ self,
1277
+ dictionary,
1278
+ *,
1279
+ transform_algorithm="omp",
1280
+ transform_n_nonzero_coefs=None,
1281
+ transform_alpha=None,
1282
+ split_sign=False,
1283
+ n_jobs=None,
1284
+ positive_code=False,
1285
+ transform_max_iter=1000,
1286
+ ):
1287
+ super().__init__(
1288
+ transform_algorithm,
1289
+ transform_n_nonzero_coefs,
1290
+ transform_alpha,
1291
+ split_sign,
1292
+ n_jobs,
1293
+ positive_code,
1294
+ transform_max_iter,
1295
+ )
1296
+ self.dictionary = dictionary
1297
+
1298
+ def fit(self, X, y=None):
1299
+ """Do nothing and return the estimator unchanged.
1300
+
1301
+ This method is just there to implement the usual API and hence
1302
+ work in pipelines.
1303
+
1304
+ Parameters
1305
+ ----------
1306
+ X : Ignored
1307
+ Not used, present for API consistency by convention.
1308
+
1309
+ y : Ignored
1310
+ Not used, present for API consistency by convention.
1311
+
1312
+ Returns
1313
+ -------
1314
+ self : object
1315
+ Returns the instance itself.
1316
+ """
1317
+ return self
1318
+
1319
+ def transform(self, X, y=None):
1320
+ """Encode the data as a sparse combination of the dictionary atoms.
1321
+
1322
+ Coding method is determined by the object parameter
1323
+ `transform_algorithm`.
1324
+
1325
+ Parameters
1326
+ ----------
1327
+ X : ndarray of shape (n_samples, n_features)
1328
+ Training vector, where `n_samples` is the number of samples
1329
+ and `n_features` is the number of features.
1330
+
1331
+ y : Ignored
1332
+ Not used, present for API consistency by convention.
1333
+
1334
+ Returns
1335
+ -------
1336
+ X_new : ndarray of shape (n_samples, n_components)
1337
+ Transformed data.
1338
+ """
1339
+ return super()._transform(X, self.dictionary)
1340
+
1341
+ def _more_tags(self):
1342
+ return {
1343
+ "requires_fit": False,
1344
+ "preserves_dtype": [np.float64, np.float32],
1345
+ }
1346
+
1347
+ @property
1348
+ def n_components_(self):
1349
+ """Number of atoms."""
1350
+ return self.dictionary.shape[0]
1351
+
1352
+ @property
1353
+ def n_features_in_(self):
1354
+ """Number of features seen during `fit`."""
1355
+ return self.dictionary.shape[1]
1356
+
1357
+ @property
1358
+ def _n_features_out(self):
1359
+ """Number of transformed output features."""
1360
+ return self.n_components_
1361
+
1362
+
1363
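Editor's note: a short sketch, not part of the diff, showing the effect of the `split_sign` option documented above; it reuses the dictionary from the SparseCoder docstring example. With `split_sign=True` the 5 atoms produce 10 non-negative output columns (positive part first, then the negated negative part).

    import numpy as np
    from sklearn.decomposition import SparseCoder

    dictionary = np.array(
        [[0, 1, 0], [-1, -1, 2], [1, 1, 1], [0, 1, 1], [0, 2, 1]], dtype=np.float64
    )
    X = np.array([[-1.0, -1.0, -1.0], [0.0, 0.0, 3.0]])
    coder = SparseCoder(
        dictionary=dictionary,
        transform_algorithm="lasso_lars",
        transform_alpha=1e-10,
        split_sign=True,
    )
    code = coder.transform(X)
    # First 5 columns hold max(code, 0), the last 5 hold -min(code, 0).
    assert code.shape == (2, 10)
    assert np.all(code >= 0)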
+ class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
1364
+ """Dictionary learning.
1365
+
1366
+ Finds a dictionary (a set of atoms) that performs well at sparsely
1367
+ encoding the fitted data.
1368
+
1369
+ Solves the optimization problem::
1370
+
1371
+ (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
1372
+ (U,V)
1373
+ with || V_k ||_2 <= 1 for all 0 <= k < n_components
1374
+
1375
+ ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
1376
+ the entry-wise matrix norm which is the sum of the absolute values
1377
+ of all the entries in the matrix.
1378
+
1379
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
1380
+
1381
+ Parameters
1382
+ ----------
1383
+ n_components : int, default=None
1384
+ Number of dictionary elements to extract. If None, then ``n_components``
1385
+ is set to ``n_features``.
1386
+
1387
+ alpha : float, default=1.0
1388
+ Sparsity controlling parameter.
1389
+
1390
+ max_iter : int, default=1000
1391
+ Maximum number of iterations to perform.
1392
+
1393
+ tol : float, default=1e-8
1394
+ Tolerance for numerical error.
1395
+
1396
+ fit_algorithm : {'lars', 'cd'}, default='lars'
1397
+ * `'lars'`: uses the least angle regression method to solve the lasso
1398
+ problem (:func:`~sklearn.linear_model.lars_path`);
1399
+ * `'cd'`: uses the coordinate descent method to compute the
1400
+ Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be
1401
+ faster if the estimated components are sparse.
1402
+
1403
+ .. versionadded:: 0.17
1404
+ *cd* coordinate descent method to improve speed.
1405
+
1406
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1407
+ 'threshold'}, default='omp'
1408
+ Algorithm used to transform the data:
1409
+
1410
+ - `'lars'`: uses the least angle regression method
1411
+ (:func:`~sklearn.linear_model.lars_path`);
1412
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution.
1413
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1414
+ Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'`
1415
+ will be faster if the estimated components are sparse.
1416
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1417
+ solution.
1418
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1419
+ the projection ``dictionary * X'``.
1420
+
1421
+ .. versionadded:: 0.17
1422
+ *lasso_cd* coordinate descent method to improve speed.
1423
+
1424
+ transform_n_nonzero_coefs : int, default=None
1425
+ Number of nonzero coefficients to target in each column of the
1426
+ solution. This is only used by `algorithm='lars'` and
1427
+ `algorithm='omp'`. If `None`, then
1428
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1429
+
1430
+ transform_alpha : float, default=None
1431
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1432
+ penalty applied to the L1 norm.
1433
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1434
+ threshold below which coefficients will be squashed to zero.
1435
+ If `None`, defaults to `alpha`.
1436
+
1437
+ .. versionchanged:: 1.2
1438
+ When None, default value changed from 1.0 to `alpha`.
1439
+
1440
+ n_jobs : int or None, default=None
1441
+ Number of parallel jobs to run.
1442
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1443
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1444
+ for more details.
1445
+
1446
+ code_init : ndarray of shape (n_samples, n_components), default=None
1447
+ Initial value for the code, for warm restart. Only used if `code_init`
1448
+ and `dict_init` are not None.
1449
+
1450
+ dict_init : ndarray of shape (n_components, n_features), default=None
1451
+ Initial values for the dictionary, for warm restart. Only used if
1452
+ `code_init` and `dict_init` are not None.
1453
+
1454
+ callback : callable, default=None
1455
+ Callable that gets invoked every five iterations.
1456
+
1457
+ .. versionadded:: 1.3
1458
+
1459
+ verbose : bool, default=False
1460
+ To control the verbosity of the procedure.
1461
+
1462
+ split_sign : bool, default=False
1463
+ Whether to split the sparse feature vector into the concatenation of
1464
+ its negative part and its positive part. This can improve the
1465
+ performance of downstream classifiers.
1466
+
1467
+ random_state : int, RandomState instance or None, default=None
1468
+ Used for initializing the dictionary when ``dict_init`` is not
1469
+ specified, randomly shuffling the data when ``shuffle`` is set to
1470
+ ``True``, and updating the dictionary. Pass an int for reproducible
1471
+ results across multiple function calls.
1472
+ See :term:`Glossary <random_state>`.
1473
+
1474
+ positive_code : bool, default=False
1475
+ Whether to enforce positivity when finding the code.
1476
+
1477
+ .. versionadded:: 0.20
1478
+
1479
+ positive_dict : bool, default=False
1480
+ Whether to enforce positivity when finding the dictionary.
1481
+
1482
+ .. versionadded:: 0.20
1483
+
1484
+ transform_max_iter : int, default=1000
1485
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1486
+ `'lasso_lars'`.
1487
+
1488
+ .. versionadded:: 0.22
1489
+
1490
+ Attributes
1491
+ ----------
1492
+ components_ : ndarray of shape (n_components, n_features)
1493
+ Dictionary atoms extracted from the data.
1494
+
1495
+ error_ : array
1496
+ Vector of errors at each iteration.
1497
+
1498
+ n_features_in_ : int
1499
+ Number of features seen during :term:`fit`.
1500
+
1501
+ .. versionadded:: 0.24
1502
+
1503
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1504
+ Names of features seen during :term:`fit`. Defined only when `X`
1505
+ has feature names that are all strings.
1506
+
1507
+ .. versionadded:: 1.0
1508
+
1509
+ n_iter_ : int
1510
+ Number of iterations run.
1511
+
1512
+ See Also
1513
+ --------
1514
+ MiniBatchDictionaryLearning: A faster, less accurate, version of the
1515
+ dictionary learning algorithm.
1516
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1517
+ SparseCoder : Find a sparse representation of data from a fixed,
1518
+ precomputed dictionary.
1519
+ SparsePCA : Sparse Principal Components Analysis.
1520
+
1521
+ References
1522
+ ----------
1523
+
1524
+ J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
1525
+ for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
1526
+
1527
+ Examples
1528
+ --------
1529
+ >>> import numpy as np
1530
+ >>> from sklearn.datasets import make_sparse_coded_signal
1531
+ >>> from sklearn.decomposition import DictionaryLearning
1532
+ >>> X, dictionary, code = make_sparse_coded_signal(
1533
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1534
+ ... random_state=42,
1535
+ ... )
1536
+ >>> dict_learner = DictionaryLearning(
1537
+ ... n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.1,
1538
+ ... random_state=42,
1539
+ ... )
1540
+ >>> X_transformed = dict_learner.fit(X).transform(X)
1541
+
1542
+ We can check the level of sparsity of `X_transformed`:
1543
+
1544
+ >>> np.mean(X_transformed == 0)
1545
+ 0.52...
1546
+
1547
+ We can compare the average squared euclidean norm of the reconstruction
1548
+ error of the sparse coded signal relative to the squared euclidean norm of
1549
+ the original signal:
1550
+
1551
+ >>> X_hat = X_transformed @ dict_learner.components_
1552
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1553
+ 0.05...
1554
+ """
1555
+
1556
+ _parameter_constraints: dict = {
1557
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
1558
+ "alpha": [Interval(Real, 0, None, closed="left")],
1559
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
1560
+ "tol": [Interval(Real, 0, None, closed="left")],
1561
+ "fit_algorithm": [StrOptions({"lars", "cd"})],
1562
+ "transform_algorithm": [
1563
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
1564
+ ],
1565
+ "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
1566
+ "transform_alpha": [Interval(Real, 0, None, closed="left"), None],
1567
+ "n_jobs": [Integral, None],
1568
+ "code_init": [np.ndarray, None],
1569
+ "dict_init": [np.ndarray, None],
1570
+ "callback": [callable, None],
1571
+ "verbose": ["verbose"],
1572
+ "split_sign": ["boolean"],
1573
+ "random_state": ["random_state"],
1574
+ "positive_code": ["boolean"],
1575
+ "positive_dict": ["boolean"],
1576
+ "transform_max_iter": [Interval(Integral, 0, None, closed="left")],
1577
+ }
1578
+
1579
+ def __init__(
1580
+ self,
1581
+ n_components=None,
1582
+ *,
1583
+ alpha=1,
1584
+ max_iter=1000,
1585
+ tol=1e-8,
1586
+ fit_algorithm="lars",
1587
+ transform_algorithm="omp",
1588
+ transform_n_nonzero_coefs=None,
1589
+ transform_alpha=None,
1590
+ n_jobs=None,
1591
+ code_init=None,
1592
+ dict_init=None,
1593
+ callback=None,
1594
+ verbose=False,
1595
+ split_sign=False,
1596
+ random_state=None,
1597
+ positive_code=False,
1598
+ positive_dict=False,
1599
+ transform_max_iter=1000,
1600
+ ):
1601
+ super().__init__(
1602
+ transform_algorithm,
1603
+ transform_n_nonzero_coefs,
1604
+ transform_alpha,
1605
+ split_sign,
1606
+ n_jobs,
1607
+ positive_code,
1608
+ transform_max_iter,
1609
+ )
1610
+ self.n_components = n_components
1611
+ self.alpha = alpha
1612
+ self.max_iter = max_iter
1613
+ self.tol = tol
1614
+ self.fit_algorithm = fit_algorithm
1615
+ self.code_init = code_init
1616
+ self.dict_init = dict_init
1617
+ self.callback = callback
1618
+ self.verbose = verbose
1619
+ self.random_state = random_state
1620
+ self.positive_dict = positive_dict
1621
+
1622
+ def fit(self, X, y=None):
1623
+ """Fit the model from data in X.
1624
+
1625
+ Parameters
1626
+ ----------
1627
+ X : array-like of shape (n_samples, n_features)
1628
+ Training vector, where `n_samples` is the number of samples
1629
+ and `n_features` is the number of features.
1630
+
1631
+ y : Ignored
1632
+ Not used, present for API consistency by convention.
1633
+
1634
+ Returns
1635
+ -------
1636
+ self : object
1637
+ Returns the instance itself.
1638
+ """
1639
+ self.fit_transform(X)
1640
+ return self
1641
+
1642
+ @_fit_context(prefer_skip_nested_validation=True)
1643
+ def fit_transform(self, X, y=None):
1644
+ """Fit the model from data in X and return the transformed data.
1645
+
1646
+ Parameters
1647
+ ----------
1648
+ X : array-like of shape (n_samples, n_features)
1649
+ Training vector, where `n_samples` is the number of samples
1650
+ and `n_features` is the number of features.
1651
+
1652
+ y : Ignored
1653
+ Not used, present for API consistency by convention.
1654
+
1655
+ Returns
1656
+ -------
1657
+ V : ndarray of shape (n_samples, n_components)
1658
+ Transformed data.
1659
+ """
1660
+ _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code)
1661
+
1662
+ method = "lasso_" + self.fit_algorithm
1663
+
1664
+ random_state = check_random_state(self.random_state)
1665
+ X = self._validate_data(X)
1666
+
1667
+ if self.n_components is None:
1668
+ n_components = X.shape[1]
1669
+ else:
1670
+ n_components = self.n_components
1671
+
1672
+ V, U, E, self.n_iter_ = _dict_learning(
1673
+ X,
1674
+ n_components,
1675
+ alpha=self.alpha,
1676
+ tol=self.tol,
1677
+ max_iter=self.max_iter,
1678
+ method=method,
1679
+ method_max_iter=self.transform_max_iter,
1680
+ n_jobs=self.n_jobs,
1681
+ code_init=self.code_init,
1682
+ dict_init=self.dict_init,
1683
+ callback=self.callback,
1684
+ verbose=self.verbose,
1685
+ random_state=random_state,
1686
+ return_n_iter=True,
1687
+ positive_dict=self.positive_dict,
1688
+ positive_code=self.positive_code,
1689
+ )
1690
+ self.components_ = U
1691
+ self.error_ = E
1692
+
1693
+ return V
1694
+
1695
+ @property
1696
+ def _n_features_out(self):
1697
+ """Number of transformed output features."""
1698
+ return self.components_.shape[0]
1699
+
1700
+ def _more_tags(self):
1701
+ return {
1702
+ "preserves_dtype": [np.float64, np.float32],
1703
+ }
1704
+
1705
+
1706
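Editor's note: a hedged sketch, not part of the diff, of how `fit_algorithm` and `transform_algorithm` are independent in the `DictionaryLearning` class above: the dictionary can be learned with coordinate descent while new data is encoded with Lasso-LARS. Parameter values are illustrative only.

    import numpy as np
    from sklearn.datasets import make_sparse_coded_signal
    from sklearn.decomposition import DictionaryLearning

    X, _, _ = make_sparse_coded_signal(
        n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=0
    )
    dl = DictionaryLearning(
        n_components=15,
        fit_algorithm="cd",                # solver used while learning the dictionary
        transform_algorithm="lasso_lars",  # solver used by transform() on new data
        transform_alpha=0.1,
        max_iter=100,
        random_state=0,
    )
    dl.fit(X)
    code = dl.transform(X)
    assert dl.components_.shape == (15, 20)
    assert code.shape == (30, 15)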
+ class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
1707
+ """Mini-batch dictionary learning.
1708
+
1709
+ Finds a dictionary (a set of atoms) that performs well at sparsely
1710
+ encoding the fitted data.
1711
+
1712
+ Solves the optimization problem::
1713
+
1714
+ (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
1715
+ (U,V)
1716
+ with || V_k ||_2 <= 1 for all 0 <= k < n_components
1717
+
1718
+ ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
1719
+ the entry-wise matrix norm which is the sum of the absolute values
1720
+ of all the entries in the matrix.
1721
+
1722
+ Read more in the :ref:`User Guide <DictionaryLearning>`.
1723
+
1724
+ Parameters
1725
+ ----------
1726
+ n_components : int, default=None
1727
+ Number of dictionary elements to extract.
1728
+
1729
+ alpha : float, default=1
1730
+ Sparsity controlling parameter.
1731
+
1732
+ max_iter : int, default=1_000
1733
+ Maximum number of iterations over the complete dataset before
1734
+ stopping independently of any early stopping criterion heuristics.
1735
+
1736
+ .. versionadded:: 1.1
1737
+
1738
+ .. deprecated:: 1.4
1739
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
1740
+ Use the default value (i.e. `1_000`) instead.
1741
+
1742
+ fit_algorithm : {'lars', 'cd'}, default='lars'
1743
+ The algorithm used:
1744
+
1745
+ - `'lars'`: uses the least angle regression method to solve the lasso
1746
+ problem (`linear_model.lars_path`)
1747
+ - `'cd'`: uses the coordinate descent method to compute the
1748
+ Lasso solution (`linear_model.Lasso`). Lars will be faster if
1749
+ the estimated components are sparse.
1750
+
1751
+ n_jobs : int, default=None
1752
+ Number of parallel jobs to run.
1753
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1754
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1755
+ for more details.
1756
+
1757
+ batch_size : int, default=256
1758
+ Number of samples in each mini-batch.
1759
+
1760
+ .. versionchanged:: 1.3
1761
+ The default value of `batch_size` changed from 3 to 256 in version 1.3.
1762
+
1763
+ shuffle : bool, default=True
1764
+ Whether to shuffle the samples before forming batches.
1765
+
1766
+ dict_init : ndarray of shape (n_components, n_features), default=None
1767
+ Initial value of the dictionary for warm restart scenarios.
1768
+
1769
+ transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
1770
+ 'threshold'}, default='omp'
1771
+ Algorithm used to transform the data:
1772
+
1773
+ - `'lars'`: uses the least angle regression method
1774
+ (`linear_model.lars_path`);
1775
+ - `'lasso_lars'`: uses Lars to compute the Lasso solution.
1776
+ - `'lasso_cd'`: uses the coordinate descent method to compute the
1777
+ Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
1778
+ if the estimated components are sparse.
1779
+ - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
1780
+ solution.
1781
+ - `'threshold'`: squashes to zero all coefficients less than alpha from
1782
+ the projection ``dictionary * X'``.
1783
+
1784
+ transform_n_nonzero_coefs : int, default=None
1785
+ Number of nonzero coefficients to target in each column of the
1786
+ solution. This is only used by `algorithm='lars'` and
1787
+ `algorithm='omp'`. If `None`, then
1788
+ `transform_n_nonzero_coefs=int(n_features / 10)`.
1789
+
1790
+ transform_alpha : float, default=None
1791
+ If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
1792
+ penalty applied to the L1 norm.
1793
+ If `algorithm='threshold'`, `alpha` is the absolute value of the
1794
+ threshold below which coefficients will be squashed to zero.
1795
+ If `None`, defaults to `alpha`.
1796
+
1797
+ .. versionchanged:: 1.2
1798
+ When None, default value changed from 1.0 to `alpha`.
1799
+
1800
+ verbose : bool or int, default=False
1801
+ To control the verbosity of the procedure.
1802
+
1803
+ split_sign : bool, default=False
1804
+ Whether to split the sparse feature vector into the concatenation of
1805
+ its negative part and its positive part. This can improve the
1806
+ performance of downstream classifiers.
1807
+
1808
+ random_state : int, RandomState instance or None, default=None
1809
+ Used for initializing the dictionary when ``dict_init`` is not
1810
+ specified, randomly shuffling the data when ``shuffle`` is set to
1811
+ ``True``, and updating the dictionary. Pass an int for reproducible
1812
+ results across multiple function calls.
1813
+ See :term:`Glossary <random_state>`.
1814
+
1815
+ positive_code : bool, default=False
1816
+ Whether to enforce positivity when finding the code.
1817
+
1818
+ .. versionadded:: 0.20
1819
+
1820
+ positive_dict : bool, default=False
1821
+ Whether to enforce positivity when finding the dictionary.
1822
+
1823
+ .. versionadded:: 0.20
1824
+
1825
+ transform_max_iter : int, default=1000
1826
+ Maximum number of iterations to perform if `algorithm='lasso_cd'` or
1827
+ `'lasso_lars'`.
1828
+
1829
+ .. versionadded:: 0.22
1830
+
1831
+ callback : callable, default=None
1832
+ A callable that gets invoked at the end of each iteration.
1833
+
1834
+ .. versionadded:: 1.1
1835
+
1836
+ tol : float, default=1e-3
1837
+ Control early stopping based on the norm of the differences in the
1838
+ dictionary between 2 steps.
1839
+
1840
+ To disable early stopping based on changes in the dictionary, set
1841
+ `tol` to 0.0.
1842
+
1843
+ .. versionadded:: 1.1
1844
+
1845
+ max_no_improvement : int, default=10
1846
+ Control early stopping based on the consecutive number of mini-batches
1847
+ that do not yield an improvement on the smoothed cost function.
1848
+
1849
+ To disable convergence detection based on cost function, set
1850
+ `max_no_improvement` to None.
1851
+
1852
+ .. versionadded:: 1.1
1853
+
1854
+ Attributes
1855
+ ----------
1856
+ components_ : ndarray of shape (n_components, n_features)
1857
+ Components extracted from the data.
1858
+
1859
+ n_features_in_ : int
1860
+ Number of features seen during :term:`fit`.
1861
+
1862
+ .. versionadded:: 0.24
1863
+
1864
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1865
+ Names of features seen during :term:`fit`. Defined only when `X`
1866
+ has feature names that are all strings.
1867
+
1868
+ .. versionadded:: 1.0
1869
+
1870
+ n_iter_ : int
1871
+ Number of iterations over the full dataset.
1872
+
1873
+ n_steps_ : int
1874
+ Number of mini-batches processed.
1875
+
1876
+ .. versionadded:: 1.1
1877
+
1878
+ See Also
1879
+ --------
1880
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1881
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1882
+ SparseCoder : Find a sparse representation of data from a fixed,
1883
+ precomputed dictionary.
1884
+ SparsePCA : Sparse Principal Components Analysis.
1885
+
1886
+ References
1887
+ ----------
1888
+
1889
+ J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
1890
+ for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
1891
+
1892
+ Examples
1893
+ --------
1894
+ >>> import numpy as np
1895
+ >>> from sklearn.datasets import make_sparse_coded_signal
1896
+ >>> from sklearn.decomposition import MiniBatchDictionaryLearning
1897
+ >>> X, dictionary, code = make_sparse_coded_signal(
1898
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1899
+ ... random_state=42)
1900
+ >>> dict_learner = MiniBatchDictionaryLearning(
1901
+ ... n_components=15, batch_size=3, transform_algorithm='lasso_lars',
1902
+ ... transform_alpha=0.1, max_iter=20, random_state=42)
1903
+ >>> X_transformed = dict_learner.fit_transform(X)
1904
+
1905
+ We can check the level of sparsity of `X_transformed`:
1906
+
1907
+ >>> np.mean(X_transformed == 0) > 0.5
1908
+ True
1909
+
1910
+ We can compare the average squared euclidean norm of the reconstruction
1911
+ error of the sparse coded signal relative to the squared euclidean norm of
1912
+ the original signal:
1913
+
1914
+ >>> X_hat = X_transformed @ dict_learner.components_
1915
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1916
+ 0.052...
1917
+ """
1918
+
1919
+ _parameter_constraints: dict = {
1920
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
1921
+ "alpha": [Interval(Real, 0, None, closed="left")],
1922
+ "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)],
1923
+ "fit_algorithm": [StrOptions({"cd", "lars"})],
1924
+ "n_jobs": [None, Integral],
1925
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
1926
+ "shuffle": ["boolean"],
1927
+ "dict_init": [None, np.ndarray],
1928
+ "transform_algorithm": [
1929
+ StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
1930
+ ],
1931
+ "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
1932
+ "transform_alpha": [Interval(Real, 0, None, closed="left"), None],
1933
+ "verbose": ["verbose"],
1934
+ "split_sign": ["boolean"],
1935
+ "random_state": ["random_state"],
1936
+ "positive_code": ["boolean"],
1937
+ "positive_dict": ["boolean"],
1938
+ "transform_max_iter": [Interval(Integral, 0, None, closed="left")],
1939
+ "callback": [None, callable],
1940
+ "tol": [Interval(Real, 0, None, closed="left")],
1941
+ "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
1942
+ }
1943
+
1944
+ def __init__(
1945
+ self,
1946
+ n_components=None,
1947
+ *,
1948
+ alpha=1,
1949
+ max_iter=1_000,
1950
+ fit_algorithm="lars",
1951
+ n_jobs=None,
1952
+ batch_size=256,
1953
+ shuffle=True,
1954
+ dict_init=None,
1955
+ transform_algorithm="omp",
1956
+ transform_n_nonzero_coefs=None,
1957
+ transform_alpha=None,
1958
+ verbose=False,
1959
+ split_sign=False,
1960
+ random_state=None,
1961
+ positive_code=False,
1962
+ positive_dict=False,
1963
+ transform_max_iter=1000,
1964
+ callback=None,
1965
+ tol=1e-3,
1966
+ max_no_improvement=10,
1967
+ ):
1968
+ super().__init__(
1969
+ transform_algorithm,
1970
+ transform_n_nonzero_coefs,
1971
+ transform_alpha,
1972
+ split_sign,
1973
+ n_jobs,
1974
+ positive_code,
1975
+ transform_max_iter,
1976
+ )
1977
+ self.n_components = n_components
1978
+ self.alpha = alpha
1979
+ self.max_iter = max_iter
1980
+ self.fit_algorithm = fit_algorithm
1981
+ self.dict_init = dict_init
1982
+ self.verbose = verbose
1983
+ self.shuffle = shuffle
1984
+ self.batch_size = batch_size
1985
+ self.split_sign = split_sign
1986
+ self.random_state = random_state
1987
+ self.positive_dict = positive_dict
1988
+ self.callback = callback
1989
+ self.max_no_improvement = max_no_improvement
1990
+ self.tol = tol
1991
+
1992
+ def _check_params(self, X):
1993
+ # n_components
1994
+ self._n_components = self.n_components
1995
+ if self._n_components is None:
1996
+ self._n_components = X.shape[1]
1997
+
1998
+ # fit_algorithm
1999
+ _check_positive_coding(self.fit_algorithm, self.positive_code)
2000
+ self._fit_algorithm = "lasso_" + self.fit_algorithm
2001
+
2002
+ # batch_size
2003
+ self._batch_size = min(self.batch_size, X.shape[0])
2004
+
2005
+ def _initialize_dict(self, X, random_state):
2006
+ """Initialization of the dictionary."""
2007
+ if self.dict_init is not None:
2008
+ dictionary = self.dict_init
2009
+ else:
2010
+ # Init V with SVD of X
2011
+ _, S, dictionary = randomized_svd(
2012
+ X, self._n_components, random_state=random_state
2013
+ )
2014
+ dictionary = S[:, np.newaxis] * dictionary
2015
+
2016
+ if self._n_components <= len(dictionary):
2017
+ dictionary = dictionary[: self._n_components, :]
2018
+ else:
2019
+ dictionary = np.concatenate(
2020
+ (
2021
+ dictionary,
2022
+ np.zeros(
2023
+ (self._n_components - len(dictionary), dictionary.shape[1]),
2024
+ dtype=dictionary.dtype,
2025
+ ),
2026
+ )
2027
+ )
2028
+
2029
+ dictionary = check_array(dictionary, order="F", dtype=X.dtype, copy=False)
2030
+ dictionary = np.require(dictionary, requirements="W")
2031
+
2032
+ return dictionary
2033
+
2034
+ def _update_inner_stats(self, X, code, batch_size, step):
2035
+ """Update the inner stats inplace."""
2036
+ if step < batch_size - 1:
2037
+ theta = (step + 1) * batch_size
2038
+ else:
2039
+ theta = batch_size**2 + step + 1 - batch_size
2040
+ beta = (theta + 1 - batch_size) / (theta + 1)
2041
+
2042
+ self._A *= beta
2043
+ self._A += code.T @ code / batch_size
2044
+ self._B *= beta
2045
+ self._B += X.T @ code / batch_size
2046
+
2047
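Editor's note: a standalone sketch, not part of the file, of the recursion that `_update_inner_stats` above implements; it follows the online dictionary learning statistics of Mairal et al. (2009), with `beta` acting as a forgetting factor.

    import numpy as np

    def update_stats(A, B, X_batch, code, beta):
        # A accumulates code.T @ code, B accumulates X.T @ code, both scaled by the
        # batch size and down-weighted by beta so that old mini-batches fade out.
        batch_size = X_batch.shape[0]
        A = beta * A + code.T @ code / batch_size
        B = beta * B + X_batch.T @ code / batch_size
        return A, B

    A = np.zeros((3, 3))
    B = np.zeros((5, 3))
    code = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 0.0]])
    X_batch = np.ones((2, 5))
    A, B = update_stats(A, B, X_batch, code, beta=0.9)
    assert A.shape == (3, 3) and B.shape == (5, 3)

These A and B play the role of the `self._A` / `self._B` buffers consumed by `_update_dict` in the mini-batch step below.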
+ def _minibatch_step(self, X, dictionary, random_state, step):
2048
+ """Perform the update on the dictionary for one minibatch."""
2049
+ batch_size = X.shape[0]
2050
+
2051
+ # Compute code for this batch
2052
+ code = _sparse_encode(
2053
+ X,
2054
+ dictionary,
2055
+ algorithm=self._fit_algorithm,
2056
+ alpha=self.alpha,
2057
+ n_jobs=self.n_jobs,
2058
+ positive=self.positive_code,
2059
+ max_iter=self.transform_max_iter,
2060
+ verbose=self.verbose,
2061
+ )
2062
+
2063
+ batch_cost = (
2064
+ 0.5 * ((X - code @ dictionary) ** 2).sum()
2065
+ + self.alpha * np.sum(np.abs(code))
2066
+ ) / batch_size
2067
+
2068
+ # Update inner stats
2069
+ self._update_inner_stats(X, code, batch_size, step)
2070
+
2071
+ # Update dictionary
2072
+ _update_dict(
2073
+ dictionary,
2074
+ X,
2075
+ code,
2076
+ self._A,
2077
+ self._B,
2078
+ verbose=self.verbose,
2079
+ random_state=random_state,
2080
+ positive=self.positive_dict,
2081
+ )
2082
+
2083
+ return batch_cost
2084
+
2085
+ def _check_convergence(
2086
+ self, X, batch_cost, new_dict, old_dict, n_samples, step, n_steps
2087
+ ):
2088
+ """Helper function to encapsulate the early stopping logic.
2089
+
2090
+ Early stopping is based on two factors:
2091
+ - A small change of the dictionary between two minibatch updates. This is
2092
+ controlled by the tol parameter.
2093
+ - No more improvement on a smoothed estimate of the objective function for a
2094
+ a certain number of consecutive minibatch updates. This is controlled by
2095
+ the max_no_improvement parameter.
2096
+ """
2097
+ batch_size = X.shape[0]
2098
+
2099
+ # counts steps starting from 1 for user-friendly verbose mode.
2100
+ step = step + 1
2101
+
2102
+ # Ignore the first 100 steps or 1 epoch to avoid initializing the ewa_cost with a
2103
+ # value that is too poor.
2104
+ if step <= min(100, n_samples / batch_size):
2105
+ if self.verbose:
2106
+ print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
2107
+ return False
2108
+
2109
+ # Compute an Exponentially Weighted Average of the cost function to
2110
+ # monitor the convergence while discarding minibatch-local stochastic
2111
+ # variability: https://en.wikipedia.org/wiki/Moving_average
2112
+ if self._ewa_cost is None:
2113
+ self._ewa_cost = batch_cost
2114
+ else:
2115
+ alpha = batch_size / (n_samples + 1)
2116
+ alpha = min(alpha, 1)
2117
+ self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
2118
+
2119
+ if self.verbose:
2120
+ print(
2121
+ f"Minibatch step {step}/{n_steps}: mean batch cost: "
2122
+ f"{batch_cost}, ewa cost: {self._ewa_cost}"
2123
+ )
2124
+
2125
+ # Early stopping based on change of dictionary
2126
+ dict_diff = linalg.norm(new_dict - old_dict) / self._n_components
2127
+ if self.tol > 0 and dict_diff <= self.tol:
2128
+ if self.verbose:
2129
+ print(f"Converged (small dictionary change) at step {step}/{n_steps}")
2130
+ return True
2131
+
2132
+ # Early stopping heuristic due to lack of improvement on smoothed
2133
+ # cost function
2134
+ if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
2135
+ self._no_improvement = 0
2136
+ self._ewa_cost_min = self._ewa_cost
2137
+ else:
2138
+ self._no_improvement += 1
2139
+
2140
+ if (
2141
+ self.max_no_improvement is not None
2142
+ and self._no_improvement >= self.max_no_improvement
2143
+ ):
2144
+ if self.verbose:
2145
+ print(
2146
+ "Converged (lack of improvement in objective function) "
2147
+ f"at step {step}/{n_steps}"
2148
+ )
2149
+ return True
2150
+
2151
+ return False
2152
+
2153
+ @_fit_context(prefer_skip_nested_validation=True)
2154
+ def fit(self, X, y=None):
2155
+ """Fit the model from data in X.
2156
+
2157
+ Parameters
2158
+ ----------
2159
+ X : array-like of shape (n_samples, n_features)
2160
+ Training vector, where `n_samples` is the number of samples
2161
+ and `n_features` is the number of features.
2162
+
2163
+ y : Ignored
2164
+ Not used, present for API consistency by convention.
2165
+
2166
+ Returns
2167
+ -------
2168
+ self : object
2169
+ Returns the instance itself.
2170
+ """
2171
+ X = self._validate_data(
2172
+ X, dtype=[np.float64, np.float32], order="C", copy=False
2173
+ )
2174
+
2175
+ self._check_params(X)
2176
+ self._random_state = check_random_state(self.random_state)
2177
+
2178
+ dictionary = self._initialize_dict(X, self._random_state)
2179
+ old_dict = dictionary.copy()
2180
+
2181
+ if self.shuffle:
2182
+ X_train = X.copy()
2183
+ self._random_state.shuffle(X_train)
2184
+ else:
2185
+ X_train = X
2186
+
2187
+ n_samples, n_features = X_train.shape
2188
+
2189
+ if self.verbose:
2190
+ print("[dict_learning]")
2191
+
2192
+ # Inner stats
2193
+ self._A = np.zeros(
2194
+ (self._n_components, self._n_components), dtype=X_train.dtype
2195
+ )
2196
+ self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype)
2197
+
2198
+ # TODO(1.6): remove in 1.6
2199
+ if self.max_iter is None:
2200
+ warn(
2201
+ (
2202
+ "`max_iter=None` is deprecated in version 1.4 and will be removed"
2203
+ " in version 1.6. Use the default value (i.e. `1_000`) instead."
2204
+ ),
2205
+ FutureWarning,
2206
+ )
2207
+ max_iter = 1_000
2208
+ else:
2209
+ max_iter = self.max_iter
2210
+
2211
+ # Attributes to monitor the convergence
2212
+ self._ewa_cost = None
2213
+ self._ewa_cost_min = None
2214
+ self._no_improvement = 0
2215
+
2216
+ batches = gen_batches(n_samples, self._batch_size)
2217
+ batches = itertools.cycle(batches)
2218
+ n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
2219
+ n_steps = max_iter * n_steps_per_iter
2220
+
2221
+ i = -1 # to allow max_iter = 0
2222
+
2223
+ for i, batch in zip(range(n_steps), batches):
2224
+ X_batch = X_train[batch]
2225
+
2226
+ batch_cost = self._minibatch_step(
2227
+ X_batch, dictionary, self._random_state, i
2228
+ )
2229
+
2230
+ if self._check_convergence(
2231
+ X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps
2232
+ ):
2233
+ break
2234
+
2235
+ # XXX callback param added for backward compat in #18975 but a common
2236
+ # unified callback API should be preferred
2237
+ if self.callback is not None:
2238
+ self.callback(locals())
2239
+
2240
+ old_dict[:] = dictionary
2241
+
2242
+ self.n_steps_ = i + 1
2243
+ self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter)
2244
+ self.components_ = dictionary
2245
+
2246
+ return self
2247
+
2248
+ @_fit_context(prefer_skip_nested_validation=True)
2249
+ def partial_fit(self, X, y=None):
2250
+ """Update the model using the data in X as a mini-batch.
2251
+
2252
+ Parameters
2253
+ ----------
2254
+ X : array-like of shape (n_samples, n_features)
2255
+ Training vector, where `n_samples` is the number of samples
2256
+ and `n_features` is the number of features.
2257
+
2258
+ y : Ignored
2259
+ Not used, present for API consistency by convention.
2260
+
2261
+ Returns
2262
+ -------
2263
+ self : object
2264
+ Return the instance itself.
2265
+ """
2266
+ has_components = hasattr(self, "components_")
2267
+
2268
+ X = self._validate_data(
2269
+ X, dtype=[np.float64, np.float32], order="C", reset=not has_components
2270
+ )
2271
+
2272
+ if not has_components:
2273
+ # This instance has not been fitted yet (fit or partial_fit)
2274
+ self._check_params(X)
2275
+ self._random_state = check_random_state(self.random_state)
2276
+
2277
+ dictionary = self._initialize_dict(X, self._random_state)
2278
+
2279
+ self.n_steps_ = 0
2280
+
2281
+ self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)
2282
+ self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)
2283
+ else:
2284
+ dictionary = self.components_
2285
+
2286
+ self._minibatch_step(X, dictionary, self._random_state, self.n_steps_)
2287
+
2288
+ self.components_ = dictionary
2289
+ self.n_steps_ += 1
2290
+
2291
+ return self
2292
+
2293
+ @property
2294
+ def _n_features_out(self):
2295
+ """Number of transformed output features."""
2296
+ return self.components_.shape[0]
2297
+
2298
+ def _more_tags(self):
2299
+ return {
2300
+ "preserves_dtype": [np.float64, np.float32],
2301
+ }
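Editor's note: a brief out-of-core usage sketch, not part of the diff, for the `partial_fit` path of `MiniBatchDictionaryLearning` defined above; each call processes one mini-batch and increments `n_steps_`.

    import numpy as np
    from sklearn.datasets import make_sparse_coded_signal
    from sklearn.decomposition import MiniBatchDictionaryLearning

    X, _, _ = make_sparse_coded_signal(
        n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=0
    )
    mbdl = MiniBatchDictionaryLearning(n_components=15, random_state=0)
    for batch in np.array_split(X, 10):   # feed the data one mini-batch at a time
        mbdl.partial_fit(batch)
    assert mbdl.components_.shape == (15, 20)
    assert mbdl.n_steps_ == 10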
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py ADDED
@@ -0,0 +1,458 @@
1
+ """Factor Analysis.
2
+
3
+ A latent linear variable model.
4
+
5
+ FactorAnalysis is similar to probabilistic PCA implemented by PCA.score.
6
+ While PCA assumes Gaussian noise with the same variance for each
7
+ feature, the FactorAnalysis model assumes different variances for
8
+ each of them.
9
+
10
+ This implementation is based on David Barber's Book,
11
+ Bayesian Reasoning and Machine Learning,
12
+ http://www.cs.ucl.ac.uk/staff/d.barber/brml,
13
+ Algorithm 21.1
14
+ """
15
+
16
+ # Author: Christian Osendorfer <[email protected]>
17
+ # Alexandre Gramfort <[email protected]>
18
+ # Denis A. Engemann <[email protected]>
19
+
20
+ # License: BSD3
21
+
22
+ import warnings
23
+ from math import log, sqrt
24
+ from numbers import Integral, Real
25
+
26
+ import numpy as np
27
+ from scipy import linalg
28
+
29
+ from ..base import (
30
+ BaseEstimator,
31
+ ClassNamePrefixFeaturesOutMixin,
32
+ TransformerMixin,
33
+ _fit_context,
34
+ )
35
+ from ..exceptions import ConvergenceWarning
36
+ from ..utils import check_random_state
37
+ from ..utils._param_validation import Interval, StrOptions
38
+ from ..utils.extmath import fast_logdet, randomized_svd, squared_norm
39
+ from ..utils.validation import check_is_fitted
40
+
41
+
42
+ class FactorAnalysis(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
43
+ """Factor Analysis (FA).
44
+
45
+ A simple linear generative model with Gaussian latent variables.
46
+
47
+ The observations are assumed to be caused by a linear transformation of
48
+ lower dimensional latent factors and added Gaussian noise.
49
+ Without loss of generality the factors are distributed according to a
50
+ Gaussian with zero mean and unit covariance. The noise is also zero mean
51
+ and has an arbitrary diagonal covariance matrix.
52
+
53
+ If we would restrict the model further, by assuming that the Gaussian
54
+ noise is even isotropic (all diagonal entries are the same) we would obtain
55
+ :class:`PCA`.
56
+
57
+ FactorAnalysis performs a maximum likelihood estimate of the so-called
58
+ `loading` matrix, the transformation of the latent variables to the
59
+ observed ones, using SVD based approach.
60
+
61
+ Read more in the :ref:`User Guide <FA>`.
62
+
63
+ .. versionadded:: 0.13
64
+
65
+ Parameters
66
+ ----------
67
+ n_components : int, default=None
68
+ Dimensionality of latent space, the number of components
69
+ of ``X`` that are obtained after ``transform``.
70
+ If None, n_components is set to the number of features.
71
+
72
+ tol : float, default=1e-2
73
+ Stopping tolerance for log-likelihood increase.
74
+
75
+ copy : bool, default=True
76
+ Whether to make a copy of X. If ``False``, the input X gets overwritten
77
+ during fitting.
78
+
79
+ max_iter : int, default=1000
80
+ Maximum number of iterations.
81
+
82
+ noise_variance_init : array-like of shape (n_features,), default=None
83
+ The initial guess of the noise variance for each feature.
84
+ If None, it defaults to np.ones(n_features).
85
+
86
+ svd_method : {'lapack', 'randomized'}, default='randomized'
87
+ Which SVD method to use. If 'lapack' use standard SVD from
88
+ scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
89
+ Defaults to 'randomized'. For most applications 'randomized' will
90
+ be sufficiently precise while providing significant speed gains.
91
+ Accuracy can also be improved by setting higher values for
92
+ `iterated_power`. If this is not sufficient, for maximum precision
93
+ you should choose 'lapack'.
94
+
95
+ iterated_power : int, default=3
96
+ Number of iterations for the power method. 3 by default. Only used
97
+ if ``svd_method`` equals 'randomized'.
98
+
99
+ rotation : {'varimax', 'quartimax'}, default=None
100
+ If not None, apply the indicated rotation. Currently, varimax and
101
+ quartimax are implemented. See
102
+ `"The varimax criterion for analytic rotation in factor analysis"
103
+ <https://link.springer.com/article/10.1007%2FBF02289233>`_
104
+ H. F. Kaiser, 1958.
105
+
106
+ .. versionadded:: 0.24
107
+
108
+ random_state : int or RandomState instance, default=0
109
+ Only used when ``svd_method`` equals 'randomized'. Pass an int for
110
+ reproducible results across multiple function calls.
111
+ See :term:`Glossary <random_state>`.
112
+
113
+ Attributes
114
+ ----------
115
+ components_ : ndarray of shape (n_components, n_features)
116
+ Components with maximum variance.
117
+
118
+ loglike_ : list of shape (n_iterations,)
119
+ The log likelihood at each iteration.
120
+
121
+ noise_variance_ : ndarray of shape (n_features,)
122
+ The estimated noise variance for each feature.
123
+
124
+ n_iter_ : int
125
+ Number of iterations run.
126
+
127
+ mean_ : ndarray of shape (n_features,)
128
+ Per-feature empirical mean, estimated from the training set.
129
+
130
+ n_features_in_ : int
131
+ Number of features seen during :term:`fit`.
132
+
133
+ .. versionadded:: 0.24
134
+
135
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
136
+ Names of features seen during :term:`fit`. Defined only when `X`
137
+ has feature names that are all strings.
138
+
139
+ .. versionadded:: 1.0
140
+
141
+ See Also
142
+ --------
143
+ PCA: Principal component analysis is also a latent linear variable model
144
+ which however assumes equal noise variance for each feature.
145
+ This extra assumption makes probabilistic PCA faster as it can be
146
+ computed in closed form.
147
+ FastICA: Independent component analysis, a latent variable model with
148
+ non-Gaussian latent variables.
149
+
150
+ References
151
+ ----------
152
+ - David Barber, Bayesian Reasoning and Machine Learning,
153
+ Algorithm 21.1.
154
+
155
+ - Christopher M. Bishop: Pattern Recognition and Machine Learning,
156
+ Chapter 12.2.4.
157
+
158
+ Examples
159
+ --------
160
+ >>> from sklearn.datasets import load_digits
161
+ >>> from sklearn.decomposition import FactorAnalysis
162
+ >>> X, _ = load_digits(return_X_y=True)
163
+ >>> transformer = FactorAnalysis(n_components=7, random_state=0)
164
+ >>> X_transformed = transformer.fit_transform(X)
165
+ >>> X_transformed.shape
166
+ (1797, 7)
167
+ """
168
+
169
+ _parameter_constraints: dict = {
170
+ "n_components": [Interval(Integral, 0, None, closed="left"), None],
171
+ "tol": [Interval(Real, 0.0, None, closed="left")],
172
+ "copy": ["boolean"],
173
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
174
+ "noise_variance_init": ["array-like", None],
175
+ "svd_method": [StrOptions({"randomized", "lapack"})],
176
+ "iterated_power": [Interval(Integral, 0, None, closed="left")],
177
+ "rotation": [StrOptions({"varimax", "quartimax"}), None],
178
+ "random_state": ["random_state"],
179
+ }
180
+
181
+ def __init__(
182
+ self,
183
+ n_components=None,
184
+ *,
185
+ tol=1e-2,
186
+ copy=True,
187
+ max_iter=1000,
188
+ noise_variance_init=None,
189
+ svd_method="randomized",
190
+ iterated_power=3,
191
+ rotation=None,
192
+ random_state=0,
193
+ ):
194
+ self.n_components = n_components
195
+ self.copy = copy
196
+ self.tol = tol
197
+ self.max_iter = max_iter
198
+ self.svd_method = svd_method
199
+
200
+ self.noise_variance_init = noise_variance_init
201
+ self.iterated_power = iterated_power
202
+ self.random_state = random_state
203
+ self.rotation = rotation
204
+
205
+ @_fit_context(prefer_skip_nested_validation=True)
206
+ def fit(self, X, y=None):
207
+ """Fit the FactorAnalysis model to X using SVD based approach.
208
+
209
+ Parameters
210
+ ----------
211
+ X : array-like of shape (n_samples, n_features)
212
+ Training data.
213
+
214
+ y : Ignored
215
+ Ignored parameter.
216
+
217
+ Returns
218
+ -------
219
+ self : object
220
+ FactorAnalysis class instance.
221
+ """
222
+ X = self._validate_data(X, copy=self.copy, dtype=np.float64)
223
+
224
+ n_samples, n_features = X.shape
225
+ n_components = self.n_components
226
+ if n_components is None:
227
+ n_components = n_features
228
+
229
+ self.mean_ = np.mean(X, axis=0)
230
+ X -= self.mean_
231
+
232
+ # some constant terms
233
+ nsqrt = sqrt(n_samples)
234
+ llconst = n_features * log(2.0 * np.pi) + n_components
235
+ var = np.var(X, axis=0)
236
+
237
+ if self.noise_variance_init is None:
238
+ psi = np.ones(n_features, dtype=X.dtype)
239
+ else:
240
+ if len(self.noise_variance_init) != n_features:
241
+ raise ValueError(
242
+ "noise_variance_init dimension does not "
243
+ "with number of features : %d != %d"
244
+ % (len(self.noise_variance_init), n_features)
245
+ )
246
+ psi = np.array(self.noise_variance_init)
247
+
248
+ loglike = []
249
+ old_ll = -np.inf
250
+ SMALL = 1e-12
251
+
252
+ # we'll modify svd outputs to return unexplained variance
253
+ # to allow for unified computation of loglikelihood
254
+ if self.svd_method == "lapack":
255
+
256
+ def my_svd(X):
257
+ _, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
258
+ return (
259
+ s[:n_components],
260
+ Vt[:n_components],
261
+ squared_norm(s[n_components:]),
262
+ )
263
+
264
+ else: # svd_method == "randomized"
265
+ random_state = check_random_state(self.random_state)
266
+
267
+ def my_svd(X):
268
+ _, s, Vt = randomized_svd(
269
+ X,
270
+ n_components,
271
+ random_state=random_state,
272
+ n_iter=self.iterated_power,
273
+ )
274
+ return s, Vt, squared_norm(X) - squared_norm(s)
275
+
276
+ for i in range(self.max_iter):
277
+ # SMALL helps numerics
278
+ sqrt_psi = np.sqrt(psi) + SMALL
279
+ s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
280
+ s **= 2
281
+ # Use 'maximum' here to avoid sqrt problems.
282
+ W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt
283
+ del Vt
284
+ W *= sqrt_psi
285
+
286
+ # loglikelihood
287
+ ll = llconst + np.sum(np.log(s))
288
+ ll += unexp_var + np.sum(np.log(psi))
289
+ ll *= -n_samples / 2.0
290
+ loglike.append(ll)
291
+ if (ll - old_ll) < self.tol:
292
+ break
293
+ old_ll = ll
294
+
295
+ psi = np.maximum(var - np.sum(W**2, axis=0), SMALL)
296
+ else:
297
+ warnings.warn(
298
+ "FactorAnalysis did not converge."
299
+ + " You might want"
300
+ + " to increase the number of iterations.",
301
+ ConvergenceWarning,
302
+ )
303
+
304
+ self.components_ = W
305
+ if self.rotation is not None:
306
+ self.components_ = self._rotate(W)
307
+ self.noise_variance_ = psi
308
+ self.loglike_ = loglike
309
+ self.n_iter_ = i + 1
310
+ return self
311
+
312
+ def transform(self, X):
313
+ """Apply dimensionality reduction to X using the model.
314
+
315
+ Compute the expected mean of the latent variables.
316
+ See Barber, 21.2.33 (or Bishop, 12.66).
317
+
318
+ Parameters
319
+ ----------
320
+ X : array-like of shape (n_samples, n_features)
321
+ Training data.
322
+
323
+ Returns
324
+ -------
325
+ X_new : ndarray of shape (n_samples, n_components)
326
+ The latent variables of X.
327
+ """
328
+ check_is_fitted(self)
329
+
330
+ X = self._validate_data(X, reset=False)
331
+ Ih = np.eye(len(self.components_))
332
+
333
+ X_transformed = X - self.mean_
334
+
335
+ Wpsi = self.components_ / self.noise_variance_
336
+ cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
337
+ tmp = np.dot(X_transformed, Wpsi.T)
338
+ X_transformed = np.dot(tmp, cov_z)
339
+
340
+ return X_transformed
341
+
342
+ def get_covariance(self):
343
+ """Compute data covariance with the FactorAnalysis model.
344
+
345
+ ``cov = components_.T * components_ + diag(noise_variance)``
346
+
347
+ Returns
348
+ -------
349
+ cov : ndarray of shape (n_features, n_features)
350
+ Estimated covariance of data.
351
+ """
352
+ check_is_fitted(self)
353
+
354
+ cov = np.dot(self.components_.T, self.components_)
355
+ cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
356
+ return cov
357
+
358
+ def get_precision(self):
359
+ """Compute data precision matrix with the FactorAnalysis model.
360
+
361
+ Returns
362
+ -------
363
+ precision : ndarray of shape (n_features, n_features)
364
+ Estimated precision of data.
365
+ """
366
+ check_is_fitted(self)
367
+
368
+ n_features = self.components_.shape[1]
369
+
370
+ # handle corner cases first
371
+ if self.n_components == 0:
372
+ return np.diag(1.0 / self.noise_variance_)
373
+ if self.n_components == n_features:
374
+ return linalg.inv(self.get_covariance())
375
+
376
+ # Get precision using matrix inversion lemma
377
+ components_ = self.components_
378
+ precision = np.dot(components_ / self.noise_variance_, components_.T)
379
+ precision.flat[:: len(precision) + 1] += 1.0
380
+ precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
381
+ precision /= self.noise_variance_[:, np.newaxis]
382
+ precision /= -self.noise_variance_[np.newaxis, :]
383
+ precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
384
+ return precision
385
+
386
+ def score_samples(self, X):
387
+ """Compute the log-likelihood of each sample.
388
+
389
+ Parameters
390
+ ----------
391
+ X : ndarray of shape (n_samples, n_features)
392
+ The data.
393
+
394
+ Returns
395
+ -------
396
+ ll : ndarray of shape (n_samples,)
397
+ Log-likelihood of each sample under the current model.
398
+ """
399
+ check_is_fitted(self)
400
+ X = self._validate_data(X, reset=False)
401
+ Xr = X - self.mean_
402
+ precision = self.get_precision()
403
+ n_features = X.shape[1]
404
+ log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
405
+ log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
406
+ return log_like
407
+
408
+ def score(self, X, y=None):
409
+ """Compute the average log-likelihood of the samples.
410
+
411
+ Parameters
412
+ ----------
413
+ X : ndarray of shape (n_samples, n_features)
414
+ The data.
415
+
416
+ y : Ignored
417
+ Ignored parameter.
418
+
419
+ Returns
420
+ -------
421
+ ll : float
422
+ Average log-likelihood of the samples under the current model.
423
+ """
424
+ return np.mean(self.score_samples(X))
425
+
426
+ def _rotate(self, components, n_components=None, tol=1e-6):
427
+ "Rotate the factor analysis solution."
428
+ # note that tol is not exposed
429
+ return _ortho_rotation(components.T, method=self.rotation, tol=tol)[
430
+ : self.n_components
431
+ ]
432
+
433
+ @property
434
+ def _n_features_out(self):
435
+ """Number of transformed output features."""
436
+ return self.components_.shape[0]
437
+
438
+
439
+ def _ortho_rotation(components, method="varimax", tol=1e-6, max_iter=100):
440
+ """Return rotated components."""
441
+ nrow, ncol = components.shape
442
+ rotation_matrix = np.eye(ncol)
443
+ var = 0
444
+
445
+ for _ in range(max_iter):
446
+ comp_rot = np.dot(components, rotation_matrix)
447
+ if method == "varimax":
448
+ tmp = comp_rot * np.transpose((comp_rot**2).sum(axis=0) / nrow)
449
+ elif method == "quartimax":
450
+ tmp = 0
451
+ u, s, v = np.linalg.svd(np.dot(components.T, comp_rot**3 - tmp))
452
+ rotation_matrix = np.dot(u, v)
453
+ var_new = np.sum(s)
454
+ if var != 0 and var_new < var * (1 + tol):
455
+ break
456
+ var = var_new
457
+
458
+ return np.dot(components, rotation_matrix).T
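A minimal usage sketch of the FactorAnalysis estimator added above (illustration only, not part of the added file; it assumes scikit-learn and its bundled iris dataset are importable):

from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X, _ = load_iris(return_X_y=True)

# Two latent factors with a varimax rotation of the loading matrix.
fa = FactorAnalysis(n_components=2, rotation="varimax", random_state=0)
Z = fa.fit_transform(X)      # expected mean of the latent variables, shape (150, 2)
cov = fa.get_covariance()    # components_.T @ components_ + diag(noise_variance_)

print(Z.shape, cov.shape)    # (150, 2) (4, 4)
print(fa.n_iter_)            # number of SVD-based EM iterations actually run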
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py ADDED
@@ -0,0 +1,795 @@
1
+ """
2
+ Python implementation of the fast ICA algorithms.
3
+
4
+ Reference: Tables 8.3 and 8.4 page 196 in the book:
5
+ Independent Component Analysis, by Hyvarinen et al.
6
+ """
7
+
8
+ # Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
9
+ # Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
10
+ # License: BSD 3 clause
11
+
12
+ import warnings
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ from scipy import linalg
17
+
18
+ from ..base import (
19
+ BaseEstimator,
20
+ ClassNamePrefixFeaturesOutMixin,
21
+ TransformerMixin,
22
+ _fit_context,
23
+ )
24
+ from ..exceptions import ConvergenceWarning
25
+ from ..utils import as_float_array, check_array, check_random_state
26
+ from ..utils._param_validation import Interval, Options, StrOptions, validate_params
27
+ from ..utils.validation import check_is_fitted
28
+
29
+ __all__ = ["fastica", "FastICA"]
30
+
31
+
32
+ def _gs_decorrelation(w, W, j):
33
+ """
34
+ Orthonormalize w wrt the first j rows of W.
35
+
36
+ Parameters
37
+ ----------
38
+ w : ndarray of shape (n,)
39
+ Array to be orthogonalized
40
+
41
+ W : ndarray of shape (p, n)
42
+ Null space definition
43
+
44
+ j : int < p
45
+ The no of (from the first) rows of Null space W wrt which w is
46
+ orthogonalized.
47
+
48
+ Notes
49
+ -----
50
+ Assumes that W is orthogonal
51
+ w changed in place
52
+ """
53
+ w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
54
+ return w
55
+
56
+
57
+ def _sym_decorrelation(W):
58
+ """Symmetric decorrelation
59
+ i.e. W <- (W * W.T) ^{-1/2} * W
60
+ """
61
+ s, u = linalg.eigh(np.dot(W, W.T))
62
+ # Avoid sqrt of negative values because of rounding errors. Note that
63
+ # np.sqrt(tiny) is larger than tiny and therefore this clipping also
64
+ # prevents division by zero in the next step.
65
+ s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None)
66
+
67
+ # u (resp. s) contains the eigenvectors (resp. square roots of
68
+ # the eigenvalues) of W * W.T
69
+ return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])
70
+
71
+
72
+ def _ica_def(X, tol, g, fun_args, max_iter, w_init):
73
+ """Deflationary FastICA using fun approx to neg-entropy function
74
+
75
+ Used internally by FastICA.
76
+ """
77
+
78
+ n_components = w_init.shape[0]
79
+ W = np.zeros((n_components, n_components), dtype=X.dtype)
80
+ n_iter = []
81
+
82
+ # j is the index of the extracted component
83
+ for j in range(n_components):
84
+ w = w_init[j, :].copy()
85
+ w /= np.sqrt((w**2).sum())
86
+
87
+ for i in range(max_iter):
88
+ gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
89
+
90
+ w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
91
+
92
+ _gs_decorrelation(w1, W, j)
93
+
94
+ w1 /= np.sqrt((w1**2).sum())
95
+
96
+ lim = np.abs(np.abs((w1 * w).sum()) - 1)
97
+ w = w1
98
+ if lim < tol:
99
+ break
100
+
101
+ n_iter.append(i + 1)
102
+ W[j, :] = w
103
+
104
+ return W, max(n_iter)
105
+
106
+
107
+ def _ica_par(X, tol, g, fun_args, max_iter, w_init):
108
+ """Parallel FastICA.
109
+
110
+ Used internally by FastICA --main loop
111
+
112
+ """
113
+ W = _sym_decorrelation(w_init)
114
+ del w_init
115
+ p_ = float(X.shape[1])
116
+ for ii in range(max_iter):
117
+ gwtx, g_wtx = g(np.dot(W, X), fun_args)
118
+ W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)
119
+ del gwtx, g_wtx
120
+ # builtin max, abs are faster than numpy counterparts.
121
+ # np.einsum allows having the lowest memory footprint.
122
+ # It is faster than np.diag(np.dot(W1, W.T)).
123
+ lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1))
124
+ W = W1
125
+ if lim < tol:
126
+ break
127
+ else:
128
+ warnings.warn(
129
+ (
130
+ "FastICA did not converge. Consider increasing "
131
+ "tolerance or the maximum number of iterations."
132
+ ),
133
+ ConvergenceWarning,
134
+ )
135
+
136
+ return W, ii + 1
137
+
138
+
139
+ # Some standard non-linear functions.
140
+ # XXX: these should be optimized, as they can be a bottleneck.
141
+ def _logcosh(x, fun_args=None):
142
+ alpha = fun_args.get("alpha", 1.0) # comment it out?
143
+
144
+ x *= alpha
145
+ gx = np.tanh(x, x) # apply the tanh inplace
146
+ g_x = np.empty(x.shape[0], dtype=x.dtype)
147
+ # XXX compute in chunks to avoid extra allocation
148
+ for i, gx_i in enumerate(gx): # please don't vectorize.
149
+ g_x[i] = (alpha * (1 - gx_i**2)).mean()
150
+ return gx, g_x
151
+
152
+
153
+ def _exp(x, fun_args):
154
+ exp = np.exp(-(x**2) / 2)
155
+ gx = x * exp
156
+ g_x = (1 - x**2) * exp
157
+ return gx, g_x.mean(axis=-1)
158
+
159
+
160
+ def _cube(x, fun_args):
161
+ return x**3, (3 * x**2).mean(axis=-1)
162
+
163
+
164
+ @validate_params(
165
+ {
166
+ "X": ["array-like"],
167
+ "return_X_mean": ["boolean"],
168
+ "compute_sources": ["boolean"],
169
+ "return_n_iter": ["boolean"],
170
+ },
171
+ prefer_skip_nested_validation=False,
172
+ )
173
+ def fastica(
174
+ X,
175
+ n_components=None,
176
+ *,
177
+ algorithm="parallel",
178
+ whiten="unit-variance",
179
+ fun="logcosh",
180
+ fun_args=None,
181
+ max_iter=200,
182
+ tol=1e-04,
183
+ w_init=None,
184
+ whiten_solver="svd",
185
+ random_state=None,
186
+ return_X_mean=False,
187
+ compute_sources=True,
188
+ return_n_iter=False,
189
+ ):
190
+ """Perform Fast Independent Component Analysis.
191
+
192
+ The implementation is based on [1]_.
193
+
194
+ Read more in the :ref:`User Guide <ICA>`.
195
+
196
+ Parameters
197
+ ----------
198
+ X : array-like of shape (n_samples, n_features)
199
+ Training vector, where `n_samples` is the number of samples and
200
+ `n_features` is the number of features.
201
+
202
+ n_components : int, default=None
203
+ Number of components to use. If None is passed, all are used.
204
+
205
+ algorithm : {'parallel', 'deflation'}, default='parallel'
206
+ Specify which algorithm to use for FastICA.
207
+
208
+ whiten : str or bool, default='unit-variance'
209
+ Specify the whitening strategy to use.
210
+
211
+ - If 'arbitrary-variance', a whitening with arbitrary
212
+ variance is used.
213
+ - If 'unit-variance', the whitening matrix is rescaled to ensure that
214
+ each recovered source has unit variance.
215
+ - If False, the data is already considered to be whitened, and no
216
+ whitening is performed.
217
+
218
+ .. versionchanged:: 1.3
219
+ The default value of `whiten` changed to 'unit-variance' in 1.3.
220
+
221
+ fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
222
+ The functional form of the G function used in the
223
+ approximation to neg-entropy. Could be either 'logcosh', 'exp',
224
+ or 'cube'.
225
+ You can also provide your own function. It should return a tuple
226
+ containing the value of the function, and of its derivative, in the
227
+ point. The derivative should be averaged along its last dimension.
228
+ Example::
229
+
230
+ def my_g(x):
231
+ return x ** 3, (3 * x ** 2).mean(axis=-1)
232
+
233
+ fun_args : dict, default=None
234
+ Arguments to send to the functional form.
235
+ If empty or None and if fun='logcosh', fun_args will take value
236
+ {'alpha' : 1.0}.
237
+
238
+ max_iter : int, default=200
239
+ Maximum number of iterations to perform.
240
+
241
+ tol : float, default=1e-4
242
+ A positive scalar giving the tolerance at which the
243
+ un-mixing matrix is considered to have converged.
244
+
245
+ w_init : ndarray of shape (n_components, n_components), default=None
246
+ Initial un-mixing array. If `w_init=None`, then an array of values
247
+ drawn from a normal distribution is used.
248
+
249
+ whiten_solver : {"eigh", "svd"}, default="svd"
250
+ The solver to use for whitening.
251
+
252
+ - "svd" is more stable numerically if the problem is degenerate, and
253
+ often faster when `n_samples <= n_features`.
254
+
255
+ - "eigh" is generally more memory efficient when
256
+ `n_samples >= n_features`, and can be faster when
257
+ `n_samples >= 50 * n_features`.
258
+
259
+ .. versionadded:: 1.2
260
+
261
+ random_state : int, RandomState instance or None, default=None
262
+ Used to initialize ``w_init`` when not specified, with a
263
+ normal distribution. Pass an int, for reproducible results
264
+ across multiple function calls.
265
+ See :term:`Glossary <random_state>`.
266
+
267
+ return_X_mean : bool, default=False
268
+ If True, X_mean is returned too.
269
+
270
+ compute_sources : bool, default=True
271
+ If False, sources are not computed, but only the rotation matrix.
272
+ This can save memory when working with big data. Defaults to True.
273
+
274
+ return_n_iter : bool, default=False
275
+ Whether or not to return the number of iterations.
276
+
277
+ Returns
278
+ -------
279
+ K : ndarray of shape (n_components, n_features) or None
280
+ If whiten is 'True', K is the pre-whitening matrix that projects data
281
+ onto the first n_components principal components. If whiten is 'False',
282
+ K is 'None'.
283
+
284
+ W : ndarray of shape (n_components, n_components)
285
+ The square matrix that unmixes the data after whitening.
286
+ The mixing matrix is the pseudo-inverse of matrix ``W K``
287
+ if K is not None, else it is the inverse of W.
288
+
289
+ S : ndarray of shape (n_samples, n_components) or None
290
+ Estimated source matrix.
291
+
292
+ X_mean : ndarray of shape (n_features,)
293
+ The mean over features. Returned only if return_X_mean is True.
294
+
295
+ n_iter : int
296
+ If the algorithm is "deflation", n_iter is the
297
+ maximum number of iterations run across all components. Else
298
+ it is just the number of iterations taken to converge. This is
299
+ returned only when return_n_iter is set to `True`.
300
+
301
+ Notes
302
+ -----
303
+ The data matrix X is considered to be a linear combination of
304
+ non-Gaussian (independent) components i.e. X = AS where columns of S
305
+ contain the independent components and A is a linear mixing
306
+ matrix. In short ICA attempts to `un-mix' the data by estimating an
307
+ un-mixing matrix W where ``S = W K X.``
308
+ While FastICA was proposed to estimate as many sources
309
+ as features, it is possible to estimate fewer by setting
310
+ n_components < n_features. In this case K is not a square matrix
311
+ and the estimated A is the pseudo-inverse of ``W K``.
312
+
313
+ This implementation was originally made for data of shape
314
+ [n_features, n_samples]. Now the input is transposed
315
+ before the algorithm is applied. This makes it slightly
316
+ faster for Fortran-ordered input.
317
+
318
+ References
319
+ ----------
320
+ .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
321
+ Algorithms and Applications, Neural Networks, 13(4-5), 2000,
322
+ pp. 411-430.
323
+
324
+ Examples
325
+ --------
326
+ >>> from sklearn.datasets import load_digits
327
+ >>> from sklearn.decomposition import fastica
328
+ >>> X, _ = load_digits(return_X_y=True)
329
+ >>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance')
330
+ >>> K.shape
331
+ (7, 64)
332
+ >>> W.shape
333
+ (7, 7)
334
+ >>> S.shape
335
+ (1797, 7)
336
+ """
337
+ est = FastICA(
338
+ n_components=n_components,
339
+ algorithm=algorithm,
340
+ whiten=whiten,
341
+ fun=fun,
342
+ fun_args=fun_args,
343
+ max_iter=max_iter,
344
+ tol=tol,
345
+ w_init=w_init,
346
+ whiten_solver=whiten_solver,
347
+ random_state=random_state,
348
+ )
349
+ est._validate_params()
350
+ S = est._fit_transform(X, compute_sources=compute_sources)
351
+
352
+ if est.whiten in ["unit-variance", "arbitrary-variance"]:
353
+ K = est.whitening_
354
+ X_mean = est.mean_
355
+ else:
356
+ K = None
357
+ X_mean = None
358
+
359
+ returned_values = [K, est._unmixing, S]
360
+ if return_X_mean:
361
+ returned_values.append(X_mean)
362
+ if return_n_iter:
363
+ returned_values.append(est.n_iter_)
364
+
365
+ return returned_values
366
+
367
+
368
+ class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
369
+ """FastICA: a fast algorithm for Independent Component Analysis.
370
+
371
+ The implementation is based on [1]_.
372
+
373
+ Read more in the :ref:`User Guide <ICA>`.
374
+
375
+ Parameters
376
+ ----------
377
+ n_components : int, default=None
378
+ Number of components to use. If None is passed, all are used.
379
+
380
+ algorithm : {'parallel', 'deflation'}, default='parallel'
381
+ Specify which algorithm to use for FastICA.
382
+
383
+ whiten : str or bool, default='unit-variance'
384
+ Specify the whitening strategy to use.
385
+
386
+ - If 'arbitrary-variance', a whitening with arbitrary
387
+ variance is used.
388
+ - If 'unit-variance', the whitening matrix is rescaled to ensure that
389
+ each recovered source has unit variance.
390
+ - If False, the data is already considered to be whitened, and no
391
+ whitening is performed.
392
+
393
+ .. versionchanged:: 1.3
394
+ The default value of `whiten` changed to 'unit-variance' in 1.3.
395
+
396
+ fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
397
+ The functional form of the G function used in the
398
+ approximation to neg-entropy. Could be either 'logcosh', 'exp',
399
+ or 'cube'.
400
+ You can also provide your own function. It should return a tuple
401
+ containing the value of the function, and of its derivative, in the
402
+ point. The derivative should be averaged along its last dimension.
403
+ Example::
404
+
405
+ def my_g(x):
406
+ return x ** 3, (3 * x ** 2).mean(axis=-1)
407
+
408
+ fun_args : dict, default=None
409
+ Arguments to send to the functional form.
410
+ If empty or None and if fun='logcosh', fun_args will take value
411
+ {'alpha' : 1.0}.
412
+
413
+ max_iter : int, default=200
414
+ Maximum number of iterations during fit.
415
+
416
+ tol : float, default=1e-4
417
+ A positive scalar giving the tolerance at which the
418
+ un-mixing matrix is considered to have converged.
419
+
420
+ w_init : array-like of shape (n_components, n_components), default=None
421
+ Initial un-mixing array. If `w_init=None`, then an array of values
422
+ drawn from a normal distribution is used.
423
+
424
+ whiten_solver : {"eigh", "svd"}, default="svd"
425
+ The solver to use for whitening.
426
+
427
+ - "svd" is more stable numerically if the problem is degenerate, and
428
+ often faster when `n_samples <= n_features`.
429
+
430
+ - "eigh" is generally more memory efficient when
431
+ `n_samples >= n_features`, and can be faster when
432
+ `n_samples >= 50 * n_features`.
433
+
434
+ .. versionadded:: 1.2
435
+
436
+ random_state : int, RandomState instance or None, default=None
437
+ Used to initialize ``w_init`` when not specified, with a
438
+ normal distribution. Pass an int, for reproducible results
439
+ across multiple function calls.
440
+ See :term:`Glossary <random_state>`.
441
+
442
+ Attributes
443
+ ----------
444
+ components_ : ndarray of shape (n_components, n_features)
445
+ The linear operator to apply to the data to get the independent
446
+ sources. This is equal to the unmixing matrix when ``whiten`` is
447
+ False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
448
+ ``whiten`` is True.
449
+
450
+ mixing_ : ndarray of shape (n_features, n_components)
451
+ The pseudo-inverse of ``components_``. It is the linear operator
452
+ that maps independent sources to the data.
453
+
454
+ mean_ : ndarray of shape(n_features,)
455
+ The mean over features. Only set if `self.whiten` is True.
456
+
457
+ n_features_in_ : int
458
+ Number of features seen during :term:`fit`.
459
+
460
+ .. versionadded:: 0.24
461
+
462
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
463
+ Names of features seen during :term:`fit`. Defined only when `X`
464
+ has feature names that are all strings.
465
+
466
+ .. versionadded:: 1.0
467
+
468
+ n_iter_ : int
469
+ If the algorithm is "deflation", n_iter is the
470
+ maximum number of iterations run across all components. Else
471
+ it is just the number of iterations taken to converge.
472
+
473
+ whitening_ : ndarray of shape (n_components, n_features)
474
+ Only set if whiten is 'True'. This is the pre-whitening matrix
475
+ that projects data onto the first `n_components` principal components.
476
+
477
+ See Also
478
+ --------
479
+ PCA : Principal component analysis (PCA).
480
+ IncrementalPCA : Incremental principal components analysis (IPCA).
481
+ KernelPCA : Kernel Principal component analysis (KPCA).
482
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
483
+ SparsePCA : Sparse Principal Components Analysis (SparsePCA).
484
+
485
+ References
486
+ ----------
487
+ .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:
488
+ Algorithms and Applications, Neural Networks, 13(4-5), 2000,
489
+ pp. 411-430.
490
+
491
+ Examples
492
+ --------
493
+ >>> from sklearn.datasets import load_digits
494
+ >>> from sklearn.decomposition import FastICA
495
+ >>> X, _ = load_digits(return_X_y=True)
496
+ >>> transformer = FastICA(n_components=7,
497
+ ... random_state=0,
498
+ ... whiten='unit-variance')
499
+ >>> X_transformed = transformer.fit_transform(X)
500
+ >>> X_transformed.shape
501
+ (1797, 7)
502
+ """
503
+
504
+ _parameter_constraints: dict = {
505
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
506
+ "algorithm": [StrOptions({"parallel", "deflation"})],
507
+ "whiten": [
508
+ StrOptions({"arbitrary-variance", "unit-variance"}),
509
+ Options(bool, {False}),
510
+ ],
511
+ "fun": [StrOptions({"logcosh", "exp", "cube"}), callable],
512
+ "fun_args": [dict, None],
513
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
514
+ "tol": [Interval(Real, 0.0, None, closed="left")],
515
+ "w_init": ["array-like", None],
516
+ "whiten_solver": [StrOptions({"eigh", "svd"})],
517
+ "random_state": ["random_state"],
518
+ }
519
+
520
+ def __init__(
521
+ self,
522
+ n_components=None,
523
+ *,
524
+ algorithm="parallel",
525
+ whiten="unit-variance",
526
+ fun="logcosh",
527
+ fun_args=None,
528
+ max_iter=200,
529
+ tol=1e-4,
530
+ w_init=None,
531
+ whiten_solver="svd",
532
+ random_state=None,
533
+ ):
534
+ super().__init__()
535
+ self.n_components = n_components
536
+ self.algorithm = algorithm
537
+ self.whiten = whiten
538
+ self.fun = fun
539
+ self.fun_args = fun_args
540
+ self.max_iter = max_iter
541
+ self.tol = tol
542
+ self.w_init = w_init
543
+ self.whiten_solver = whiten_solver
544
+ self.random_state = random_state
545
+
546
+ def _fit_transform(self, X, compute_sources=False):
547
+ """Fit the model.
548
+
549
+ Parameters
550
+ ----------
551
+ X : array-like of shape (n_samples, n_features)
552
+ Training data, where `n_samples` is the number of samples
553
+ and `n_features` is the number of features.
554
+
555
+ compute_sources : bool, default=False
556
+ If False, sources are not computed, but only the rotation matrix.
557
+ This can save memory when working with big data. Defaults to False.
558
+
559
+ Returns
560
+ -------
561
+ S : ndarray of shape (n_samples, n_components) or None
562
+ Sources matrix. `None` if `compute_sources` is `False`.
563
+ """
564
+ XT = self._validate_data(
565
+ X, copy=self.whiten, dtype=[np.float64, np.float32], ensure_min_samples=2
566
+ ).T
567
+ fun_args = {} if self.fun_args is None else self.fun_args
568
+ random_state = check_random_state(self.random_state)
569
+
570
+ alpha = fun_args.get("alpha", 1.0)
571
+ if not 1 <= alpha <= 2:
572
+ raise ValueError("alpha must be in [1,2]")
573
+
574
+ if self.fun == "logcosh":
575
+ g = _logcosh
576
+ elif self.fun == "exp":
577
+ g = _exp
578
+ elif self.fun == "cube":
579
+ g = _cube
580
+ elif callable(self.fun):
581
+
582
+ def g(x, fun_args):
583
+ return self.fun(x, **fun_args)
584
+
585
+ n_features, n_samples = XT.shape
586
+ n_components = self.n_components
587
+ if not self.whiten and n_components is not None:
588
+ n_components = None
589
+ warnings.warn("Ignoring n_components with whiten=False.")
590
+
591
+ if n_components is None:
592
+ n_components = min(n_samples, n_features)
593
+ if n_components > min(n_samples, n_features):
594
+ n_components = min(n_samples, n_features)
595
+ warnings.warn(
596
+ "n_components is too large: it will be set to %s" % n_components
597
+ )
598
+
599
+ if self.whiten:
600
+ # Centering the features of X
601
+ X_mean = XT.mean(axis=-1)
602
+ XT -= X_mean[:, np.newaxis]
603
+
604
+ # Whitening and preprocessing by PCA
605
+ if self.whiten_solver == "eigh":
606
+ # Faster when num_samples >> n_features
607
+ d, u = linalg.eigh(XT.dot(X))
608
+ sort_indices = np.argsort(d)[::-1]
609
+ eps = np.finfo(d.dtype).eps
610
+ degenerate_idx = d < eps
611
+ if np.any(degenerate_idx):
612
+ warnings.warn(
613
+ "There are some small singular values, using "
614
+ "whiten_solver = 'svd' might lead to more "
615
+ "accurate results."
616
+ )
617
+ d[degenerate_idx] = eps # For numerical issues
618
+ np.sqrt(d, out=d)
619
+ d, u = d[sort_indices], u[:, sort_indices]
620
+ elif self.whiten_solver == "svd":
621
+ u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
622
+
623
+ # Give consistent eigenvectors for both svd solvers
624
+ u *= np.sign(u[0])
625
+
626
+ K = (u / d).T[:n_components] # see (6.33) p.140
627
+ del u, d
628
+ X1 = np.dot(K, XT)
629
+ # see (13.6) p.267 Here X1 is white and data
630
+ # in X has been projected onto a subspace by PCA
631
+ X1 *= np.sqrt(n_samples)
632
+ else:
633
+ # X must be cast to floats to avoid typing issues with numpy
634
+ # 2.0 and the line below
635
+ X1 = as_float_array(XT, copy=False) # copy has been taken care of
636
+
637
+ w_init = self.w_init
638
+ if w_init is None:
639
+ w_init = np.asarray(
640
+ random_state.normal(size=(n_components, n_components)), dtype=X1.dtype
641
+ )
642
+
643
+ else:
644
+ w_init = np.asarray(w_init)
645
+ if w_init.shape != (n_components, n_components):
646
+ raise ValueError(
647
+ "w_init has invalid shape -- should be %(shape)s"
648
+ % {"shape": (n_components, n_components)}
649
+ )
650
+
651
+ kwargs = {
652
+ "tol": self.tol,
653
+ "g": g,
654
+ "fun_args": fun_args,
655
+ "max_iter": self.max_iter,
656
+ "w_init": w_init,
657
+ }
658
+
659
+ if self.algorithm == "parallel":
660
+ W, n_iter = _ica_par(X1, **kwargs)
661
+ elif self.algorithm == "deflation":
662
+ W, n_iter = _ica_def(X1, **kwargs)
663
+ del X1
664
+
665
+ self.n_iter_ = n_iter
666
+
667
+ if compute_sources:
668
+ if self.whiten:
669
+ S = np.linalg.multi_dot([W, K, XT]).T
670
+ else:
671
+ S = np.dot(W, XT).T
672
+ else:
673
+ S = None
674
+
675
+ if self.whiten:
676
+ if self.whiten == "unit-variance":
677
+ if not compute_sources:
678
+ S = np.linalg.multi_dot([W, K, XT]).T
679
+ S_std = np.std(S, axis=0, keepdims=True)
680
+ S /= S_std
681
+ W /= S_std.T
682
+
683
+ self.components_ = np.dot(W, K)
684
+ self.mean_ = X_mean
685
+ self.whitening_ = K
686
+ else:
687
+ self.components_ = W
688
+
689
+ self.mixing_ = linalg.pinv(self.components_, check_finite=False)
690
+ self._unmixing = W
691
+
692
+ return S
693
+
694
+ @_fit_context(prefer_skip_nested_validation=True)
695
+ def fit_transform(self, X, y=None):
696
+ """Fit the model and recover the sources from X.
697
+
698
+ Parameters
699
+ ----------
700
+ X : array-like of shape (n_samples, n_features)
701
+ Training data, where `n_samples` is the number of samples
702
+ and `n_features` is the number of features.
703
+
704
+ y : Ignored
705
+ Not used, present for API consistency by convention.
706
+
707
+ Returns
708
+ -------
709
+ X_new : ndarray of shape (n_samples, n_components)
710
+ Estimated sources obtained by transforming the data with the
711
+ estimated unmixing matrix.
712
+ """
713
+ return self._fit_transform(X, compute_sources=True)
714
+
715
+ @_fit_context(prefer_skip_nested_validation=True)
716
+ def fit(self, X, y=None):
717
+ """Fit the model to X.
718
+
719
+ Parameters
720
+ ----------
721
+ X : array-like of shape (n_samples, n_features)
722
+ Training data, where `n_samples` is the number of samples
723
+ and `n_features` is the number of features.
724
+
725
+ y : Ignored
726
+ Not used, present for API consistency by convention.
727
+
728
+ Returns
729
+ -------
730
+ self : object
731
+ Returns the instance itself.
732
+ """
733
+ self._fit_transform(X, compute_sources=False)
734
+ return self
735
+
736
+ def transform(self, X, copy=True):
737
+ """Recover the sources from X (apply the unmixing matrix).
738
+
739
+ Parameters
740
+ ----------
741
+ X : array-like of shape (n_samples, n_features)
742
+ Data to transform, where `n_samples` is the number of samples
743
+ and `n_features` is the number of features.
744
+
745
+ copy : bool, default=True
746
+ If False, data passed to fit can be overwritten. Defaults to True.
747
+
748
+ Returns
749
+ -------
750
+ X_new : ndarray of shape (n_samples, n_components)
751
+ Estimated sources obtained by transforming the data with the
752
+ estimated unmixing matrix.
753
+ """
754
+ check_is_fitted(self)
755
+
756
+ X = self._validate_data(
757
+ X, copy=(copy and self.whiten), dtype=[np.float64, np.float32], reset=False
758
+ )
759
+ if self.whiten:
760
+ X -= self.mean_
761
+
762
+ return np.dot(X, self.components_.T)
763
+
764
+ def inverse_transform(self, X, copy=True):
765
+ """Transform the sources back to the mixed data (apply mixing matrix).
766
+
767
+ Parameters
768
+ ----------
769
+ X : array-like of shape (n_samples, n_components)
770
+ Sources, where `n_samples` is the number of samples
771
+ and `n_components` is the number of components.
772
+ copy : bool, default=True
773
+ If False, data passed to fit are overwritten. Defaults to True.
774
+
775
+ Returns
776
+ -------
777
+ X_new : ndarray of shape (n_samples, n_features)
778
+ Reconstructed data obtained with the mixing matrix.
779
+ """
780
+ check_is_fitted(self)
781
+
782
+ X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32])
783
+ X = np.dot(X, self.mixing_.T)
784
+ if self.whiten:
785
+ X += self.mean_
786
+
787
+ return X
788
+
789
+ @property
790
+ def _n_features_out(self):
791
+ """Number of transformed output features."""
792
+ return self.components_.shape[0]
793
+
794
+ def _more_tags(self):
795
+ return {"preserves_dtype": [np.float32, np.float64]}
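A small blind-source-separation sketch for the FastICA estimator defined above (illustration only, not part of the added file; it assumes numpy and scikit-learn are available):

import numpy as np
from sklearn.decomposition import FastICA

t = np.linspace(0, 8, 2000)
S = np.c_[np.sin(2 * t), np.sign(np.cos(3 * t))]   # two independent sources
A = np.array([[1.0, 0.5], [0.5, 2.0]])             # mixing matrix
X = S @ A.T                                        # observed mixtures, shape (2000, 2)

ica = FastICA(n_components=2, whiten="unit-variance", random_state=0)
S_est = ica.fit_transform(X)           # recovered sources (up to sign, order and scale)
X_back = ica.inverse_transform(S_est)  # remix through the learned mixing_ matrix

print(S_est.shape)             # (2000, 2)
print(np.allclose(X_back, X))  # True: with n_components == n_features the
                               # transform / inverse_transform round-trip is exact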
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py ADDED
@@ -0,0 +1,409 @@
1
+ """Incremental Principal Components Analysis."""
2
+
3
+ # Author: Kyle Kastner <[email protected]>
4
+ # Giorgio Patrini
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral
8
+
9
+ import numpy as np
10
+ from scipy import linalg, sparse
11
+
12
+ from ..base import _fit_context
13
+ from ..utils import gen_batches
14
+ from ..utils._param_validation import Interval
15
+ from ..utils.extmath import _incremental_mean_and_var, svd_flip
16
+ from ._base import _BasePCA
17
+
18
+
19
+ class IncrementalPCA(_BasePCA):
20
+ """Incremental principal components analysis (IPCA).
21
+
22
+ Linear dimensionality reduction using Singular Value Decomposition of
23
+ the data, keeping only the most significant singular vectors to
24
+ project the data to a lower dimensional space. The input data is centered
25
+ but not scaled for each feature before applying the SVD.
26
+
27
+ Depending on the size of the input data, this algorithm can be much more
28
+ memory efficient than a PCA, and allows sparse input.
29
+
30
+ This algorithm has constant memory complexity, on the order
31
+ of ``batch_size * n_features``, enabling use of np.memmap files without
32
+ loading the entire file into memory. For sparse matrices, the input
33
+ is converted to dense in batches (in order to be able to subtract the
34
+ mean) which avoids storing the entire dense matrix at any one time.
35
+
36
+ The computational overhead of each SVD is
37
+ ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
38
+ remain in memory at a time. There will be ``n_samples / batch_size`` SVD
39
+ computations to get the principal components, versus 1 large SVD of
40
+ complexity ``O(n_samples * n_features ** 2)`` for PCA.
41
+
42
+ For a usage example, see
43
+ :ref:`sphx_glr_auto_examples_decomposition_plot_incremental_pca.py`.
44
+
45
+ Read more in the :ref:`User Guide <IncrementalPCA>`.
46
+
47
+ .. versionadded:: 0.16
48
+
49
+ Parameters
50
+ ----------
51
+ n_components : int, default=None
52
+ Number of components to keep. If ``n_components`` is ``None``,
53
+ then ``n_components`` is set to ``min(n_samples, n_features)``.
54
+
55
+ whiten : bool, default=False
56
+ When True (False by default) the ``components_`` vectors are divided
57
+ by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
58
+ with unit component-wise variances.
59
+
60
+ Whitening will remove some information from the transformed signal
61
+ (the relative variance scales of the components) but can sometimes
62
+ improve the predictive accuracy of the downstream estimators by
63
+ making data respect some hard-wired assumptions.
64
+
65
+ copy : bool, default=True
66
+ If False, X will be overwritten. ``copy=False`` can be used to
67
+ save memory but is unsafe for general use.
68
+
69
+ batch_size : int, default=None
70
+ The number of samples to use for each batch. Only used when calling
71
+ ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
72
+ is inferred from the data and set to ``5 * n_features``, to provide a
73
+ balance between approximation accuracy and memory consumption.
74
+
75
+ Attributes
76
+ ----------
77
+ components_ : ndarray of shape (n_components, n_features)
78
+ Principal axes in feature space, representing the directions of
79
+ maximum variance in the data. Equivalently, the right singular
80
+ vectors of the centered input data, parallel to its eigenvectors.
81
+ The components are sorted by decreasing ``explained_variance_``.
82
+
83
+ explained_variance_ : ndarray of shape (n_components,)
84
+ Variance explained by each of the selected components.
85
+
86
+ explained_variance_ratio_ : ndarray of shape (n_components,)
87
+ Percentage of variance explained by each of the selected components.
88
+ If all components are stored, the sum of explained variances is equal
89
+ to 1.0.
90
+
91
+ singular_values_ : ndarray of shape (n_components,)
92
+ The singular values corresponding to each of the selected components.
93
+ The singular values are equal to the 2-norms of the ``n_components``
94
+ variables in the lower-dimensional space.
95
+
96
+ mean_ : ndarray of shape (n_features,)
97
+ Per-feature empirical mean, aggregate over calls to ``partial_fit``.
98
+
99
+ var_ : ndarray of shape (n_features,)
100
+ Per-feature empirical variance, aggregate over calls to
101
+ ``partial_fit``.
102
+
103
+ noise_variance_ : float
104
+ The estimated noise covariance following the Probabilistic PCA model
105
+ from Tipping and Bishop 1999. See "Pattern Recognition and
106
+ Machine Learning" by C. Bishop, 12.2.1 p. 574 or
107
+ http://www.miketipping.com/papers/met-mppca.pdf.
108
+
109
+ n_components_ : int
110
+ The estimated number of components. Relevant when
111
+ ``n_components=None``.
112
+
113
+ n_samples_seen_ : int
114
+ The number of samples processed by the estimator. Will be reset on
115
+ new calls to fit, but increments across ``partial_fit`` calls.
116
+
117
+ batch_size_ : int
118
+ Inferred batch size from ``batch_size``.
119
+
120
+ n_features_in_ : int
121
+ Number of features seen during :term:`fit`.
122
+
123
+ .. versionadded:: 0.24
124
+
125
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
126
+ Names of features seen during :term:`fit`. Defined only when `X`
127
+ has feature names that are all strings.
128
+
129
+ .. versionadded:: 1.0
130
+
131
+ See Also
132
+ --------
133
+ PCA : Principal component analysis (PCA).
134
+ KernelPCA : Kernel Principal component analysis (KPCA).
135
+ SparsePCA : Sparse Principal Components Analysis (SparsePCA).
136
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
137
+
138
+ Notes
139
+ -----
140
+ Implements the incremental PCA model from:
141
+ *D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
142
+ Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
143
+ pp. 125-141, May 2008.*
144
+ See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
145
+
146
+ This model is an extension of the Sequential Karhunen-Loeve Transform from:
147
+ :doi:`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
148
+ its Application to Images, IEEE Transactions on Image Processing, Volume 9,
149
+ Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>`
150
+
151
+ We have specifically abstained from an optimization used by authors of both
152
+ papers, a QR decomposition used in specific situations to reduce the
153
+ algorithmic complexity of the SVD. The source for this technique is
154
+ *Matrix Computations, Third Edition, G. Golub and C. Van Loan, Chapter 5,
155
+ section 5.4.4, pp 252-253.*. This technique has been omitted because it is
156
+ advantageous only when decomposing a matrix with ``n_samples`` (rows)
157
+ >= 5/3 * ``n_features`` (columns), and hurts the readability of the
158
+ implemented algorithm. This would be a good opportunity for future
159
+ optimization, if it is deemed necessary.
160
+
161
+ References
162
+ ----------
163
+ D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
164
+ Tracking, International Journal of Computer Vision, Volume 77,
165
+ Issue 1-3, pp. 125-141, May 2008.
166
+
167
+ G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
168
+ Section 5.4.4, pp. 252-253.
169
+
170
+ Examples
171
+ --------
172
+ >>> from sklearn.datasets import load_digits
173
+ >>> from sklearn.decomposition import IncrementalPCA
174
+ >>> from scipy import sparse
175
+ >>> X, _ = load_digits(return_X_y=True)
176
+ >>> transformer = IncrementalPCA(n_components=7, batch_size=200)
177
+ >>> # either partially fit on smaller batches of data
178
+ >>> transformer.partial_fit(X[:100, :])
179
+ IncrementalPCA(batch_size=200, n_components=7)
180
+ >>> # or let the fit function itself divide the data into batches
181
+ >>> X_sparse = sparse.csr_matrix(X)
182
+ >>> X_transformed = transformer.fit_transform(X_sparse)
183
+ >>> X_transformed.shape
184
+ (1797, 7)
185
+ """
186
+
187
+ _parameter_constraints: dict = {
188
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
189
+ "whiten": ["boolean"],
190
+ "copy": ["boolean"],
191
+ "batch_size": [Interval(Integral, 1, None, closed="left"), None],
192
+ }
193
+
194
+ def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None):
195
+ self.n_components = n_components
196
+ self.whiten = whiten
197
+ self.copy = copy
198
+ self.batch_size = batch_size
199
+
200
+ @_fit_context(prefer_skip_nested_validation=True)
201
+ def fit(self, X, y=None):
202
+ """Fit the model with X, using minibatches of size batch_size.
203
+
204
+ Parameters
205
+ ----------
206
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
207
+ Training data, where `n_samples` is the number of samples and
208
+ `n_features` is the number of features.
209
+
210
+ y : Ignored
211
+ Not used, present for API consistency by convention.
212
+
213
+ Returns
214
+ -------
215
+ self : object
216
+ Returns the instance itself.
217
+ """
218
+ self.components_ = None
219
+ self.n_samples_seen_ = 0
220
+ self.mean_ = 0.0
221
+ self.var_ = 0.0
222
+ self.singular_values_ = None
223
+ self.explained_variance_ = None
224
+ self.explained_variance_ratio_ = None
225
+ self.noise_variance_ = None
226
+
227
+ X = self._validate_data(
228
+ X,
229
+ accept_sparse=["csr", "csc", "lil"],
230
+ copy=self.copy,
231
+ dtype=[np.float64, np.float32],
232
+ )
233
+ n_samples, n_features = X.shape
234
+
235
+ if self.batch_size is None:
236
+ self.batch_size_ = 5 * n_features
237
+ else:
238
+ self.batch_size_ = self.batch_size
239
+
240
+ for batch in gen_batches(
241
+ n_samples, self.batch_size_, min_batch_size=self.n_components or 0
242
+ ):
243
+ X_batch = X[batch]
244
+ if sparse.issparse(X_batch):
245
+ X_batch = X_batch.toarray()
246
+ self.partial_fit(X_batch, check_input=False)
247
+
248
+ return self
249
+
250
+ @_fit_context(prefer_skip_nested_validation=True)
251
+ def partial_fit(self, X, y=None, check_input=True):
252
+ """Incremental fit with X. All of X is processed as a single batch.
253
+
254
+ Parameters
255
+ ----------
256
+ X : array-like of shape (n_samples, n_features)
257
+ Training data, where `n_samples` is the number of samples and
258
+ `n_features` is the number of features.
259
+
260
+ y : Ignored
261
+ Not used, present for API consistency by convention.
262
+
263
+ check_input : bool, default=True
264
+ Run check_array on X.
265
+
266
+ Returns
267
+ -------
268
+ self : object
269
+ Returns the instance itself.
270
+ """
271
+ first_pass = not hasattr(self, "components_")
272
+
273
+ if check_input:
274
+ if sparse.issparse(X):
275
+ raise TypeError(
276
+ "IncrementalPCA.partial_fit does not support "
277
+ "sparse input. Either convert data to dense "
278
+ "or use IncrementalPCA.fit to do so in batches."
279
+ )
280
+ X = self._validate_data(
281
+ X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass
282
+ )
283
+ n_samples, n_features = X.shape
284
+ if first_pass:
285
+ self.components_ = None
286
+
287
+ if self.n_components is None:
288
+ if self.components_ is None:
289
+ self.n_components_ = min(n_samples, n_features)
290
+ else:
291
+ self.n_components_ = self.components_.shape[0]
292
+ elif not self.n_components <= n_features:
293
+ raise ValueError(
294
+ "n_components=%r invalid for n_features=%d, need "
295
+ "more rows than columns for IncrementalPCA "
296
+ "processing" % (self.n_components, n_features)
297
+ )
298
+ elif not self.n_components <= n_samples:
299
+ raise ValueError(
300
+ "n_components=%r must be less or equal to "
301
+ "the batch number of samples "
302
+ "%d." % (self.n_components, n_samples)
303
+ )
304
+ else:
305
+ self.n_components_ = self.n_components
306
+
307
+ if (self.components_ is not None) and (
308
+ self.components_.shape[0] != self.n_components_
309
+ ):
310
+ raise ValueError(
311
+ "Number of input features has changed from %i "
312
+ "to %i between calls to partial_fit! Try "
313
+ "setting n_components to a fixed value."
314
+ % (self.components_.shape[0], self.n_components_)
315
+ )
316
+
317
+ # This is the first partial_fit
318
+ if not hasattr(self, "n_samples_seen_"):
319
+ self.n_samples_seen_ = 0
320
+ self.mean_ = 0.0
321
+ self.var_ = 0.0
322
+
323
+ # Update stats - they are 0 if this is the first step
324
+ col_mean, col_var, n_total_samples = _incremental_mean_and_var(
325
+ X,
326
+ last_mean=self.mean_,
327
+ last_variance=self.var_,
328
+ last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
329
+ )
330
+ n_total_samples = n_total_samples[0]
331
+
332
+ # Whitening
333
+ if self.n_samples_seen_ == 0:
334
+ # If it is the first step, simply whiten X
335
+ X -= col_mean
336
+ else:
337
+ col_batch_mean = np.mean(X, axis=0)
338
+ X -= col_batch_mean
339
+ # Build matrix of combined previous basis and new data
340
+ mean_correction = np.sqrt(
341
+ (self.n_samples_seen_ / n_total_samples) * n_samples
342
+ ) * (self.mean_ - col_batch_mean)
343
+ X = np.vstack(
344
+ (
345
+ self.singular_values_.reshape((-1, 1)) * self.components_,
346
+ X,
347
+ mean_correction,
348
+ )
349
+ )
350
+
351
+ U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
352
+ U, Vt = svd_flip(U, Vt, u_based_decision=False)
353
+ explained_variance = S**2 / (n_total_samples - 1)
354
+ explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples)
355
+
356
+ self.n_samples_seen_ = n_total_samples
357
+ self.components_ = Vt[: self.n_components_]
358
+ self.singular_values_ = S[: self.n_components_]
359
+ self.mean_ = col_mean
360
+ self.var_ = col_var
361
+ self.explained_variance_ = explained_variance[: self.n_components_]
362
+ self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
363
+ # we already checked `self.n_components <= n_samples` above
364
+ if self.n_components_ not in (n_samples, n_features):
365
+ self.noise_variance_ = explained_variance[self.n_components_ :].mean()
366
+ else:
367
+ self.noise_variance_ = 0.0
368
+ return self
369
+
370
+ def transform(self, X):
371
+ """Apply dimensionality reduction to X.
372
+
373
+ X is projected on the first principal components previously extracted
374
+ from a training set, using minibatches of size batch_size if X is
375
+ sparse.
376
+
377
+ Parameters
378
+ ----------
379
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
380
+ New data, where `n_samples` is the number of samples
381
+ and `n_features` is the number of features.
382
+
383
+ Returns
384
+ -------
385
+ X_new : ndarray of shape (n_samples, n_components)
386
+ Projection of X in the first principal components.
387
+
388
+ Examples
389
+ --------
390
+
391
+ >>> import numpy as np
392
+ >>> from sklearn.decomposition import IncrementalPCA
393
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
394
+ ... [1, 1], [2, 1], [3, 2]])
395
+ >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
396
+ >>> ipca.fit(X)
397
+ IncrementalPCA(batch_size=3, n_components=2)
398
+ >>> ipca.transform(X) # doctest: +SKIP
399
+ """
400
+ if sparse.issparse(X):
401
+ n_samples = X.shape[0]
402
+ output = []
403
+ for batch in gen_batches(
404
+ n_samples, self.batch_size_, min_batch_size=self.n_components or 0
405
+ ):
406
+ output.append(super().transform(X[batch].toarray()))
407
+ return np.vstack(output)
408
+ else:
409
+ return super().transform(X)
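A streaming-fit sketch for the IncrementalPCA estimator above (illustration only, not part of the added file; it assumes numpy and scikit-learn are available):

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.standard_normal((1000, 20))

ipca = IncrementalPCA(n_components=5)
for batch in np.array_split(X, 10):  # ten mini-batches of 100 samples each
    ipca.partial_fit(batch)          # running mean/variance and the SVD are updated per batch

X_red = ipca.transform(X)
print(X_red.shape)           # (1000, 5)
print(ipca.n_samples_seen_)  # 1000, accumulated across partial_fit calls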
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py ADDED
@@ -0,0 +1,572 @@
1
+ """Kernel Principal Components Analysis."""
2
+
3
+ # Author: Mathieu Blondel <[email protected]>
4
+ # Sylvain Marie <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral, Real
8
+
9
+ import numpy as np
10
+ from scipy import linalg
11
+ from scipy.linalg import eigh
12
+ from scipy.sparse.linalg import eigsh
13
+
14
+ from ..base import (
15
+ BaseEstimator,
16
+ ClassNamePrefixFeaturesOutMixin,
17
+ TransformerMixin,
18
+ _fit_context,
19
+ )
20
+ from ..exceptions import NotFittedError
21
+ from ..metrics.pairwise import pairwise_kernels
22
+ from ..preprocessing import KernelCenterer
23
+ from ..utils._arpack import _init_arpack_v0
24
+ from ..utils._param_validation import Interval, StrOptions
25
+ from ..utils.extmath import _randomized_eigsh, svd_flip
26
+ from ..utils.validation import (
27
+ _check_psd_eigenvalues,
28
+ check_is_fitted,
29
+ )
30
+
31
+
32
+ class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
33
+ """Kernel Principal component analysis (KPCA) [1]_.
34
+
35
+ Non-linear dimensionality reduction through the use of kernels (see
36
+ :ref:`metrics`).
37
+
38
+ It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD
39
+ or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the
40
+ truncated SVD, depending on the shape of the input data and the number of
41
+ components to extract. It can also use a randomized truncated SVD by the
42
+ method proposed in [3]_, see `eigen_solver`.
43
+
44
+ For a usage example, see
45
+ :ref:`sphx_glr_auto_examples_decomposition_plot_kernel_pca.py`.
46
+
47
+ Read more in the :ref:`User Guide <kernel_PCA>`.
48
+
49
+ Parameters
50
+ ----------
51
+ n_components : int, default=None
52
+ Number of components. If None, all non-zero components are kept.
53
+
54
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \
55
+ or callable, default='linear'
56
+ Kernel used for PCA.
57
+
58
+ gamma : float, default=None
59
+ Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
60
+ kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``.
61
+
62
+ degree : float, default=3
63
+ Degree for poly kernels. Ignored by other kernels.
64
+
65
+ coef0 : float, default=1
66
+ Independent term in poly and sigmoid kernels.
67
+ Ignored by other kernels.
68
+
69
+ kernel_params : dict, default=None
70
+ Parameters (keyword arguments) and
71
+ values for kernel passed as callable object.
72
+ Ignored by other kernels.
73
+
74
+ alpha : float, default=1.0
75
+ Hyperparameter of the ridge regression that learns the
76
+ inverse transform (when fit_inverse_transform=True).
77
+
78
+ fit_inverse_transform : bool, default=False
79
+ Learn the inverse transform for non-precomputed kernels
80
+ (i.e. learn to find the pre-image of a point). This method is based
81
+ on [2]_.
82
+
83
+ eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \
84
+ default='auto'
85
+ Select eigensolver to use. If `n_components` is much
86
+ less than the number of training samples, randomized (or arpack to a
87
+ smaller extent) may be more efficient than the dense eigensolver.
88
+ Randomized SVD is performed according to the method of Halko et al
89
+ [3]_.
90
+
91
+ auto :
92
+ the solver is selected by a default policy based on n_samples
93
+ (the number of training samples) and `n_components`:
94
+ if the number of components to extract is less than 10 (strict) and
95
+ the number of samples is more than 200 (strict), the 'arpack'
96
+ method is enabled. Otherwise the exact full eigenvalue
97
+ decomposition is computed and optionally truncated afterwards
98
+ ('dense' method).
99
+ dense :
100
+ run exact full eigenvalue decomposition calling the standard
101
+ LAPACK solver via `scipy.linalg.eigh`, and select the components
102
+ by postprocessing
103
+ arpack :
104
+ run SVD truncated to n_components calling ARPACK solver using
105
+ `scipy.sparse.linalg.eigsh`. It requires strictly
106
+ 0 < n_components < n_samples
107
+ randomized :
108
+ run randomized SVD by the method of Halko et al. [3]_. The current
109
+ implementation selects eigenvalues based on their module; therefore
110
+ using this method can lead to unexpected results if the kernel is
111
+ not positive semi-definite. See also [4]_.
112
+
113
+ .. versionchanged:: 1.0
114
+ `'randomized'` was added.
115
+
116
+ tol : float, default=0
117
+ Convergence tolerance for arpack.
118
+ If 0, optimal value will be chosen by arpack.
119
+
120
+ max_iter : int, default=None
121
+ Maximum number of iterations for arpack.
122
+ If None, optimal value will be chosen by arpack.
123
+
124
+ iterated_power : int >= 0, or 'auto', default='auto'
125
+ Number of iterations for the power method computed by
126
+ svd_solver == 'randomized'. When 'auto', it is set to 7 when
127
+ `n_components < 0.1 * min(X.shape)`, other it is set to 4.
128
+
129
+ .. versionadded:: 1.0
130
+
131
+ remove_zero_eig : bool, default=False
132
+ If True, then all components with zero eigenvalues are removed, so
133
+ that the number of components in the output may be < n_components
134
+ (and sometimes even zero due to numerical instability).
135
+ When n_components is None, this parameter is ignored and components
136
+ with zero eigenvalues are removed regardless.
137
+
138
+ random_state : int, RandomState instance or None, default=None
139
+ Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int
140
+ for reproducible results across multiple function calls.
141
+ See :term:`Glossary <random_state>`.
142
+
143
+ .. versionadded:: 0.18
144
+
145
+ copy_X : bool, default=True
146
+ If True, input X is copied and stored by the model in the `X_fit_`
147
+ attribute. If no further changes will be done to X, setting
148
+ `copy_X=False` saves memory by storing a reference.
149
+
150
+ .. versionadded:: 0.18
151
+
152
+ n_jobs : int, default=None
153
+ The number of parallel jobs to run.
154
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
155
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
156
+ for more details.
157
+
158
+ .. versionadded:: 0.18
159
+
160
+ Attributes
161
+ ----------
162
+ eigenvalues_ : ndarray of shape (n_components,)
163
+ Eigenvalues of the centered kernel matrix in decreasing order.
164
+ If `n_components` and `remove_zero_eig` are not set,
165
+ then all values are stored.
166
+
167
+ eigenvectors_ : ndarray of shape (n_samples, n_components)
168
+ Eigenvectors of the centered kernel matrix. If `n_components` and
169
+ `remove_zero_eig` are not set, then all components are stored.
170
+
171
+ dual_coef_ : ndarray of shape (n_samples, n_features)
172
+ Inverse transform matrix. Only available when
173
+ ``fit_inverse_transform`` is True.
174
+
175
+ X_transformed_fit_ : ndarray of shape (n_samples, n_components)
176
+ Projection of the fitted data on the kernel principal components.
177
+ Only available when ``fit_inverse_transform`` is True.
178
+
179
+ X_fit_ : ndarray of shape (n_samples, n_features)
180
+ The data used to fit the model. If `copy_X=False`, then `X_fit_` is
181
+ a reference. This attribute is used for the calls to transform.
182
+
183
+ n_features_in_ : int
184
+ Number of features seen during :term:`fit`.
185
+
186
+ .. versionadded:: 0.24
187
+
188
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
189
+ Names of features seen during :term:`fit`. Defined only when `X`
190
+ has feature names that are all strings.
191
+
192
+ .. versionadded:: 1.0
193
+
194
+ gamma_ : float
195
+ Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma`
196
+ is explicitly provided, this is just the same as `gamma`. When `gamma`
197
+ is `None`, this is the actual value of the kernel coefficient (`1 / n_features`).
198
+
199
+ .. versionadded:: 1.3
200
+
201
+ See Also
202
+ --------
203
+ FastICA : A fast algorithm for Independent Component Analysis.
204
+ IncrementalPCA : Incremental Principal Component Analysis.
205
+ NMF : Non-Negative Matrix Factorization.
206
+ PCA : Principal Component Analysis.
207
+ SparsePCA : Sparse Principal Component Analysis.
208
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
209
+
210
+ References
211
+ ----------
212
+ .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
213
+ "Kernel principal component analysis."
214
+ International conference on artificial neural networks.
215
+ Springer, Berlin, Heidelberg, 1997.
216
+ <https://people.eecs.berkeley.edu/~wainwrig/stat241b/scholkopf_kernel.pdf>`_
217
+
218
+ .. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
219
+ "Learning to find pre-images."
220
+ Advances in neural information processing systems 16 (2004): 449-456.
221
+ <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
222
+
223
+ .. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp.
224
+ "Finding structure with randomness: Probabilistic algorithms for
225
+ constructing approximate matrix decompositions."
226
+ SIAM review 53.2 (2011): 217-288. <0909.4061>`
227
+
228
+ .. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert.
229
+ "A randomized algorithm for the decomposition of matrices."
230
+ Applied and Computational Harmonic Analysis 30.1 (2011): 47-68.
231
+ <https://www.sciencedirect.com/science/article/pii/S1063520310000242>`_
232
+
233
+ Examples
234
+ --------
235
+ >>> from sklearn.datasets import load_digits
236
+ >>> from sklearn.decomposition import KernelPCA
237
+ >>> X, _ = load_digits(return_X_y=True)
238
+ >>> transformer = KernelPCA(n_components=7, kernel='linear')
239
+ >>> X_transformed = transformer.fit_transform(X)
240
+ >>> X_transformed.shape
241
+ (1797, 7)
242
+ """
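# --- Editor's illustrative sketch (not part of the uploaded file) ----------
# The `eigen_solver` docs above describe when 'auto' picks 'arpack' (fewer
# than 10 components, more than 200 samples). A minimal usage sketch using
# only the public KernelPCA API; the parameter values are arbitrary examples.
from sklearn.datasets import load_digits
from sklearn.decomposition import KernelPCA

X, _ = load_digits(return_X_y=True)              # 1797 samples, 64 features
kpca = KernelPCA(n_components=5, kernel="rbf", eigen_solver="arpack",
                 random_state=0)
X_kpca = kpca.fit_transform(X)                   # shape (1797, 5)
# ---------------------------------------------------------------------------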
243
+
244
+ _parameter_constraints: dict = {
245
+ "n_components": [
246
+ Interval(Integral, 1, None, closed="left"),
247
+ None,
248
+ ],
249
+ "kernel": [
250
+ StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}),
251
+ callable,
252
+ ],
253
+ "gamma": [
254
+ Interval(Real, 0, None, closed="left"),
255
+ None,
256
+ ],
257
+ "degree": [Interval(Real, 0, None, closed="left")],
258
+ "coef0": [Interval(Real, None, None, closed="neither")],
259
+ "kernel_params": [dict, None],
260
+ "alpha": [Interval(Real, 0, None, closed="left")],
261
+ "fit_inverse_transform": ["boolean"],
262
+ "eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})],
263
+ "tol": [Interval(Real, 0, None, closed="left")],
264
+ "max_iter": [
265
+ Interval(Integral, 1, None, closed="left"),
266
+ None,
267
+ ],
268
+ "iterated_power": [
269
+ Interval(Integral, 0, None, closed="left"),
270
+ StrOptions({"auto"}),
271
+ ],
272
+ "remove_zero_eig": ["boolean"],
273
+ "random_state": ["random_state"],
274
+ "copy_X": ["boolean"],
275
+ "n_jobs": [None, Integral],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ n_components=None,
281
+ *,
282
+ kernel="linear",
283
+ gamma=None,
284
+ degree=3,
285
+ coef0=1,
286
+ kernel_params=None,
287
+ alpha=1.0,
288
+ fit_inverse_transform=False,
289
+ eigen_solver="auto",
290
+ tol=0,
291
+ max_iter=None,
292
+ iterated_power="auto",
293
+ remove_zero_eig=False,
294
+ random_state=None,
295
+ copy_X=True,
296
+ n_jobs=None,
297
+ ):
298
+ self.n_components = n_components
299
+ self.kernel = kernel
300
+ self.kernel_params = kernel_params
301
+ self.gamma = gamma
302
+ self.degree = degree
303
+ self.coef0 = coef0
304
+ self.alpha = alpha
305
+ self.fit_inverse_transform = fit_inverse_transform
306
+ self.eigen_solver = eigen_solver
307
+ self.tol = tol
308
+ self.max_iter = max_iter
309
+ self.iterated_power = iterated_power
310
+ self.remove_zero_eig = remove_zero_eig
311
+ self.random_state = random_state
312
+ self.n_jobs = n_jobs
313
+ self.copy_X = copy_X
314
+
315
+ def _get_kernel(self, X, Y=None):
316
+ if callable(self.kernel):
317
+ params = self.kernel_params or {}
318
+ else:
319
+ params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0}
320
+ return pairwise_kernels(
321
+ X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params
322
+ )
323
+
324
+ def _fit_transform(self, K):
325
+ """Fit the model using the kernel matrix K."""
326
+ # center kernel
327
+ K = self._centerer.fit_transform(K)
328
+
329
+ # adjust n_components according to user inputs
330
+ if self.n_components is None:
331
+ n_components = K.shape[0] # use all dimensions
332
+ else:
333
+ n_components = min(K.shape[0], self.n_components)
334
+
335
+ # compute eigenvectors
336
+ if self.eigen_solver == "auto":
337
+ if K.shape[0] > 200 and n_components < 10:
338
+ eigen_solver = "arpack"
339
+ else:
340
+ eigen_solver = "dense"
341
+ else:
342
+ eigen_solver = self.eigen_solver
343
+
344
+ if eigen_solver == "dense":
345
+ # Note: subset_by_index specifies the indices of smallest/largest to return
346
+ self.eigenvalues_, self.eigenvectors_ = eigh(
347
+ K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1)
348
+ )
349
+ elif eigen_solver == "arpack":
350
+ v0 = _init_arpack_v0(K.shape[0], self.random_state)
351
+ self.eigenvalues_, self.eigenvectors_ = eigsh(
352
+ K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0
353
+ )
354
+ elif eigen_solver == "randomized":
355
+ self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh(
356
+ K,
357
+ n_components=n_components,
358
+ n_iter=self.iterated_power,
359
+ random_state=self.random_state,
360
+ selection="module",
361
+ )
362
+
363
+ # make sure that the eigenvalues are ok and fix numerical issues
364
+ self.eigenvalues_ = _check_psd_eigenvalues(
365
+ self.eigenvalues_, enable_warnings=False
366
+ )
367
+
368
+ # flip eigenvectors' sign to enforce deterministic output
369
+ self.eigenvectors_, _ = svd_flip(
370
+ self.eigenvectors_, np.zeros_like(self.eigenvectors_).T
371
+ )
372
+
373
+ # sort eigenvectors in descending order
374
+ indices = self.eigenvalues_.argsort()[::-1]
375
+ self.eigenvalues_ = self.eigenvalues_[indices]
376
+ self.eigenvectors_ = self.eigenvectors_[:, indices]
377
+
378
+ # remove eigenvectors with a zero eigenvalue (null space) if required
379
+ if self.remove_zero_eig or self.n_components is None:
380
+ self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0]
381
+ self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0]
382
+
383
+ # Maintenance note on Eigenvectors normalization
384
+ # ----------------------------------------------
385
+ # there is a link between
386
+ # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)'
387
+ # if v is an eigenvector of K
388
+ # then Phi(X)v is an eigenvector of Phi(X)Phi(X)'
389
+ # if u is an eigenvector of Phi(X)Phi(X)'
390
+ # then Phi(X)'u is an eigenvector of Phi(X)'Phi(X)
391
+ #
392
+ # At this stage our self.eigenvectors_ (the v) have norm 1, we need to scale
393
+ # them so that eigenvectors in kernel feature space (the u) have norm=1
394
+ # instead
395
+ #
396
+ # We COULD scale them here:
397
+ # self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_)
398
+ #
399
+ # But choose to perform that LATER when needed, in `fit()` and in
400
+ # `transform()`.
401
+
402
+ return K
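# --- Editor's illustrative sketch (not part of the uploaded file) ----------
# The maintenance note above says an eigenvector v of the centered kernel K
# must be scaled by 1/sqrt(lambda) so that the corresponding feature-space
# eigenvector has unit norm. A quick numpy check for the linear kernel on
# toy data (not using the estimator itself):
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
Xc = X - X.mean(axis=0)            # centering X centers the linear kernel
K = Xc @ Xc.T
vals, vecs = np.linalg.eigh(K)
v, lam = vecs[:, -1], vals[-1]     # top eigenpair, ||v|| == 1
u = Xc.T @ v / np.sqrt(lam)        # feature-space eigenvector
print(np.linalg.norm(u))           # ~1.0, hence the 1/sqrt(lambda) scaling
# ---------------------------------------------------------------------------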
403
+
404
+ def _fit_inverse_transform(self, X_transformed, X):
405
+ if hasattr(X, "tocsr"):
406
+ raise NotImplementedError(
407
+ "Inverse transform not implemented for sparse matrices!"
408
+ )
409
+
410
+ n_samples = X_transformed.shape[0]
411
+ K = self._get_kernel(X_transformed)
412
+ K.flat[:: n_samples + 1] += self.alpha
413
+ self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True)
414
+ self.X_transformed_fit_ = X_transformed
415
+
416
+ @_fit_context(prefer_skip_nested_validation=True)
417
+ def fit(self, X, y=None):
418
+ """Fit the model from data in X.
419
+
420
+ Parameters
421
+ ----------
422
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
423
+ Training vector, where `n_samples` is the number of samples
424
+ and `n_features` is the number of features.
425
+
426
+ y : Ignored
427
+ Not used, present for API consistency by convention.
428
+
429
+ Returns
430
+ -------
431
+ self : object
432
+ Returns the instance itself.
433
+ """
434
+ if self.fit_inverse_transform and self.kernel == "precomputed":
435
+ raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
436
+ X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X)
437
+ self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma
438
+ self._centerer = KernelCenterer().set_output(transform="default")
439
+ K = self._get_kernel(X)
440
+ self._fit_transform(K)
441
+
442
+ if self.fit_inverse_transform:
443
+ # no need to use the kernel to transform X, use shortcut expression
444
+ X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
445
+
446
+ self._fit_inverse_transform(X_transformed, X)
447
+
448
+ self.X_fit_ = X
449
+ return self
450
+
451
+ def fit_transform(self, X, y=None, **params):
452
+ """Fit the model from data in X and transform X.
453
+
454
+ Parameters
455
+ ----------
456
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
457
+ Training vector, where `n_samples` is the number of samples
458
+ and `n_features` is the number of features.
459
+
460
+ y : Ignored
461
+ Not used, present for API consistency by convention.
462
+
463
+ **params : kwargs
464
+ Parameters (keyword arguments) and values passed to
465
+ the underlying `fit` call.
466
+
467
+ Returns
468
+ -------
469
+ X_new : ndarray of shape (n_samples, n_components)
470
+ Transformed values.
471
+ """
472
+ self.fit(X, **params)
473
+
474
+ # no need to use the kernel to transform X, use shortcut expression
475
+ X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
476
+
477
+ if self.fit_inverse_transform:
478
+ self._fit_inverse_transform(X_transformed, X)
479
+
480
+ return X_transformed
481
+
482
+ def transform(self, X):
483
+ """Transform X.
484
+
485
+ Parameters
486
+ ----------
487
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
488
+ Training vector, where `n_samples` is the number of samples
489
+ and `n_features` is the number of features.
490
+
491
+ Returns
492
+ -------
493
+ X_new : ndarray of shape (n_samples, n_components)
494
+ Transformed values.
495
+ """
496
+ check_is_fitted(self)
497
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
498
+
499
+ # Compute centered gram matrix between X and training data X_fit_
500
+ K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
501
+
502
+ # scale eigenvectors (properly account for null-space for dot product)
503
+ non_zeros = np.flatnonzero(self.eigenvalues_)
504
+ scaled_alphas = np.zeros_like(self.eigenvectors_)
505
+ scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt(
506
+ self.eigenvalues_[non_zeros]
507
+ )
508
+
509
+ # Project with a scalar product between K and the scaled eigenvectors
510
+ return np.dot(K, scaled_alphas)
511
+
512
+ def inverse_transform(self, X):
513
+ """Transform X back to original space.
514
+
515
+ ``inverse_transform`` approximates the inverse transformation using
516
+ a learned pre-image. The pre-image is learned by kernel ridge
517
+ regression of the original data on their low-dimensional representation
518
+ vectors.
519
+
520
+ .. note::
521
+ :meth:`~sklearn.decomposition.fit` internally uses a centered
522
+ kernel. As the centered kernel no longer contains the information
523
+ of the mean of kernel features, such information is not taken into
524
+ account in reconstruction.
525
+
526
+ .. note::
527
+ When users want to compute inverse transformation for 'linear'
528
+ kernel, it is recommended that they use
529
+ :class:`~sklearn.decomposition.PCA` instead. Unlike
530
+ :class:`~sklearn.decomposition.PCA`,
531
+ :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform``
532
+ does not reconstruct the mean of data when 'linear' kernel is used
533
+ due to the use of centered kernel.
534
+
535
+ Parameters
536
+ ----------
537
+ X : {array-like, sparse matrix} of shape (n_samples, n_components)
538
+ Samples in the transformed space, where `n_samples` is the number
539
+ of samples and `n_components` is the number of components.
540
+
541
+ Returns
542
+ -------
543
+ X_new : ndarray of shape (n_samples, n_features)
544
+ Reconstruction of X in the original space.
545
+
546
+ References
547
+ ----------
548
+ `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
549
+ "Learning to find pre-images."
550
+ Advances in neural information processing systems 16 (2004): 449-456.
551
+ <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
552
+ """
553
+ if not self.fit_inverse_transform:
554
+ raise NotFittedError(
555
+ "The fit_inverse_transform parameter was not"
556
+ " set to True when instantiating and hence "
557
+ "the inverse transform is not available."
558
+ )
559
+
560
+ K = self._get_kernel(X, self.X_transformed_fit_)
561
+ return np.dot(K, self.dual_coef_)
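# --- Editor's illustrative sketch (not part of the uploaded file) ----------
# `inverse_transform` above learns an approximate pre-image by kernel ridge
# regression, so it is only available with `fit_inverse_transform=True`.
# A minimal sketch; `gamma`, `alpha` and `n_components` are arbitrary values.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.decomposition import KernelPCA

X, _ = load_digits(return_X_y=True)
kpca = KernelPCA(n_components=32, kernel="rbf", gamma=1e-3,
                 fit_inverse_transform=True, alpha=0.1)
X_low = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_low)           # approximate reconstruction
print(np.mean((X - X_back) ** 2))                # reconstruction error
# ---------------------------------------------------------------------------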
562
+
563
+ def _more_tags(self):
564
+ return {
565
+ "preserves_dtype": [np.float64, np.float32],
566
+ "pairwise": self.kernel == "precomputed",
567
+ }
568
+
569
+ @property
570
+ def _n_features_out(self):
571
+ """Number of transformed output features."""
572
+ return self.eigenvalues_.shape[0]
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_lda.py ADDED
@@ -0,0 +1,929 @@
1
+ """
2
+
3
+ =============================================================
4
+ Online Latent Dirichlet Allocation with variational inference
5
+ =============================================================
6
+
7
+ This implementation is modified from Matthew D. Hoffman's onlineldavb code
8
+ Link: https://github.com/blei-lab/onlineldavb
9
+ """
10
+
11
+ # Author: Chyi-Kwei Yau
12
+ # Author: Matthew D. Hoffman (original onlineldavb implementation)
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ import scipy.sparse as sp
17
+ from joblib import effective_n_jobs
18
+ from scipy.special import gammaln, logsumexp
19
+
20
+ from ..base import (
21
+ BaseEstimator,
22
+ ClassNamePrefixFeaturesOutMixin,
23
+ TransformerMixin,
24
+ _fit_context,
25
+ )
26
+ from ..utils import check_random_state, gen_batches, gen_even_slices
27
+ from ..utils._param_validation import Interval, StrOptions
28
+ from ..utils.parallel import Parallel, delayed
29
+ from ..utils.validation import check_is_fitted, check_non_negative
30
+ from ._online_lda_fast import (
31
+ _dirichlet_expectation_1d as cy_dirichlet_expectation_1d,
32
+ )
33
+ from ._online_lda_fast import (
34
+ _dirichlet_expectation_2d,
35
+ )
36
+ from ._online_lda_fast import (
37
+ mean_change as cy_mean_change,
38
+ )
39
+
40
+ EPS = np.finfo(float).eps
41
+
42
+
43
+ def _update_doc_distribution(
44
+ X,
45
+ exp_topic_word_distr,
46
+ doc_topic_prior,
47
+ max_doc_update_iter,
48
+ mean_change_tol,
49
+ cal_sstats,
50
+ random_state,
51
+ ):
52
+ """E-step: update document-topic distribution.
53
+
54
+ Parameters
55
+ ----------
56
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
57
+ Document word matrix.
58
+
59
+ exp_topic_word_distr : ndarray of shape (n_topics, n_features)
60
+ Exponential value of expectation of log topic word distribution.
61
+ In the literature, this is `exp(E[log(beta)])`.
62
+
63
+ doc_topic_prior : float
64
+ Prior of document topic distribution `theta`.
65
+
66
+ max_doc_update_iter : int
67
+ Max number of iterations for updating document topic distribution in
68
+ the E-step.
69
+
70
+ mean_change_tol : float
71
+ Stopping tolerance for updating document topic distribution in E-step.
72
+
73
+ cal_sstats : bool
74
+ Whether to calculate sufficient statistics.
75
+ Set `cal_sstats` to `True` when the M-step needs to be run.
76
+
77
+ random_state : RandomState instance or None
78
+ Controls how the document topic distribution is initialized.
80
+ Setting `random_state` to None initializes the document topic
81
+ distribution to a constant number.
81
+
82
+ Returns
83
+ -------
84
+ (doc_topic_distr, suff_stats) :
85
+ `doc_topic_distr` is the unnormalized topic distribution for each document.
87
+ In the literature, this is `gamma`. We can calculate `E[log(theta)]`
88
+ from it.
88
+ `suff_stats` is expected sufficient statistics for the M-step.
89
+ When `cal_sstats == False`, this will be None.
90
+
91
+ """
92
+ is_sparse_x = sp.issparse(X)
93
+ n_samples, n_features = X.shape
94
+ n_topics = exp_topic_word_distr.shape[0]
95
+
96
+ if random_state:
97
+ doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype(
98
+ X.dtype, copy=False
99
+ )
100
+ else:
101
+ doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype)
102
+
103
+ # In the literature, this is `exp(E[log(theta)])`
104
+ exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
105
+
106
+ # diff on `component_` (only calculate it when `cal_diff` is True)
107
+ suff_stats = (
108
+ np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None
109
+ )
110
+
111
+ if is_sparse_x:
112
+ X_data = X.data
113
+ X_indices = X.indices
114
+ X_indptr = X.indptr
115
+
116
+ # These cython functions are called in a nested loop on usually very small arrays
117
+ # (length=n_topics). In that case, finding the appropriate signature of the
118
+ # fused-typed function can be more costly than its execution, hence the dispatch
119
+ # is done outside of the loop.
120
+ ctype = "float" if X.dtype == np.float32 else "double"
121
+ mean_change = cy_mean_change[ctype]
122
+ dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype]
123
+ eps = np.finfo(X.dtype).eps
124
+
125
+ for idx_d in range(n_samples):
126
+ if is_sparse_x:
127
+ ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
128
+ cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
129
+ else:
130
+ ids = np.nonzero(X[idx_d, :])[0]
131
+ cnts = X[idx_d, ids]
132
+
133
+ doc_topic_d = doc_topic_distr[idx_d, :]
134
+ # The next one is a copy, since the inner loop overwrites it.
135
+ exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
136
+ exp_topic_word_d = exp_topic_word_distr[:, ids]
137
+
138
+ # Iterate between `doc_topic_d` and `norm_phi` until convergence
139
+ for _ in range(0, max_doc_update_iter):
140
+ last_d = doc_topic_d
141
+
142
+ # The optimal phi_{dwk} is proportional to
143
+ # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
144
+ norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
145
+
146
+ doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T)
147
+ # Note: adds doc_topic_prior to doc_topic_d, in-place.
148
+ dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d)
149
+
150
+ if mean_change(last_d, doc_topic_d) < mean_change_tol:
151
+ break
152
+ doc_topic_distr[idx_d, :] = doc_topic_d
153
+
154
+ # Contribution of document d to the expected sufficient
155
+ # statistics for the M step.
156
+ if cal_sstats:
157
+ norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
158
+ suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
159
+
160
+ return (doc_topic_distr, suff_stats)
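# --- Editor's illustrative sketch (not part of the uploaded file) ----------
# The same per-document fixed-point update as above, written with plain
# numpy and scipy's digamma for the Dirichlet expectation. All shapes and
# values are toy examples; `exp_Elog_beta` stands in for exp(E[log(beta)]).
import numpy as np
from scipy.special import psi

rng = np.random.RandomState(0)
n_topics, n_words, alpha = 3, 8, 0.1
exp_Elog_beta = np.exp(rng.randn(n_topics, n_words))
cnts = rng.randint(0, 5, size=n_words).astype(float)   # one document's counts

gamma = np.ones(n_topics)                # doc-topic variational parameter
for _ in range(100):
    exp_Elog_theta = np.exp(psi(gamma) - psi(gamma.sum()))
    phi_norm = exp_Elog_theta @ exp_Elog_beta + 1e-100  # per-word normalizer
    gamma_new = alpha + exp_Elog_theta * (exp_Elog_beta @ (cnts / phi_norm))
    if np.mean(np.abs(gamma_new - gamma)) < 1e-3:       # mean-change stopping
        gamma = gamma_new
        break
    gamma = gamma_new
print(gamma)                             # unnormalized doc-topic distribution
# ---------------------------------------------------------------------------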
161
+
162
+
163
+ class LatentDirichletAllocation(
164
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
165
+ ):
166
+ """Latent Dirichlet Allocation with online variational Bayes algorithm.
167
+
168
+ The implementation is based on [1]_ and [2]_.
169
+
170
+ .. versionadded:: 0.17
171
+
172
+ Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
173
+
174
+ Parameters
175
+ ----------
176
+ n_components : int, default=10
177
+ Number of topics.
178
+
179
+ .. versionchanged:: 0.19
180
+ ``n_topics`` was renamed to ``n_components``
181
+
182
+ doc_topic_prior : float, default=None
183
+ Prior of document topic distribution `theta`. If the value is None,
184
+ defaults to `1 / n_components`.
185
+ In [1]_, this is called `alpha`.
186
+
187
+ topic_word_prior : float, default=None
188
+ Prior of topic word distribution `beta`. If the value is None, defaults
189
+ to `1 / n_components`.
190
+ In [1]_, this is called `eta`.
191
+
192
+ learning_method : {'batch', 'online'}, default='batch'
193
+ Method used to update `components_`. Only used in the :meth:`fit` method.
194
+ In general, if the data size is large, the online update will be much
195
+ faster than the batch update.
196
+
197
+ Valid options::
198
+
199
+ 'batch': Batch variational Bayes method. Use all training data in
200
+ each EM update.
201
+ Old `components_` will be overwritten in each iteration.
202
+ 'online': Online variational Bayes method. In each EM update, use
203
+ mini-batch of training data to update the ``components_``
204
+ variable incrementally. The learning rate is controlled by the
205
+ ``learning_decay`` and the ``learning_offset`` parameters.
206
+
207
+ .. versionchanged:: 0.20
208
+ The default learning method is now ``"batch"``.
209
+
210
+ learning_decay : float, default=0.7
211
+ It is a parameter that controls the learning rate in the online learning
212
+ method. The value should be set between (0.5, 1.0] to guarantee
213
+ asymptotic convergence. When the value is 0.0 and batch_size is
214
+ ``n_samples``, the update method is the same as batch learning. In the
215
+ literature, this is called kappa.
216
+
217
+ learning_offset : float, default=10.0
218
+ A (positive) parameter that downweights early iterations in online
219
+ learning. It should be greater than 1.0. In the literature, this is
220
+ called tau_0.
221
+
222
+ max_iter : int, default=10
223
+ The maximum number of passes over the training data (aka epochs).
224
+ It only impacts the behavior in the :meth:`fit` method, and not the
225
+ :meth:`partial_fit` method.
226
+
227
+ batch_size : int, default=128
228
+ Number of documents to use in each EM iteration. Only used in online
229
+ learning.
230
+
231
+ evaluate_every : int, default=-1
232
+ How often to evaluate perplexity. Only used in `fit` method.
233
+ Set it to 0 or a negative number to not evaluate perplexity in
234
+ training at all. Evaluating perplexity can help you check convergence
235
+ in the training process, but it will also increase total training time.
236
+ Evaluating perplexity in every iteration might increase training time
237
+ up to two-fold.
238
+
239
+ total_samples : int, default=1e6
240
+ Total number of documents. Only used in the :meth:`partial_fit` method.
241
+
242
+ perp_tol : float, default=1e-1
243
+ Perplexity tolerance in batch learning. Only used when
244
+ ``evaluate_every`` is greater than 0.
245
+
246
+ mean_change_tol : float, default=1e-3
247
+ Stopping tolerance for updating document topic distribution in E-step.
248
+
249
+ max_doc_update_iter : int, default=100
250
+ Max number of iterations for updating document topic distribution in
251
+ the E-step.
252
+
253
+ n_jobs : int, default=None
254
+ The number of jobs to use in the E-step.
255
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
256
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
257
+ for more details.
258
+
259
+ verbose : int, default=0
260
+ Verbosity level.
261
+
262
+ random_state : int, RandomState instance or None, default=None
263
+ Pass an int for reproducible results across multiple function calls.
264
+ See :term:`Glossary <random_state>`.
265
+
266
+ Attributes
267
+ ----------
268
+ components_ : ndarray of shape (n_components, n_features)
269
+ Variational parameters for topic word distribution. Since the complete
270
+ conditional for topic word distribution is a Dirichlet,
271
+ ``components_[i, j]`` can be viewed as pseudocount that represents the
272
+ number of times word `j` was assigned to topic `i`.
273
+ It can also be viewed as distribution over the words for each topic
274
+ after normalization:
275
+ ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
276
+
277
+ exp_dirichlet_component_ : ndarray of shape (n_components, n_features)
278
+ Exponential value of expectation of log topic word distribution.
279
+ In the literature, this is `exp(E[log(beta)])`.
280
+
281
+ n_batch_iter_ : int
282
+ Number of iterations of the EM step.
283
+
284
+ n_features_in_ : int
285
+ Number of features seen during :term:`fit`.
286
+
287
+ .. versionadded:: 0.24
288
+
289
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
290
+ Names of features seen during :term:`fit`. Defined only when `X`
291
+ has feature names that are all strings.
292
+
293
+ .. versionadded:: 1.0
294
+
295
+ n_iter_ : int
296
+ Number of passes over the dataset.
297
+
298
+ bound_ : float
299
+ Final perplexity score on training set.
300
+
301
+ doc_topic_prior_ : float
302
+ Prior of document topic distribution `theta`. If the value is None,
303
+ it is `1 / n_components`.
304
+
305
+ random_state_ : RandomState instance
306
+ RandomState instance that is generated either from a seed, the random
307
+ number generator or by `np.random`.
308
+
309
+ topic_word_prior_ : float
310
+ Prior of topic word distribution `beta`. If the value is None, it is
311
+ `1 / n_components`.
312
+
313
+ See Also
314
+ --------
315
+ sklearn.discriminant_analysis.LinearDiscriminantAnalysis:
316
+ A classifier with a linear decision boundary, generated by fitting
317
+ class conditional densities to the data and using Bayes' rule.
318
+
319
+ References
320
+ ----------
321
+ .. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D.
322
+ Hoffman, David M. Blei, Francis Bach, 2010
323
+ https://github.com/blei-lab/onlineldavb
324
+
325
+ .. [2] "Stochastic Variational Inference", Matthew D. Hoffman,
326
+ David M. Blei, Chong Wang, John Paisley, 2013
327
+
328
+ Examples
329
+ --------
330
+ >>> from sklearn.decomposition import LatentDirichletAllocation
331
+ >>> from sklearn.datasets import make_multilabel_classification
332
+ >>> # This produces a feature matrix of token counts, similar to what
333
+ >>> # CountVectorizer would produce on text.
334
+ >>> X, _ = make_multilabel_classification(random_state=0)
335
+ >>> lda = LatentDirichletAllocation(n_components=5,
336
+ ... random_state=0)
337
+ >>> lda.fit(X)
338
+ LatentDirichletAllocation(...)
339
+ >>> # get topics for some given samples:
340
+ >>> lda.transform(X[-2:])
341
+ array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846],
342
+ [0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]])
343
+ """
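# --- Editor's illustrative sketch (not part of the uploaded file) ----------
# Online (mini-batch) fitting through `partial_fit`, as opposed to the
# batch `fit` shown in the docstring example above. The batch size,
# `learning_offset` and `total_samples` below are arbitrary example values.
from sklearn.datasets import make_multilabel_classification
from sklearn.decomposition import LatentDirichletAllocation

X, _ = make_multilabel_classification(n_samples=500, random_state=0)
lda = LatentDirichletAllocation(n_components=5, learning_offset=20.0,
                                total_samples=500, random_state=0)
for start in range(0, X.shape[0], 100):          # feed 100-document batches
    lda.partial_fit(X[start:start + 100])
print(lda.transform(X[:2]).shape)                # (2, 5)
# ---------------------------------------------------------------------------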
344
+
345
+ _parameter_constraints: dict = {
346
+ "n_components": [Interval(Integral, 0, None, closed="neither")],
347
+ "doc_topic_prior": [None, Interval(Real, 0, 1, closed="both")],
348
+ "topic_word_prior": [None, Interval(Real, 0, 1, closed="both")],
349
+ "learning_method": [StrOptions({"batch", "online"})],
350
+ "learning_decay": [Interval(Real, 0, 1, closed="both")],
351
+ "learning_offset": [Interval(Real, 1.0, None, closed="left")],
352
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
353
+ "batch_size": [Interval(Integral, 0, None, closed="neither")],
354
+ "evaluate_every": [Interval(Integral, None, None, closed="neither")],
355
+ "total_samples": [Interval(Real, 0, None, closed="neither")],
356
+ "perp_tol": [Interval(Real, 0, None, closed="left")],
357
+ "mean_change_tol": [Interval(Real, 0, None, closed="left")],
358
+ "max_doc_update_iter": [Interval(Integral, 0, None, closed="left")],
359
+ "n_jobs": [None, Integral],
360
+ "verbose": ["verbose"],
361
+ "random_state": ["random_state"],
362
+ }
363
+
364
+ def __init__(
365
+ self,
366
+ n_components=10,
367
+ *,
368
+ doc_topic_prior=None,
369
+ topic_word_prior=None,
370
+ learning_method="batch",
371
+ learning_decay=0.7,
372
+ learning_offset=10.0,
373
+ max_iter=10,
374
+ batch_size=128,
375
+ evaluate_every=-1,
376
+ total_samples=1e6,
377
+ perp_tol=1e-1,
378
+ mean_change_tol=1e-3,
379
+ max_doc_update_iter=100,
380
+ n_jobs=None,
381
+ verbose=0,
382
+ random_state=None,
383
+ ):
384
+ self.n_components = n_components
385
+ self.doc_topic_prior = doc_topic_prior
386
+ self.topic_word_prior = topic_word_prior
387
+ self.learning_method = learning_method
388
+ self.learning_decay = learning_decay
389
+ self.learning_offset = learning_offset
390
+ self.max_iter = max_iter
391
+ self.batch_size = batch_size
392
+ self.evaluate_every = evaluate_every
393
+ self.total_samples = total_samples
394
+ self.perp_tol = perp_tol
395
+ self.mean_change_tol = mean_change_tol
396
+ self.max_doc_update_iter = max_doc_update_iter
397
+ self.n_jobs = n_jobs
398
+ self.verbose = verbose
399
+ self.random_state = random_state
400
+
401
+ def _init_latent_vars(self, n_features, dtype=np.float64):
402
+ """Initialize latent variables."""
403
+
404
+ self.random_state_ = check_random_state(self.random_state)
405
+ self.n_batch_iter_ = 1
406
+ self.n_iter_ = 0
407
+
408
+ if self.doc_topic_prior is None:
409
+ self.doc_topic_prior_ = 1.0 / self.n_components
410
+ else:
411
+ self.doc_topic_prior_ = self.doc_topic_prior
412
+
413
+ if self.topic_word_prior is None:
414
+ self.topic_word_prior_ = 1.0 / self.n_components
415
+ else:
416
+ self.topic_word_prior_ = self.topic_word_prior
417
+
418
+ init_gamma = 100.0
419
+ init_var = 1.0 / init_gamma
420
+ # In the literature, this is called `lambda`
421
+ self.components_ = self.random_state_.gamma(
422
+ init_gamma, init_var, (self.n_components, n_features)
423
+ ).astype(dtype, copy=False)
424
+
425
+ # In the literature, this is `exp(E[log(beta)])`
426
+ self.exp_dirichlet_component_ = np.exp(
427
+ _dirichlet_expectation_2d(self.components_)
428
+ )
429
+
430
+ def _e_step(self, X, cal_sstats, random_init, parallel=None):
431
+ """E-step in EM update.
432
+
433
+ Parameters
434
+ ----------
435
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
436
+ Document word matrix.
437
+
438
+ cal_sstats : bool
439
+ Parameter that indicates whether to calculate sufficient statistics
440
+ or not. Set ``cal_sstats`` to True when we need to run M-step.
441
+
442
+ random_init : bool
443
+ Parameter that indicates whether to initialize document topic
444
+ distribution randomly in the E-step. Set it to True in training
445
+ steps.
446
+
447
+ parallel : joblib.Parallel, default=None
448
+ Pre-initialized instance of joblib.Parallel.
449
+
450
+ Returns
451
+ -------
452
+ (doc_topic_distr, suff_stats) :
453
+ `doc_topic_distr` is unnormalized topic distribution for each
454
+ document. In the literature, this is called `gamma`.
455
+ `suff_stats` is expected sufficient statistics for the M-step.
456
+ When `cal_sstats == False`, it will be None.
457
+
458
+ """
459
+
460
+ # Run e-step in parallel
461
+ random_state = self.random_state_ if random_init else None
462
+
463
+ # TODO: make Parallel._effective_n_jobs public instead?
464
+ n_jobs = effective_n_jobs(self.n_jobs)
465
+ if parallel is None:
466
+ parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
467
+ results = parallel(
468
+ delayed(_update_doc_distribution)(
469
+ X[idx_slice, :],
470
+ self.exp_dirichlet_component_,
471
+ self.doc_topic_prior_,
472
+ self.max_doc_update_iter,
473
+ self.mean_change_tol,
474
+ cal_sstats,
475
+ random_state,
476
+ )
477
+ for idx_slice in gen_even_slices(X.shape[0], n_jobs)
478
+ )
479
+
480
+ # merge result
481
+ doc_topics, sstats_list = zip(*results)
482
+ doc_topic_distr = np.vstack(doc_topics)
483
+
484
+ if cal_sstats:
485
+ # This step finishes computing the sufficient statistics for the
486
+ # M-step.
487
+ suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
488
+ for sstats in sstats_list:
489
+ suff_stats += sstats
490
+ suff_stats *= self.exp_dirichlet_component_
491
+ else:
492
+ suff_stats = None
493
+
494
+ return (doc_topic_distr, suff_stats)
495
+
496
+ def _em_step(self, X, total_samples, batch_update, parallel=None):
497
+ """EM update for 1 iteration.
498
+
499
+ Update `components_` by batch VB or online VB.
500
+
501
+ Parameters
502
+ ----------
503
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
504
+ Document word matrix.
505
+
506
+ total_samples : int
507
+ Total number of documents. It is only used when
508
+ batch_update is `False`.
509
+
510
+ batch_update : bool
511
+ Parameter that controls updating method.
512
+ `True` for batch learning, `False` for online learning.
513
+
514
+ parallel : joblib.Parallel, default=None
515
+ Pre-initialized instance of joblib.Parallel
516
+
517
+ Returns
518
+ -------
519
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
520
+ Unnormalized document topic distribution.
521
+ """
522
+
523
+ # E-step
524
+ _, suff_stats = self._e_step(
525
+ X, cal_sstats=True, random_init=True, parallel=parallel
526
+ )
527
+
528
+ # M-step
529
+ if batch_update:
530
+ self.components_ = self.topic_word_prior_ + suff_stats
531
+ else:
532
+ # online update
533
+ # In the literature, the weight is `rho`
534
+ weight = np.power(
535
+ self.learning_offset + self.n_batch_iter_, -self.learning_decay
536
+ )
537
+ doc_ratio = float(total_samples) / X.shape[0]
538
+ self.components_ *= 1 - weight
539
+ self.components_ += weight * (
540
+ self.topic_word_prior_ + doc_ratio * suff_stats
541
+ )
542
+
543
+ # update `components_` related variables
544
+ self.exp_dirichlet_component_ = np.exp(
545
+ _dirichlet_expectation_2d(self.components_)
546
+ )
547
+ self.n_batch_iter_ += 1
548
+ return
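# --- Editor's illustrative sketch (not part of the uploaded file) ----------
# The online weight used above is rho_t = (learning_offset + t) ** (-learning_decay).
# A few example values, assuming the default tau_0 = 10.0 and kappa = 0.7:
import numpy as np

learning_offset, learning_decay = 10.0, 0.7
t = np.arange(1, 6)
rho = np.power(learning_offset + t, -learning_decay)
print(rho)          # decreasing weight given to each successive mini-batch
# ---------------------------------------------------------------------------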
549
+
550
+ def _more_tags(self):
551
+ return {
552
+ "preserves_dtype": [np.float64, np.float32],
553
+ "requires_positive_X": True,
554
+ }
555
+
556
+ def _check_non_neg_array(self, X, reset_n_features, whom):
557
+ """Check X format.
558
+
559
+ Check X format and make sure there are no negative values in X.
560
+
561
+ Parameters
562
+ ----------
563
+ X : array-like or sparse matrix
564
+
565
+ """
566
+ dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype
567
+
568
+ X = self._validate_data(
569
+ X,
570
+ reset=reset_n_features,
571
+ accept_sparse="csr",
572
+ dtype=dtype,
573
+ )
574
+ check_non_negative(X, whom)
575
+
576
+ return X
577
+
578
+ @_fit_context(prefer_skip_nested_validation=True)
579
+ def partial_fit(self, X, y=None):
580
+ """Online VB with Mini-Batch update.
581
+
582
+ Parameters
583
+ ----------
584
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
585
+ Document word matrix.
586
+
587
+ y : Ignored
588
+ Not used, present here for API consistency by convention.
589
+
590
+ Returns
591
+ -------
592
+ self
593
+ Partially fitted estimator.
594
+ """
595
+ first_time = not hasattr(self, "components_")
596
+
597
+ X = self._check_non_neg_array(
598
+ X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit"
599
+ )
600
+ n_samples, n_features = X.shape
601
+ batch_size = self.batch_size
602
+
603
+ # initialize parameters or check
604
+ if first_time:
605
+ self._init_latent_vars(n_features, dtype=X.dtype)
606
+
607
+ if n_features != self.components_.shape[1]:
608
+ raise ValueError(
609
+ "The provided data has %d dimensions while "
610
+ "the model was trained with feature size %d."
611
+ % (n_features, self.components_.shape[1])
612
+ )
613
+
614
+ n_jobs = effective_n_jobs(self.n_jobs)
615
+ with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
616
+ for idx_slice in gen_batches(n_samples, batch_size):
617
+ self._em_step(
618
+ X[idx_slice, :],
619
+ total_samples=self.total_samples,
620
+ batch_update=False,
621
+ parallel=parallel,
622
+ )
623
+
624
+ return self
625
+
626
+ @_fit_context(prefer_skip_nested_validation=True)
627
+ def fit(self, X, y=None):
628
+ """Learn model for the data X with variational Bayes method.
629
+
630
+ When `learning_method` is 'online', use mini-batch update.
631
+ Otherwise, use batch update.
632
+
633
+ Parameters
634
+ ----------
635
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
636
+ Document word matrix.
637
+
638
+ y : Ignored
639
+ Not used, present here for API consistency by convention.
640
+
641
+ Returns
642
+ -------
643
+ self
644
+ Fitted estimator.
645
+ """
646
+ X = self._check_non_neg_array(
647
+ X, reset_n_features=True, whom="LatentDirichletAllocation.fit"
648
+ )
649
+ n_samples, n_features = X.shape
650
+ max_iter = self.max_iter
651
+ evaluate_every = self.evaluate_every
652
+ learning_method = self.learning_method
653
+
654
+ batch_size = self.batch_size
655
+
656
+ # initialize parameters
657
+ self._init_latent_vars(n_features, dtype=X.dtype)
658
+ # change to perplexity later
659
+ last_bound = None
660
+ n_jobs = effective_n_jobs(self.n_jobs)
661
+ with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
662
+ for i in range(max_iter):
663
+ if learning_method == "online":
664
+ for idx_slice in gen_batches(n_samples, batch_size):
665
+ self._em_step(
666
+ X[idx_slice, :],
667
+ total_samples=n_samples,
668
+ batch_update=False,
669
+ parallel=parallel,
670
+ )
671
+ else:
672
+ # batch update
673
+ self._em_step(
674
+ X, total_samples=n_samples, batch_update=True, parallel=parallel
675
+ )
676
+
677
+ # check perplexity
678
+ if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
679
+ doc_topics_distr, _ = self._e_step(
680
+ X, cal_sstats=False, random_init=False, parallel=parallel
681
+ )
682
+ bound = self._perplexity_precomp_distr(
683
+ X, doc_topics_distr, sub_sampling=False
684
+ )
685
+ if self.verbose:
686
+ print(
687
+ "iteration: %d of max_iter: %d, perplexity: %.4f"
688
+ % (i + 1, max_iter, bound)
689
+ )
690
+
691
+ if last_bound and abs(last_bound - bound) < self.perp_tol:
692
+ break
693
+ last_bound = bound
694
+
695
+ elif self.verbose:
696
+ print("iteration: %d of max_iter: %d" % (i + 1, max_iter))
697
+ self.n_iter_ += 1
698
+
699
+ # calculate final perplexity value on train set
700
+ doc_topics_distr, _ = self._e_step(
701
+ X, cal_sstats=False, random_init=False, parallel=parallel
702
+ )
703
+ self.bound_ = self._perplexity_precomp_distr(
704
+ X, doc_topics_distr, sub_sampling=False
705
+ )
706
+
707
+ return self
708
+
709
+ def _unnormalized_transform(self, X):
710
+ """Transform data X according to fitted model.
711
+
712
+ Parameters
713
+ ----------
714
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
715
+ Document word matrix.
716
+
717
+ Returns
718
+ -------
719
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
720
+ Document topic distribution for X.
721
+ """
722
+ doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False)
723
+
724
+ return doc_topic_distr
725
+
726
+ def transform(self, X):
727
+ """Transform data X according to the fitted model.
728
+
729
+ .. versionchanged:: 0.18
730
+ *doc_topic_distr* is now normalized
731
+
732
+ Parameters
733
+ ----------
734
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
735
+ Document word matrix.
736
+
737
+ Returns
738
+ -------
739
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
740
+ Document topic distribution for X.
741
+ """
742
+ check_is_fitted(self)
743
+ X = self._check_non_neg_array(
744
+ X, reset_n_features=False, whom="LatentDirichletAllocation.transform"
745
+ )
746
+ doc_topic_distr = self._unnormalized_transform(X)
747
+ doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
748
+ return doc_topic_distr
749
+
750
+ def _approx_bound(self, X, doc_topic_distr, sub_sampling):
751
+ """Estimate the variational bound.
752
+
753
+ Estimate the variational bound over "all documents" using only the
754
+ documents passed in as X. Since log-likelihood of each word cannot
755
+ be computed directly, we use this bound to estimate it.
756
+
757
+ Parameters
758
+ ----------
759
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
760
+ Document word matrix.
761
+
762
+ doc_topic_distr : ndarray of shape (n_samples, n_components)
763
+ Document topic distribution. In the literature, this is called
764
+ gamma.
765
+
766
+ sub_sampling : bool, default=False
767
+ Compensate for subsampling of documents.
768
+ It is used to calculate the bound in online learning.
769
+
770
+ Returns
771
+ -------
772
+ score : float
773
+
774
+ """
775
+
776
+ def _loglikelihood(prior, distr, dirichlet_distr, size):
777
+ # calculate log-likelihood
778
+ score = np.sum((prior - distr) * dirichlet_distr)
779
+ score += np.sum(gammaln(distr) - gammaln(prior))
780
+ score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
781
+ return score
782
+
783
+ is_sparse_x = sp.issparse(X)
784
+ n_samples, n_components = doc_topic_distr.shape
785
+ n_features = self.components_.shape[1]
786
+ score = 0
787
+
788
+ dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
789
+ dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
790
+ doc_topic_prior = self.doc_topic_prior_
791
+ topic_word_prior = self.topic_word_prior_
792
+
793
+ if is_sparse_x:
794
+ X_data = X.data
795
+ X_indices = X.indices
796
+ X_indptr = X.indptr
797
+
798
+ # E[log p(docs | theta, beta)]
799
+ for idx_d in range(0, n_samples):
800
+ if is_sparse_x:
801
+ ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
802
+ cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
803
+ else:
804
+ ids = np.nonzero(X[idx_d, :])[0]
805
+ cnts = X[idx_d, ids]
806
+ temp = (
807
+ dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
808
+ )
809
+ norm_phi = logsumexp(temp, axis=0)
810
+ score += np.dot(cnts, norm_phi)
811
+
812
+ # compute E[log p(theta | alpha) - log q(theta | gamma)]
813
+ score += _loglikelihood(
814
+ doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components
815
+ )
816
+
817
+ # Compensate for the subsampling of the population of documents
818
+ if sub_sampling:
819
+ doc_ratio = float(self.total_samples) / n_samples
820
+ score *= doc_ratio
821
+
822
+ # E[log p(beta | eta) - log q (beta | lambda)]
823
+ score += _loglikelihood(
824
+ topic_word_prior, self.components_, dirichlet_component_, n_features
825
+ )
826
+
827
+ return score
828
+
829
+ def score(self, X, y=None):
830
+ """Calculate approximate log-likelihood as score.
831
+
832
+ Parameters
833
+ ----------
834
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
835
+ Document word matrix.
836
+
837
+ y : Ignored
838
+ Not used, present here for API consistency by convention.
839
+
840
+ Returns
841
+ -------
842
+ score : float
843
+ Use approximate bound as score.
844
+ """
845
+ check_is_fitted(self)
846
+ X = self._check_non_neg_array(
847
+ X, reset_n_features=False, whom="LatentDirichletAllocation.score"
848
+ )
849
+
850
+ doc_topic_distr = self._unnormalized_transform(X)
851
+ score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
852
+ return score
853
+
854
+ def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
855
+ """Calculate approximate perplexity for data X with ability to accept
856
+ a precomputed doc_topic_distr.
857
+
858
+ Perplexity is defined as exp(-1. * log-likelihood per word)
859
+
860
+ Parameters
861
+ ----------
862
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
863
+ Document word matrix.
864
+
865
+ doc_topic_distr : ndarray of shape (n_samples, n_components), \
866
+ default=None
867
+ Document topic distribution.
868
+ If it is None, it will be generated by applying transform on X.
869
+
870
+ Returns
871
+ -------
872
+ score : float
873
+ Perplexity score.
874
+ """
875
+ if doc_topic_distr is None:
876
+ doc_topic_distr = self._unnormalized_transform(X)
877
+ else:
878
+ n_samples, n_components = doc_topic_distr.shape
879
+ if n_samples != X.shape[0]:
880
+ raise ValueError(
881
+ "Number of samples in X and doc_topic_distr do not match."
882
+ )
883
+
884
+ if n_components != self.n_components:
885
+ raise ValueError("Number of topics does not match.")
886
+
887
+ current_samples = X.shape[0]
888
+ bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
889
+
890
+ if sub_sampling:
891
+ word_cnt = X.sum() * (float(self.total_samples) / current_samples)
892
+ else:
893
+ word_cnt = X.sum()
894
+ perword_bound = bound / word_cnt
895
+
896
+ return np.exp(-1.0 * perword_bound)
897
+
898
+ def perplexity(self, X, sub_sampling=False):
899
+ """Calculate approximate perplexity for data X.
900
+
901
+ Perplexity is defined as exp(-1. * log-likelihood per word)
902
+
903
+ .. versionchanged:: 0.19
904
+ *doc_topic_distr* argument has been deprecated and is ignored
905
+ because the user no longer has access to the unnormalized distribution.
906
+
907
+ Parameters
908
+ ----------
909
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
910
+ Document word matrix.
911
+
912
+ sub_sampling : bool
913
+ Do sub-sampling or not.
914
+
915
+ Returns
916
+ -------
917
+ score : float
918
+ Perplexity score.
919
+ """
920
+ check_is_fitted(self)
921
+ X = self._check_non_neg_array(
922
+ X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity"
923
+ )
924
+ return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
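# --- Editor's illustrative sketch (not part of the uploaded file) ----------
# Relating `score` (the approximate log-likelihood bound) to `perplexity`
# (exp(-bound / total word count)) on a toy count matrix; sizes are arbitrary.
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

rng = np.random.RandomState(0)
X = rng.randint(0, 4, size=(50, 20))             # toy document-word counts
lda = LatentDirichletAllocation(n_components=4, random_state=0).fit(X)
bound = lda.score(X)                             # approximate log-likelihood
print(np.exp(-bound / X.sum()))                  # matches lda.perplexity(X)
print(lda.perplexity(X))
# ---------------------------------------------------------------------------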
925
+
926
+ @property
927
+ def _n_features_out(self):
928
+ """Number of transformed output features."""
929
+ return self.components_.shape[0]
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py ADDED
@@ -0,0 +1,2443 @@
1
+ """ Non-negative matrix factorization.
2
+ """
3
+ # Author: Vlad Niculae
4
+ # Lars Buitinck
5
+ # Mathieu Blondel <[email protected]>
6
+ # Tom Dupre la Tour
7
+ # License: BSD 3 clause
8
+
9
+ import itertools
10
+ import time
11
+ import warnings
12
+ from abc import ABC
13
+ from math import sqrt
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ import scipy.sparse as sp
18
+ from scipy import linalg
19
+
20
+ from .._config import config_context
21
+ from ..base import (
22
+ BaseEstimator,
23
+ ClassNamePrefixFeaturesOutMixin,
24
+ TransformerMixin,
25
+ _fit_context,
26
+ )
27
+ from ..exceptions import ConvergenceWarning
28
+ from ..utils import check_array, check_random_state, gen_batches, metadata_routing
29
+ from ..utils._param_validation import (
30
+ Hidden,
31
+ Interval,
32
+ StrOptions,
33
+ validate_params,
34
+ )
35
+ from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
36
+ from ..utils.validation import (
37
+ check_is_fitted,
38
+ check_non_negative,
39
+ )
40
+ from ._cdnmf_fast import _update_cdnmf_fast
41
+
42
+ EPSILON = np.finfo(np.float32).eps
43
+
44
+
45
+ def norm(x):
46
+ """Dot product-based Euclidean norm implementation.
47
+
48
+ See: http://fa.bianp.net/blog/2011/computing-the-vector-norm/
49
+
50
+ Parameters
51
+ ----------
52
+ x : array-like
53
+ Vector for which to compute the norm.
54
+ """
55
+ return sqrt(squared_norm(x))
56
+
57
+
58
+ def trace_dot(X, Y):
59
+ """Trace of np.dot(X, Y.T).
60
+
61
+ Parameters
62
+ ----------
63
+ X : array-like
64
+ First matrix.
65
+ Y : array-like
66
+ Second matrix.
67
+ """
68
+ return np.dot(X.ravel(), Y.ravel())
69
+
70
+
71
+ def _check_init(A, shape, whom):
72
+ A = check_array(A)
73
+ if shape[0] != "auto" and A.shape[0] != shape[0]:
74
+ raise ValueError(
75
+ f"Array with wrong first dimension passed to {whom}. Expected {shape[0]}, "
76
+ f"but got {A.shape[0]}."
77
+ )
78
+ if shape[1] != "auto" and A.shape[1] != shape[1]:
79
+ raise ValueError(
80
+ f"Array with wrong second dimension passed to {whom}. Expected {shape[1]}, "
81
+ f"but got {A.shape[1]}."
82
+ )
83
+ check_non_negative(A, whom)
84
+ if np.max(A) == 0:
85
+ raise ValueError(f"Array passed to {whom} is full of zeros.")
86
+
87
+
88
+ def _beta_divergence(X, W, H, beta, square_root=False):
89
+ """Compute the beta-divergence of X and dot(W, H).
90
+
91
+ Parameters
92
+ ----------
93
+ X : float or array-like of shape (n_samples, n_features)
94
+
95
+ W : float or array-like of shape (n_samples, n_components)
96
+
97
+ H : float or array-like of shape (n_components, n_features)
98
+
99
+ beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}
100
+ Parameter of the beta-divergence.
101
+ If beta == 2, this is half the Frobenius *squared* norm.
102
+ If beta == 1, this is the generalized Kullback-Leibler divergence.
103
+ If beta == 0, this is the Itakura-Saito divergence.
104
+ Else, this is the general beta-divergence.
105
+
106
+ square_root : bool, default=False
107
+ If True, return np.sqrt(2 * res)
108
+ For beta == 2, it corresponds to the Frobenius norm.
109
+
110
+ Returns
111
+ -------
112
+ res : float
113
+ Beta divergence of X and np.dot(W, H).
114
+ """
115
+ beta = _beta_loss_to_float(beta)
116
+
117
+ # The method can be called with scalars
118
+ if not sp.issparse(X):
119
+ X = np.atleast_2d(X)
120
+ W = np.atleast_2d(W)
121
+ H = np.atleast_2d(H)
122
+
123
+ # Frobenius norm
124
+ if beta == 2:
125
+ # Avoid the creation of the dense np.dot(W, H) if X is sparse.
126
+ if sp.issparse(X):
127
+ norm_X = np.dot(X.data, X.data)
128
+ norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
129
+ cross_prod = trace_dot((X @ H.T), W)
130
+ res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
131
+ else:
132
+ res = squared_norm(X - np.dot(W, H)) / 2.0
133
+
134
+ if square_root:
135
+ return np.sqrt(res * 2)
136
+ else:
137
+ return res
138
+
139
+ if sp.issparse(X):
140
+ # compute np.dot(W, H) only where X is nonzero
141
+ WH_data = _special_sparse_dot(W, H, X).data
142
+ X_data = X.data
143
+ else:
144
+ WH = np.dot(W, H)
145
+ WH_data = WH.ravel()
146
+ X_data = X.ravel()
147
+
148
+ # do not affect the zeros: here 0 ** (-1) = 0 and not infinity
149
+ indices = X_data > EPSILON
150
+ WH_data = WH_data[indices]
151
+ X_data = X_data[indices]
152
+
153
+ # used to avoid division by zero
154
+ WH_data[WH_data < EPSILON] = EPSILON
155
+
156
+ # generalized Kullback-Leibler divergence
157
+ if beta == 1:
158
+ # fast and memory efficient computation of np.sum(np.dot(W, H))
159
+ sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
160
+ # computes np.sum(X * log(X / WH)) only where X is nonzero
161
+ div = X_data / WH_data
162
+ res = np.dot(X_data, np.log(div))
163
+ # add full np.sum(np.dot(W, H)) - np.sum(X)
164
+ res += sum_WH - X_data.sum()
165
+
166
+ # Itakura-Saito divergence
167
+ elif beta == 0:
168
+ div = X_data / WH_data
169
+ res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
170
+
171
+ # beta-divergence, beta not in (0, 1, 2)
172
+ else:
173
+ if sp.issparse(X):
174
+ # slow loop, but memory efficient computation of :
175
+ # np.sum(np.dot(W, H) ** beta)
176
+ sum_WH_beta = 0
177
+ for i in range(X.shape[1]):
178
+ sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
179
+
180
+ else:
181
+ sum_WH_beta = np.sum(WH**beta)
182
+
183
+ sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
184
+ res = (X_data**beta).sum() - beta * sum_X_WH
185
+ res += sum_WH_beta * (beta - 1)
186
+ res /= beta * (beta - 1)
187
+
188
+ if square_root:
189
+ res = max(res, 0) # avoid negative number due to rounding errors
190
+ return np.sqrt(2 * res)
191
+ else:
192
+ return res
193
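+ # Minimal sketch (annotation, assuming only this module's helpers): for the
+ # Frobenius case, _beta_divergence(X, W, H, beta=2, square_root=True) matches
+ # np.linalg.norm(X - W @ H), e.g.:
+ #   rng = np.random.RandomState(0)
+ #   X = rng.rand(4, 3); W = rng.rand(4, 2); H = rng.rand(2, 3)
+ #   np.allclose(_beta_divergence(X, W, H, 2, square_root=True),
+ #               np.linalg.norm(X - W @ H))  # True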
+
194
+
195
+ def _special_sparse_dot(W, H, X):
196
+ """Computes np.dot(W, H), only where X is non zero."""
197
+ if sp.issparse(X):
198
+ ii, jj = X.nonzero()
199
+ n_vals = ii.shape[0]
200
+ dot_vals = np.empty(n_vals)
201
+ n_components = W.shape[1]
202
+
203
+ batch_size = max(n_components, n_vals // n_components)
204
+ for start in range(0, n_vals, batch_size):
205
+ batch = slice(start, start + batch_size)
206
+ dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(
207
+ axis=1
208
+ )
209
+
210
+ WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
211
+ return WH.tocsr()
212
+ else:
213
+ return np.dot(W, H)
214
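+ # Annotation (not upstream code): for a sparse X the result is a CSR matrix whose
+ # sparsity pattern is that of X, i.e. it agrees with (W @ H) only on X's nonzero
+ # entries; everything else is left at zero so memory stays proportional to X.nnz
+ # rather than n_samples * n_features.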
+
215
+
216
+ def _beta_loss_to_float(beta_loss):
217
+ """Convert string beta_loss to float."""
218
+ beta_loss_map = {"frobenius": 2, "kullback-leibler": 1, "itakura-saito": 0}
219
+ if isinstance(beta_loss, str):
220
+ beta_loss = beta_loss_map[beta_loss]
221
+ return beta_loss
222
+
223
+
224
+ def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None):
225
+ """Algorithms for NMF initialization.
226
+
227
+ Computes an initial guess for the non-negative
228
+ rank k matrix approximation for X: X = WH.
229
+
230
+ Parameters
231
+ ----------
232
+ X : array-like of shape (n_samples, n_features)
233
+ The data matrix to be decomposed.
234
+
235
+ n_components : int
236
+ The number of components desired in the approximation.
237
+
238
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None
239
+ Method used to initialize the procedure.
240
+ Valid options:
241
+
242
+ - None: 'nndsvda' if n_components <= min(n_samples, n_features),
243
+ otherwise 'random'.
244
+
245
+ - 'random': non-negative random matrices, scaled with:
246
+ sqrt(X.mean() / n_components)
247
+
248
+ - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
249
+ initialization (better for sparseness)
250
+
251
+ - 'nndsvda': NNDSVD with zeros filled with the average of X
252
+ (better when sparsity is not desired)
253
+
254
+ - 'nndsvdar': NNDSVD with zeros filled with small random values
255
+ (generally faster, less accurate alternative to NNDSVDa
256
+ for when sparsity is not desired)
257
+
258
+ - 'custom': use custom matrices W and H
259
+
260
+ .. versionchanged:: 1.1
261
+ When `init=None` and `n_components` is less than `n_samples` and `n_features`,
262
+ it defaults to `nndsvda` instead of `nndsvd`.
263
+
264
+ eps : float, default=1e-6
265
+ Truncate all values less than this in output to zero.
266
+
267
+ random_state : int, RandomState instance or None, default=None
268
+ Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for
269
+ reproducible results across multiple function calls.
270
+ See :term:`Glossary <random_state>`.
271
+
272
+ Returns
273
+ -------
274
+ W : array-like of shape (n_samples, n_components)
275
+ Initial guesses for solving X ~= WH.
276
+
277
+ H : array-like of shape (n_components, n_features)
278
+ Initial guesses for solving X ~= WH.
279
+
280
+ References
281
+ ----------
282
+ C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
283
+ nonnegative matrix factorization - Pattern Recognition, 2008
284
+ http://tinyurl.com/nndsvd
285
+ """
286
+ check_non_negative(X, "NMF initialization")
287
+ n_samples, n_features = X.shape
288
+
289
+ if (
290
+ init is not None
291
+ and init != "random"
292
+ and n_components > min(n_samples, n_features)
293
+ ):
294
+ raise ValueError(
295
+ "init = '{}' can only be used when "
296
+ "n_components <= min(n_samples, n_features)".format(init)
297
+ )
298
+
299
+ if init is None:
300
+ if n_components <= min(n_samples, n_features):
301
+ init = "nndsvda"
302
+ else:
303
+ init = "random"
304
+
305
+ # Random initialization
306
+ if init == "random":
307
+ avg = np.sqrt(X.mean() / n_components)
308
+ rng = check_random_state(random_state)
309
+ H = avg * rng.standard_normal(size=(n_components, n_features)).astype(
310
+ X.dtype, copy=False
311
+ )
312
+ W = avg * rng.standard_normal(size=(n_samples, n_components)).astype(
313
+ X.dtype, copy=False
314
+ )
315
+ np.abs(H, out=H)
316
+ np.abs(W, out=W)
317
+ return W, H
318
+
319
+ # NNDSVD initialization
320
+ U, S, V = randomized_svd(X, n_components, random_state=random_state)
321
+ W = np.zeros_like(U)
322
+ H = np.zeros_like(V)
323
+
324
+ # The leading singular triplet is non-negative
325
+ # so it can be used as is for initialization.
326
+ W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
327
+ H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
328
+
329
+ for j in range(1, n_components):
330
+ x, y = U[:, j], V[j, :]
331
+
332
+ # extract positive and negative parts of column vectors
333
+ x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
334
+ x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
335
+
336
+ # and their norms
337
+ x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
338
+ x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
339
+
340
+ m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
341
+
342
+ # choose update
343
+ if m_p > m_n:
344
+ u = x_p / x_p_nrm
345
+ v = y_p / y_p_nrm
346
+ sigma = m_p
347
+ else:
348
+ u = x_n / x_n_nrm
349
+ v = y_n / y_n_nrm
350
+ sigma = m_n
351
+
352
+ lbd = np.sqrt(S[j] * sigma)
353
+ W[:, j] = lbd * u
354
+ H[j, :] = lbd * v
355
+
356
+ W[W < eps] = 0
357
+ H[H < eps] = 0
358
+
359
+ if init == "nndsvd":
360
+ pass
361
+ elif init == "nndsvda":
362
+ avg = X.mean()
363
+ W[W == 0] = avg
364
+ H[H == 0] = avg
365
+ elif init == "nndsvdar":
366
+ rng = check_random_state(random_state)
367
+ avg = X.mean()
368
+ W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100)
369
+ H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100)
370
+ else:
371
+ raise ValueError(
372
+ "Invalid init parameter: got %r instead of one of %r"
373
+ % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar"))
374
+ )
375
+
376
+ return W, H
377
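+ # Minimal usage sketch (annotation; shapes and values are illustrative only):
+ #   rng = np.random.RandomState(0)
+ #   X = np.abs(rng.randn(6, 5))
+ #   W, H = _initialize_nmf(X, n_components=3, init="nndsvda", random_state=0)
+ #   # W.shape == (6, 3), H.shape == (3, 5), and both are non-negative.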
+
378
+
379
+ def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state):
380
+ """Helper function for _fit_coordinate_descent.
381
+
382
+ Update W to minimize the objective function, iterating once over all
383
+ coordinates. By symmetry, to update H, one can call
384
+ _update_coordinate_descent(X.T, Ht, W, ...).
385
+
386
+ """
387
+ n_components = Ht.shape[1]
388
+
389
+ HHt = np.dot(Ht.T, Ht)
390
+ XHt = safe_sparse_dot(X, Ht)
391
+
392
+ # L2 regularization corresponds to increase of the diagonal of HHt
393
+ if l2_reg != 0.0:
394
+ # adds l2_reg only on the diagonal
395
+ HHt.flat[:: n_components + 1] += l2_reg
396
+ # L1 regularization corresponds to decrease of each element of XHt
397
+ if l1_reg != 0.0:
398
+ XHt -= l1_reg
399
+
400
+ if shuffle:
401
+ permutation = random_state.permutation(n_components)
402
+ else:
403
+ permutation = np.arange(n_components)
404
+ # The following seems to be required on 64-bit Windows w/ Python 3.5.
405
+ permutation = np.asarray(permutation, dtype=np.intp)
406
+ return _update_cdnmf_fast(W, HHt, XHt, permutation)
407
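+ # Annotation (not upstream code): with the squared Frobenius objective
+ #   0.5 * ||X - W Ht.T||_F^2 + l1_reg * sum(W) + 0.5 * l2_reg * ||W||_F^2,
+ # the gradient w.r.t. W is W @ (Ht.T @ Ht) - X @ Ht + l1_reg + l2_reg * W, which is
+ # why l2_reg is folded into the diagonal of HHt and l1_reg is subtracted from XHt
+ # before the Cython coordinate updates.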
+
408
+
409
+ def _fit_coordinate_descent(
410
+ X,
411
+ W,
412
+ H,
413
+ tol=1e-4,
414
+ max_iter=200,
415
+ l1_reg_W=0,
416
+ l1_reg_H=0,
417
+ l2_reg_W=0,
418
+ l2_reg_H=0,
419
+ update_H=True,
420
+ verbose=0,
421
+ shuffle=False,
422
+ random_state=None,
423
+ ):
424
+ """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
425
+
426
+ The objective function is minimized with an alternating minimization of W
427
+ and H. Each minimization is done with a cyclic (up to a permutation of the
428
+ features) Coordinate Descent.
429
+
430
+ Parameters
431
+ ----------
432
+ X : array-like of shape (n_samples, n_features)
433
+ Constant matrix.
434
+
435
+ W : array-like of shape (n_samples, n_components)
436
+ Initial guess for the solution.
437
+
438
+ H : array-like of shape (n_components, n_features)
439
+ Initial guess for the solution.
440
+
441
+ tol : float, default=1e-4
442
+ Tolerance of the stopping condition.
443
+
444
+ max_iter : int, default=200
445
+ Maximum number of iterations before timing out.
446
+
447
+ l1_reg_W : float, default=0.
448
+ L1 regularization parameter for W.
449
+
450
+ l1_reg_H : float, default=0.
451
+ L1 regularization parameter for H.
452
+
453
+ l2_reg_W : float, default=0.
454
+ L2 regularization parameter for W.
455
+
456
+ l2_reg_H : float, default=0.
457
+ L2 regularization parameter for H.
458
+
459
+ update_H : bool, default=True
460
+ If True, both W and H will be estimated from initial guesses.
461
+ If False, only W will be estimated.
462
+
463
+ verbose : int, default=0
464
+ The verbosity level.
465
+
466
+ shuffle : bool, default=False
467
+ If true, randomize the order of coordinates in the CD solver.
468
+
469
+ random_state : int, RandomState instance or None, default=None
470
+ Used to randomize the coordinates in the CD solver, when
471
+ ``shuffle`` is set to ``True``. Pass an int for reproducible
472
+ results across multiple function calls.
473
+ See :term:`Glossary <random_state>`.
474
+
475
+ Returns
476
+ -------
477
+ W : ndarray of shape (n_samples, n_components)
478
+ Solution to the non-negative least squares problem.
479
+
480
+ H : ndarray of shape (n_components, n_features)
481
+ Solution to the non-negative least squares problem.
482
+
483
+ n_iter : int
484
+ The number of iterations done by the algorithm.
485
+
486
+ References
487
+ ----------
488
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
489
+ factorizations" <10.1587/transfun.E92.A.708>`
490
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
491
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
492
+ """
493
+ # so W and Ht are both in C order in memory
494
+ Ht = check_array(H.T, order="C")
495
+ X = check_array(X, accept_sparse="csr")
496
+
497
+ rng = check_random_state(random_state)
498
+
499
+ for n_iter in range(1, max_iter + 1):
500
+ violation = 0.0
501
+
502
+ # Update W
503
+ violation += _update_coordinate_descent(
504
+ X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng
505
+ )
506
+ # Update H
507
+ if update_H:
508
+ violation += _update_coordinate_descent(
509
+ X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng
510
+ )
511
+
512
+ if n_iter == 1:
513
+ violation_init = violation
514
+
515
+ if violation_init == 0:
516
+ break
517
+
518
+ if verbose:
519
+ print("violation:", violation / violation_init)
520
+
521
+ if violation / violation_init <= tol:
522
+ if verbose:
523
+ print("Converged at iteration", n_iter + 1)
524
+ break
525
+
526
+ return W, Ht.T, n_iter
527
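+ # Annotation (not upstream code): "violation" aggregates how strongly the KKT
+ # optimality conditions are violated during one sweep; convergence is declared
+ # when it falls below tol times the violation measured at the first iteration.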
+
528
+
529
+ def _multiplicative_update_w(
530
+ X,
531
+ W,
532
+ H,
533
+ beta_loss,
534
+ l1_reg_W,
535
+ l2_reg_W,
536
+ gamma,
537
+ H_sum=None,
538
+ HHt=None,
539
+ XHt=None,
540
+ update_H=True,
541
+ ):
542
+ """Update W in Multiplicative Update NMF."""
543
+ if beta_loss == 2:
544
+ # Numerator
545
+ if XHt is None:
546
+ XHt = safe_sparse_dot(X, H.T)
547
+ if update_H:
548
+ # avoid a copy of XHt, which will be re-computed (update_H=True)
549
+ numerator = XHt
550
+ else:
551
+ # preserve the XHt, which is not re-computed (update_H=False)
552
+ numerator = XHt.copy()
553
+
554
+ # Denominator
555
+ if HHt is None:
556
+ HHt = np.dot(H, H.T)
557
+ denominator = np.dot(W, HHt)
558
+
559
+ else:
560
+ # Numerator
561
+ # if X is sparse, compute WH only where X is non zero
562
+ WH_safe_X = _special_sparse_dot(W, H, X)
563
+ if sp.issparse(X):
564
+ WH_safe_X_data = WH_safe_X.data
565
+ X_data = X.data
566
+ else:
567
+ WH_safe_X_data = WH_safe_X
568
+ X_data = X
569
+ # copy used in the Denominator
570
+ WH = WH_safe_X.copy()
571
+ if beta_loss - 1.0 < 0:
572
+ WH[WH < EPSILON] = EPSILON
573
+
574
+ # to avoid taking a negative power of zero
575
+ if beta_loss - 2.0 < 0:
576
+ WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
577
+
578
+ if beta_loss == 1:
579
+ np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
580
+ elif beta_loss == 0:
581
+ # speeds up computation time
582
+ # refer to /numpy/numpy/issues/9363
583
+ WH_safe_X_data **= -1
584
+ WH_safe_X_data **= 2
585
+ # element-wise multiplication
586
+ WH_safe_X_data *= X_data
587
+ else:
588
+ WH_safe_X_data **= beta_loss - 2
589
+ # element-wise multiplication
590
+ WH_safe_X_data *= X_data
591
+
592
+ # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
593
+ numerator = safe_sparse_dot(WH_safe_X, H.T)
594
+
595
+ # Denominator
596
+ if beta_loss == 1:
597
+ if H_sum is None:
598
+ H_sum = np.sum(H, axis=1) # shape(n_components, )
599
+ denominator = H_sum[np.newaxis, :]
600
+
601
+ else:
602
+ # computation of WHHt = dot(dot(W, H) ** (beta_loss - 1), H.T)
603
+ if sp.issparse(X):
604
+ # memory efficient computation
605
+ # (compute row by row, avoiding the dense matrix WH)
606
+ WHHt = np.empty(W.shape)
607
+ for i in range(X.shape[0]):
608
+ WHi = np.dot(W[i, :], H)
609
+ if beta_loss - 1 < 0:
610
+ WHi[WHi < EPSILON] = EPSILON
611
+ WHi **= beta_loss - 1
612
+ WHHt[i, :] = np.dot(WHi, H.T)
613
+ else:
614
+ WH **= beta_loss - 1
615
+ WHHt = np.dot(WH, H.T)
616
+ denominator = WHHt
617
+
618
+ # Add L1 and L2 regularization
619
+ if l1_reg_W > 0:
620
+ denominator += l1_reg_W
621
+ if l2_reg_W > 0:
622
+ denominator = denominator + l2_reg_W * W
623
+ denominator[denominator == 0] = EPSILON
624
+
625
+ numerator /= denominator
626
+ delta_W = numerator
627
+
628
+ # gamma is in ]0, 1]
629
+ if gamma != 1:
630
+ delta_W **= gamma
631
+
632
+ W *= delta_W
633
+
634
+ return W, H_sum, HHt, XHt
635
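+ # Annotation: for the Frobenius loss (beta_loss=2, gamma=1) this reduces to the
+ # classical Lee & Seung multiplicative update
+ #   W <- W * (X @ H.T) / (W @ H @ H.T),
+ # with the L1/L2 penalties added to the denominator before the division.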
+
636
+
637
+ def _multiplicative_update_h(
638
+ X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None
639
+ ):
640
+ """update H in Multiplicative Update NMF."""
641
+ if beta_loss == 2:
642
+ numerator = safe_sparse_dot(W.T, X)
643
+ denominator = np.linalg.multi_dot([W.T, W, H])
644
+
645
+ else:
646
+ # Numerator
647
+ WH_safe_X = _special_sparse_dot(W, H, X)
648
+ if sp.issparse(X):
649
+ WH_safe_X_data = WH_safe_X.data
650
+ X_data = X.data
651
+ else:
652
+ WH_safe_X_data = WH_safe_X
653
+ X_data = X
654
+ # copy used in the Denominator
655
+ WH = WH_safe_X.copy()
656
+ if beta_loss - 1.0 < 0:
657
+ WH[WH < EPSILON] = EPSILON
658
+
659
+ # to avoid division by zero
660
+ if beta_loss - 2.0 < 0:
661
+ WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
662
+
663
+ if beta_loss == 1:
664
+ np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
665
+ elif beta_loss == 0:
666
+ # speeds up computation time
667
+ # refer to /numpy/numpy/issues/9363
668
+ WH_safe_X_data **= -1
669
+ WH_safe_X_data **= 2
670
+ # element-wise multiplication
671
+ WH_safe_X_data *= X_data
672
+ else:
673
+ WH_safe_X_data **= beta_loss - 2
674
+ # element-wise multiplication
675
+ WH_safe_X_data *= X_data
676
+
677
+ # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
678
+ numerator = safe_sparse_dot(W.T, WH_safe_X)
679
+
680
+ # Denominator
681
+ if beta_loss == 1:
682
+ W_sum = np.sum(W, axis=0) # shape(n_components, )
683
+ W_sum[W_sum == 0] = 1.0
684
+ denominator = W_sum[:, np.newaxis]
685
+
686
+ # beta_loss not in (1, 2)
687
+ else:
688
+ # computation of WtWH = dot(W.T, dot(W, H) ** (beta_loss - 1))
689
+ if sp.issparse(X):
690
+ # memory efficient computation
691
+ # (compute column by column, avoiding the dense matrix WH)
692
+ WtWH = np.empty(H.shape)
693
+ for i in range(X.shape[1]):
694
+ WHi = np.dot(W, H[:, i])
695
+ if beta_loss - 1 < 0:
696
+ WHi[WHi < EPSILON] = EPSILON
697
+ WHi **= beta_loss - 1
698
+ WtWH[:, i] = np.dot(W.T, WHi)
699
+ else:
700
+ WH **= beta_loss - 1
701
+ WtWH = np.dot(W.T, WH)
702
+ denominator = WtWH
703
+
704
+ # Add L1 and L2 regularization
705
+ if l1_reg_H > 0:
706
+ denominator += l1_reg_H
707
+ if l2_reg_H > 0:
708
+ denominator = denominator + l2_reg_H * H
709
+ denominator[denominator == 0] = EPSILON
710
+
711
+ if A is not None and B is not None:
712
+ # Updates for the online nmf
713
+ if gamma != 1:
714
+ H **= 1 / gamma
715
+ numerator *= H
716
+ A *= rho
717
+ B *= rho
718
+ A += numerator
719
+ B += denominator
720
+ H = A / B
721
+
722
+ if gamma != 1:
723
+ H **= gamma
724
+ else:
725
+ delta_H = numerator
726
+ delta_H /= denominator
727
+ if gamma != 1:
728
+ delta_H **= gamma
729
+ H *= delta_H
730
+
731
+ return H
732
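+ # Annotation: when A and B are provided (mini-batch case), H is not updated
+ # multiplicatively in place; instead A and B accumulate the numerator and
+ # denominator statistics discounted by rho, and H is recomputed as A / B,
+ # which gives the online update used by the mini-batch solver.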
+
733
+
734
+ def _fit_multiplicative_update(
735
+ X,
736
+ W,
737
+ H,
738
+ beta_loss="frobenius",
739
+ max_iter=200,
740
+ tol=1e-4,
741
+ l1_reg_W=0,
742
+ l1_reg_H=0,
743
+ l2_reg_W=0,
744
+ l2_reg_H=0,
745
+ update_H=True,
746
+ verbose=0,
747
+ ):
748
+ """Compute Non-negative Matrix Factorization with Multiplicative Update.
749
+
750
+ The objective function is _beta_divergence(X, WH) and is minimized with an
751
+ alternating minimization of W and H. Each minimization is done with a
752
+ Multiplicative Update.
753
+
754
+ Parameters
755
+ ----------
756
+ X : array-like of shape (n_samples, n_features)
757
+ Constant input matrix.
758
+
759
+ W : array-like of shape (n_samples, n_components)
760
+ Initial guess for the solution.
761
+
762
+ H : array-like of shape (n_components, n_features)
763
+ Initial guess for the solution.
764
+
765
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
766
+ 'itakura-saito'}, default='frobenius'
767
+ String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
768
+ Beta divergence to be minimized, measuring the distance between X
769
+ and the dot product WH. Note that values different from 'frobenius'
770
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
771
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
772
+ matrix X cannot contain zeros.
773
+
774
+ max_iter : int, default=200
775
+ Maximum number of iterations before timing out.
776
+
777
+ tol : float, default=1e-4
778
+ Tolerance of the stopping condition.
779
+
780
+ l1_reg_W : float, default=0.
781
+ L1 regularization parameter for W.
782
+
783
+ l1_reg_H : float, default=0.
784
+ L1 regularization parameter for H.
785
+
786
+ l2_reg_W : float, default=0.
787
+ L2 regularization parameter for W.
788
+
789
+ l2_reg_H : float, default=0.
790
+ L2 regularization parameter for H.
791
+
792
+ update_H : bool, default=True
793
+ If True, both W and H will be estimated from initial guesses.
794
+ If False, only W will be estimated.
795
+
796
+ verbose : int, default=0
797
+ The verbosity level.
798
+
799
+ Returns
800
+ -------
801
+ W : ndarray of shape (n_samples, n_components)
802
+ Solution to the non-negative least squares problem.
803
+
804
+ H : ndarray of shape (n_components, n_features)
805
+ Solution to the non-negative least squares problem.
806
+
807
+ n_iter : int
808
+ The number of iterations done by the algorithm.
809
+
810
+ References
811
+ ----------
812
+ Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix
813
+ Factorization. Adv. Neural Inform. Process. Syst.. 13.
814
+ Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
815
+ factorization with the beta-divergence. Neural Computation, 23(9).
816
+ """
817
+ start_time = time.time()
818
+
819
+ beta_loss = _beta_loss_to_float(beta_loss)
820
+
821
+ # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
822
+ if beta_loss < 1:
823
+ gamma = 1.0 / (2.0 - beta_loss)
824
+ elif beta_loss > 2:
825
+ gamma = 1.0 / (beta_loss - 1.0)
826
+ else:
827
+ gamma = 1.0
828
+
829
+ # used for the convergence criterion
830
+ error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
831
+ previous_error = error_at_init
832
+
833
+ H_sum, HHt, XHt = None, None, None
834
+ for n_iter in range(1, max_iter + 1):
835
+ # update W
836
+ # H_sum, HHt and XHt are saved and reused if not update_H
837
+ W, H_sum, HHt, XHt = _multiplicative_update_w(
838
+ X,
839
+ W,
840
+ H,
841
+ beta_loss=beta_loss,
842
+ l1_reg_W=l1_reg_W,
843
+ l2_reg_W=l2_reg_W,
844
+ gamma=gamma,
845
+ H_sum=H_sum,
846
+ HHt=HHt,
847
+ XHt=XHt,
848
+ update_H=update_H,
849
+ )
850
+
851
+ # necessary for stability with beta_loss < 1
852
+ if beta_loss < 1:
853
+ W[W < np.finfo(np.float64).eps] = 0.0
854
+
855
+ # update H (only at fit or fit_transform)
856
+ if update_H:
857
+ H = _multiplicative_update_h(
858
+ X,
859
+ W,
860
+ H,
861
+ beta_loss=beta_loss,
862
+ l1_reg_H=l1_reg_H,
863
+ l2_reg_H=l2_reg_H,
864
+ gamma=gamma,
865
+ )
866
+
867
+ # These values will be recomputed since H changed
868
+ H_sum, HHt, XHt = None, None, None
869
+
870
+ # necessary for stability with beta_loss < 1
871
+ if beta_loss <= 1:
872
+ H[H < np.finfo(np.float64).eps] = 0.0
873
+
874
+ # test convergence criterion every 10 iterations
875
+ if tol > 0 and n_iter % 10 == 0:
876
+ error = _beta_divergence(X, W, H, beta_loss, square_root=True)
877
+
878
+ if verbose:
879
+ iter_time = time.time()
880
+ print(
881
+ "Epoch %02d reached after %.3f seconds, error: %f"
882
+ % (n_iter, iter_time - start_time, error)
883
+ )
884
+
885
+ if (previous_error - error) / error_at_init < tol:
886
+ break
887
+ previous_error = error
888
+
889
+ # do not print if we have already printed in the convergence test
890
+ if verbose and (tol == 0 or n_iter % 10 != 0):
891
+ end_time = time.time()
892
+ print(
893
+ "Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)
894
+ )
895
+
896
+ return W, H, n_iter
897
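+ # Annotation: gamma is the majorize-minimize exponent from Fevotte & Idier (2011):
+ # it is 1 for 1 <= beta <= 2 (plain multiplicative updates), while for beta < 1 the
+ # updates are raised to gamma = 1/(2 - beta) and for beta > 2 to gamma = 1/(beta - 1)
+ # to keep the surrogate objective decreasing.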
+
898
+
899
+ @validate_params(
900
+ {
901
+ "X": ["array-like", "sparse matrix"],
902
+ "W": ["array-like", None],
903
+ "H": ["array-like", None],
904
+ "update_H": ["boolean"],
905
+ },
906
+ prefer_skip_nested_validation=False,
907
+ )
908
+ def non_negative_factorization(
909
+ X,
910
+ W=None,
911
+ H=None,
912
+ n_components="warn",
913
+ *,
914
+ init=None,
915
+ update_H=True,
916
+ solver="cd",
917
+ beta_loss="frobenius",
918
+ tol=1e-4,
919
+ max_iter=200,
920
+ alpha_W=0.0,
921
+ alpha_H="same",
922
+ l1_ratio=0.0,
923
+ random_state=None,
924
+ verbose=0,
925
+ shuffle=False,
926
+ ):
927
+ """Compute Non-negative Matrix Factorization (NMF).
928
+
929
+ Find two non-negative matrices (W, H) whose product approximates the non-
930
+ negative matrix X. This factorization can be used for example for
931
+ dimensionality reduction, source separation or topic extraction.
932
+
933
+ The objective function is:
934
+
935
+ .. math::
936
+
937
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
938
+
939
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
940
+
941
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
942
+
943
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
944
+
945
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
946
+
947
+ Where:
948
+
949
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
950
+
951
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
952
+
953
+ The generic norm :math:`||X - WH||_{loss}^2` may represent
954
+ the Frobenius norm or another supported beta-divergence loss.
955
+ The choice between options is controlled by the `beta_loss` parameter.
956
+
957
+ The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
958
+ `H` to keep their impact balanced with respect to one another and to the data fit
959
+ term as independent as possible of the size `n_samples` of the training set.
960
+
961
+ The objective function is minimized with an alternating minimization of W
962
+ and H. If H is given and update_H=False, it solves for W only.
963
+
964
+ Note that the transformed data is named W and the components matrix is named H. In
965
+ the NMF literature, the naming convention is usually the opposite since the data
966
+ matrix X is transposed.
967
+
968
+ Parameters
969
+ ----------
970
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
971
+ Constant matrix.
972
+
973
+ W : array-like of shape (n_samples, n_components), default=None
974
+ If `init='custom'`, it is used as initial guess for the solution.
975
+ If `update_H=False`, it is initialised as an array of zeros, unless
976
+ `solver='mu'`, then it is filled with values calculated by
977
+ `np.sqrt(X.mean() / self._n_components)`.
978
+ If `None`, uses the initialisation method specified in `init`.
979
+
980
+ H : array-like of shape (n_components, n_features), default=None
981
+ If `init='custom'`, it is used as initial guess for the solution.
982
+ If `update_H=False`, it is used as a constant, to solve for W only.
983
+ If `None`, uses the initialisation method specified in `init`.
984
+
985
+ n_components : int or {'auto'} or None, default=None
986
+ Number of components, if n_components is not set all features
987
+ are kept.
988
+ If `n_components='auto'`, the number of components is automatically inferred
989
+ from `W` or `H` shapes.
990
+
991
+ .. versionchanged:: 1.4
992
+ Added `'auto'` value.
993
+
994
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
995
+ Method used to initialize the procedure.
996
+
997
+ Valid options:
998
+
999
+ - None: 'nndsvda' if n_components <= min(n_samples, n_features), otherwise 'random'.
1000
+ - 'random': non-negative random matrices, scaled with:
1001
+ `sqrt(X.mean() / n_components)`
1002
+ - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
1003
+ initialization (better for sparseness)
1004
+ - 'nndsvda': NNDSVD with zeros filled with the average of X
1005
+ (better when sparsity is not desired)
1006
+ - 'nndsvdar': NNDSVD with zeros filled with small random values
1007
+ (generally faster, less accurate alternative to NNDSVDa
1008
+ for when sparsity is not desired)
1009
+ - 'custom': If `update_H=True`, use custom matrices W and H which must both
1010
+ be provided. If `update_H=False`, then only custom matrix H is used.
1011
+
1012
+ .. versionchanged:: 0.23
1013
+ The default value of `init` changed from 'random' to None in 0.23.
1014
+
1015
+ .. versionchanged:: 1.1
1016
+ When `init=None` and `n_components` is less than `n_samples` and `n_features`,
1017
+ it defaults to `nndsvda` instead of `nndsvd`.
1018
+
1019
+ update_H : bool, default=True
1020
+ If True, both W and H will be estimated from initial guesses.
1021
+ If False, only W will be estimated.
1022
+
1023
+ solver : {'cd', 'mu'}, default='cd'
1024
+ Numerical solver to use:
1025
+
1026
+ - 'cd' is a Coordinate Descent solver that uses Fast Hierarchical
1027
+ Alternating Least Squares (Fast HALS).
1028
+ - 'mu' is a Multiplicative Update solver.
1029
+
1030
+ .. versionadded:: 0.17
1031
+ Coordinate Descent solver.
1032
+
1033
+ .. versionadded:: 0.19
1034
+ Multiplicative Update solver.
1035
+
1036
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1037
+ 'itakura-saito'}, default='frobenius'
1038
+ Beta divergence to be minimized, measuring the distance between X
1039
+ and the dot product WH. Note that values different from 'frobenius'
1040
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1041
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
1042
+ matrix X cannot contain zeros. Used only in 'mu' solver.
1043
+
1044
+ .. versionadded:: 0.19
1045
+
1046
+ tol : float, default=1e-4
1047
+ Tolerance of the stopping condition.
1048
+
1049
+ max_iter : int, default=200
1050
+ Maximum number of iterations before timing out.
1051
+
1052
+ alpha_W : float, default=0.0
1053
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1054
+ (default) to have no regularization on `W`.
1055
+
1056
+ .. versionadded:: 1.0
1057
+
1058
+ alpha_H : float or "same", default="same"
1059
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1060
+ have no regularization on `H`. If "same" (default), it takes the same value as
1061
+ `alpha_W`.
1062
+
1063
+ .. versionadded:: 1.0
1064
+
1065
+ l1_ratio : float, default=0.0
1066
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1067
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1068
+ (aka Frobenius Norm).
1069
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1070
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1071
+
1072
+ random_state : int, RandomState instance or None, default=None
1073
+ Used for NMF initialisation (when ``init`` == 'nndsvdar' or
1074
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1075
+ results across multiple function calls.
1076
+ See :term:`Glossary <random_state>`.
1077
+
1078
+ verbose : int, default=0
1079
+ The verbosity level.
1080
+
1081
+ shuffle : bool, default=False
1082
+ If true, randomize the order of coordinates in the CD solver.
1083
+
1084
+ Returns
1085
+ -------
1086
+ W : ndarray of shape (n_samples, n_components)
1087
+ Solution to the non-negative least squares problem.
1088
+
1089
+ H : ndarray of shape (n_components, n_features)
1090
+ Solution to the non-negative least squares problem.
1091
+
1092
+ n_iter : int
1093
+ Actual number of iterations.
1094
+
1095
+ References
1096
+ ----------
1097
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1098
+ factorizations" <10.1587/transfun.E92.A.708>`
1099
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1100
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1101
+
1102
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1103
+ beta-divergence" <10.1162/NECO_a_00168>`
1104
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1105
+
1106
+ Examples
1107
+ --------
1108
+ >>> import numpy as np
1109
+ >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
1110
+ >>> from sklearn.decomposition import non_negative_factorization
1111
+ >>> W, H, n_iter = non_negative_factorization(
1112
+ ... X, n_components=2, init='random', random_state=0)
1113
+ """
1114
+ est = NMF(
1115
+ n_components=n_components,
1116
+ init=init,
1117
+ solver=solver,
1118
+ beta_loss=beta_loss,
1119
+ tol=tol,
1120
+ max_iter=max_iter,
1121
+ random_state=random_state,
1122
+ alpha_W=alpha_W,
1123
+ alpha_H=alpha_H,
1124
+ l1_ratio=l1_ratio,
1125
+ verbose=verbose,
1126
+ shuffle=shuffle,
1127
+ )
1128
+ est._validate_params()
1129
+
1130
+ X = check_array(X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32])
1131
+
1132
+ with config_context(assume_finite=True):
1133
+ W, H, n_iter = est._fit_transform(X, W=W, H=H, update_H=update_H)
1134
+
1135
+ return W, H, n_iter
1136
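+ # Minimal sketch (annotation, illustrative names only): with update_H=False the
+ # function solves for W against a fixed H, e.g. to project new data onto learned
+ # components:
+ #   W_new, _, _ = non_negative_factorization(
+ #       X_new, H=H_fixed, n_components=H_fixed.shape[0],
+ #       update_H=False, init=None, random_state=0)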
+
1137
+
1138
+ class _BaseNMF(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, ABC):
1139
+ """Base class for NMF and MiniBatchNMF."""
1140
+
1141
+ # This prevents ``set_split_inverse_transform`` from being generated for the
1142
+ # non-standard ``W`` arg on ``inverse_transform``.
1143
+ # TODO: remove when W is removed in v1.5 for inverse_transform
1144
+ __metadata_request__inverse_transform = {"W": metadata_routing.UNUSED}
1145
+
1146
+ _parameter_constraints: dict = {
1147
+ "n_components": [
1148
+ Interval(Integral, 1, None, closed="left"),
1149
+ None,
1150
+ StrOptions({"auto"}),
1151
+ Hidden(StrOptions({"warn"})),
1152
+ ],
1153
+ "init": [
1154
+ StrOptions({"random", "nndsvd", "nndsvda", "nndsvdar", "custom"}),
1155
+ None,
1156
+ ],
1157
+ "beta_loss": [
1158
+ StrOptions({"frobenius", "kullback-leibler", "itakura-saito"}),
1159
+ Real,
1160
+ ],
1161
+ "tol": [Interval(Real, 0, None, closed="left")],
1162
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
1163
+ "random_state": ["random_state"],
1164
+ "alpha_W": [Interval(Real, 0, None, closed="left")],
1165
+ "alpha_H": [Interval(Real, 0, None, closed="left"), StrOptions({"same"})],
1166
+ "l1_ratio": [Interval(Real, 0, 1, closed="both")],
1167
+ "verbose": ["verbose"],
1168
+ }
1169
+
1170
+ def __init__(
1171
+ self,
1172
+ n_components="warn",
1173
+ *,
1174
+ init=None,
1175
+ beta_loss="frobenius",
1176
+ tol=1e-4,
1177
+ max_iter=200,
1178
+ random_state=None,
1179
+ alpha_W=0.0,
1180
+ alpha_H="same",
1181
+ l1_ratio=0.0,
1182
+ verbose=0,
1183
+ ):
1184
+ self.n_components = n_components
1185
+ self.init = init
1186
+ self.beta_loss = beta_loss
1187
+ self.tol = tol
1188
+ self.max_iter = max_iter
1189
+ self.random_state = random_state
1190
+ self.alpha_W = alpha_W
1191
+ self.alpha_H = alpha_H
1192
+ self.l1_ratio = l1_ratio
1193
+ self.verbose = verbose
1194
+
1195
+ def _check_params(self, X):
1196
+ # n_components
1197
+ self._n_components = self.n_components
1198
+ if self.n_components == "warn":
1199
+ warnings.warn(
1200
+ (
1201
+ "The default value of `n_components` will change from `None` to"
1202
+ " `'auto'` in 1.6. Set the value of `n_components` to `None`"
1203
+ " explicitly to suppress the warning."
1204
+ ),
1205
+ FutureWarning,
1206
+ )
1207
+ self._n_components = None # Keeping the old default value
1208
+ if self._n_components is None:
1209
+ self._n_components = X.shape[1]
1210
+
1211
+ # beta_loss
1212
+ self._beta_loss = _beta_loss_to_float(self.beta_loss)
1213
+
1214
+ def _check_w_h(self, X, W, H, update_H):
1215
+ """Check W and H, or initialize them."""
1216
+ n_samples, n_features = X.shape
1217
+
1218
+ if self.init == "custom" and update_H:
1219
+ _check_init(H, (self._n_components, n_features), "NMF (input H)")
1220
+ _check_init(W, (n_samples, self._n_components), "NMF (input W)")
1221
+ if self._n_components == "auto":
1222
+ self._n_components = H.shape[0]
1223
+
1224
+ if H.dtype != X.dtype or W.dtype != X.dtype:
1225
+ raise TypeError(
1226
+ "H and W should have the same dtype as X. Got "
1227
+ "H.dtype = {} and W.dtype = {}.".format(H.dtype, W.dtype)
1228
+ )
1229
+
1230
+ elif not update_H:
1231
+ if W is not None:
1232
+ warnings.warn(
1233
+ "When update_H=False, the provided initial W is not used.",
1234
+ RuntimeWarning,
1235
+ )
1236
+
1237
+ _check_init(H, (self._n_components, n_features), "NMF (input H)")
1238
+ if self._n_components == "auto":
1239
+ self._n_components = H.shape[0]
1240
+
1241
+ if H.dtype != X.dtype:
1242
+ raise TypeError(
1243
+ "H should have the same dtype as X. Got H.dtype = {}.".format(
1244
+ H.dtype
1245
+ )
1246
+ )
1247
+
1248
+ # 'mu' solver should not be initialized by zeros
1249
+ if self.solver == "mu":
1250
+ avg = np.sqrt(X.mean() / self._n_components)
1251
+ W = np.full((n_samples, self._n_components), avg, dtype=X.dtype)
1252
+ else:
1253
+ W = np.zeros((n_samples, self._n_components), dtype=X.dtype)
1254
+
1255
+ else:
1256
+ if W is not None or H is not None:
1257
+ warnings.warn(
1258
+ (
1259
+ "When init!='custom', provided W or H are ignored. Set "
1260
+ " init='custom' to use them as initialization."
1261
+ ),
1262
+ RuntimeWarning,
1263
+ )
1264
+
1265
+ if self._n_components == "auto":
1266
+ self._n_components = X.shape[1]
1267
+
1268
+ W, H = _initialize_nmf(
1269
+ X, self._n_components, init=self.init, random_state=self.random_state
1270
+ )
1271
+
1272
+ return W, H
1273
+
1274
+ def _compute_regularization(self, X):
1275
+ """Compute scaled regularization terms."""
1276
+ n_samples, n_features = X.shape
1277
+ alpha_W = self.alpha_W
1278
+ alpha_H = self.alpha_W if self.alpha_H == "same" else self.alpha_H
1279
+
1280
+ l1_reg_W = n_features * alpha_W * self.l1_ratio
1281
+ l1_reg_H = n_samples * alpha_H * self.l1_ratio
1282
+ l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio)
1283
+ l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio)
1284
+
1285
+ return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
1286
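+ # Annotation (illustrative numbers): with alpha_W=0.1, alpha_H="same",
+ # l1_ratio=0.5 and X of shape (100, 20), this yields
+ #   l1_reg_W = 20 * 0.1 * 0.5 = 1.0,   l2_reg_W = 20 * 0.1 * 0.5 = 1.0,
+ #   l1_reg_H = 100 * 0.1 * 0.5 = 5.0,  l2_reg_H = 100 * 0.1 * 0.5 = 5.0,
+ # matching the n_features / n_samples scaling described in the class docstring.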
+
1287
+ def fit(self, X, y=None, **params):
1288
+ """Learn a NMF model for the data X.
1289
+
1290
+ Parameters
1291
+ ----------
1292
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1293
+ Training vector, where `n_samples` is the number of samples
1294
+ and `n_features` is the number of features.
1295
+
1296
+ y : Ignored
1297
+ Not used, present for API consistency by convention.
1298
+
1299
+ **params : kwargs
1300
+ Parameters (keyword arguments) and values passed to
1301
+ the fit_transform instance.
1302
+
1303
+ Returns
1304
+ -------
1305
+ self : object
1306
+ Returns the instance itself.
1307
+ """
1308
+ # param validation is done in fit_transform
1309
+
1310
+ self.fit_transform(X, **params)
1311
+ return self
1312
+
1313
+ def inverse_transform(self, Xt=None, W=None):
1314
+ """Transform data back to its original space.
1315
+
1316
+ .. versionadded:: 0.18
1317
+
1318
+ Parameters
1319
+ ----------
1320
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_components)
1321
+ Transformed data matrix.
1322
+
1323
+ W : deprecated
1324
+ Use `Xt` instead.
1325
+
1326
+ .. deprecated:: 1.3
1327
+
1328
+ Returns
1329
+ -------
1330
+ X : ndarray of shape (n_samples, n_features)
1331
+ Returns a data matrix of the original shape.
1332
+ """
1333
+ if Xt is None and W is None:
1334
+ raise TypeError("Missing required positional argument: Xt")
1335
+
1336
+ if W is not None and Xt is not None:
1337
+ raise ValueError("Please provide only `Xt`, and not `W`.")
1338
+
1339
+ if W is not None:
1340
+ warnings.warn(
1341
+ (
1342
+ "Input argument `W` was renamed to `Xt` in v1.3 and will be removed"
1343
+ " in v1.5."
1344
+ ),
1345
+ FutureWarning,
1346
+ )
1347
+ Xt = W
1348
+
1349
+ check_is_fitted(self)
1350
+ return Xt @ self.components_
1351
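+ # Annotation: inverse_transform simply recombines the factors, so for a fitted
+ # model, model.inverse_transform(model.transform(X)) approximates X (exactly only
+ # when the factorization error is zero).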
+
1352
+ @property
1353
+ def _n_features_out(self):
1354
+ """Number of transformed output features."""
1355
+ return self.components_.shape[0]
1356
+
1357
+ def _more_tags(self):
1358
+ return {
1359
+ "requires_positive_X": True,
1360
+ "preserves_dtype": [np.float64, np.float32],
1361
+ }
1362
+
1363
+
1364
+ class NMF(_BaseNMF):
1365
+ """Non-Negative Matrix Factorization (NMF).
1366
+
1367
+ Find two non-negative matrices, i.e. matrices with all non-negative elements, (W, H)
1368
+ whose product approximates the non-negative matrix X. This factorization can be used
1369
+ for example for dimensionality reduction, source separation or topic extraction.
1370
+
1371
+ The objective function is:
1372
+
1373
+ .. math::
1374
+
1375
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
1376
+
1377
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
1378
+
1379
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
1380
+
1381
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
1382
+
1383
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
1384
+
1385
+ Where:
1386
+
1387
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
1388
+
1389
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
1390
+
1391
+ The generic norm :math:`||X - WH||_{loss}` may represent
1392
+ the Frobenius norm or another supported beta-divergence loss.
1393
+ The choice between options is controlled by the `beta_loss` parameter.
1394
+
1395
+ The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
1396
+ `H` to keep their impact balanced with respect to one another and to the data fit
1397
+ term as independent as possible of the size `n_samples` of the training set.
1398
+
1399
+ The objective function is minimized with an alternating minimization of W
1400
+ and H.
1401
+
1402
+ Note that the transformed data is named W and the components matrix is named H. In
1403
+ the NMF literature, the naming convention is usually the opposite since the data
1404
+ matrix X is transposed.
1405
+
1406
+ Read more in the :ref:`User Guide <NMF>`.
1407
+
1408
+ Parameters
1409
+ ----------
1410
+ n_components : int or {'auto'} or None, default=None
1411
+ Number of components. If `n_components` is not set, all features
1412
+ are kept.
1413
+ If `n_components='auto'`, the number of components is automatically inferred
1414
+ from W or H shapes.
1415
+
1416
+ .. versionchanged:: 1.4
1417
+ Added `'auto'` value.
1418
+
1419
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
1420
+ Method used to initialize the procedure.
1421
+ Valid options:
1422
+
1423
+ - `None`: 'nndsvda' if n_components <= min(n_samples, n_features),
1424
+ otherwise random.
1425
+
1426
+ - `'random'`: non-negative random matrices, scaled with:
1427
+ `sqrt(X.mean() / n_components)`
1428
+
1429
+ - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)
1430
+ initialization (better for sparseness)
1431
+
1432
+ - `'nndsvda'`: NNDSVD with zeros filled with the average of X
1433
+ (better when sparsity is not desired)
1434
+
1435
+ - `'nndsvdar'` NNDSVD with zeros filled with small random values
1436
+ (generally faster, less accurate alternative to NNDSVDa
1437
+ for when sparsity is not desired)
1438
+
1439
+ - `'custom'`: Use custom matrices `W` and `H` which must both be provided.
1440
+
1441
+ .. versionchanged:: 1.1
1442
+ When `init=None` and `n_components` is less than `n_samples` and `n_features`,
1443
+ it defaults to `nndsvda` instead of `nndsvd`.
1444
+
1445
+ solver : {'cd', 'mu'}, default='cd'
1446
+ Numerical solver to use:
1447
+
1448
+ - 'cd' is a Coordinate Descent solver.
1449
+ - 'mu' is a Multiplicative Update solver.
1450
+
1451
+ .. versionadded:: 0.17
1452
+ Coordinate Descent solver.
1453
+
1454
+ .. versionadded:: 0.19
1455
+ Multiplicative Update solver.
1456
+
1457
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1458
+ 'itakura-saito'}, default='frobenius'
1459
+ Beta divergence to be minimized, measuring the distance between X
1460
+ and the dot product WH. Note that values different from 'frobenius'
1461
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1462
+ fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
1463
+ matrix X cannot contain zeros. Used only in 'mu' solver.
1464
+
1465
+ .. versionadded:: 0.19
1466
+
1467
+ tol : float, default=1e-4
1468
+ Tolerance of the stopping condition.
1469
+
1470
+ max_iter : int, default=200
1471
+ Maximum number of iterations before timing out.
1472
+
1473
+ random_state : int, RandomState instance or None, default=None
1474
+ Used for initialisation (when ``init`` == 'nndsvdar' or
1475
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1476
+ results across multiple function calls.
1477
+ See :term:`Glossary <random_state>`.
1478
+
1479
+ alpha_W : float, default=0.0
1480
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1481
+ (default) to have no regularization on `W`.
1482
+
1483
+ .. versionadded:: 1.0
1484
+
1485
+ alpha_H : float or "same", default="same"
1486
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1487
+ have no regularization on `H`. If "same" (default), it takes the same value as
1488
+ `alpha_W`.
1489
+
1490
+ .. versionadded:: 1.0
1491
+
1492
+ l1_ratio : float, default=0.0
1493
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1494
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1495
+ (aka Frobenius Norm).
1496
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1497
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1498
+
1499
+ .. versionadded:: 0.17
1500
+ Regularization parameter *l1_ratio* used in the Coordinate Descent
1501
+ solver.
1502
+
1503
+ verbose : int, default=0
1504
+ Whether to be verbose.
1505
+
1506
+ shuffle : bool, default=False
1507
+ If true, randomize the order of coordinates in the CD solver.
1508
+
1509
+ .. versionadded:: 0.17
1510
+ *shuffle* parameter used in the Coordinate Descent solver.
1511
+
1512
+ Attributes
1513
+ ----------
1514
+ components_ : ndarray of shape (n_components, n_features)
1515
+ Factorization matrix, sometimes called 'dictionary'.
1516
+
1517
+ n_components_ : int
1518
+ The number of components. It is same as the `n_components` parameter
1519
+ if it was given. Otherwise, it will be same as the number of
1520
+ features.
1521
+
1522
+ reconstruction_err_ : float
1523
+ Frobenius norm of the matrix difference, or beta-divergence, between
1524
+ the training data ``X`` and the reconstructed data ``WH`` from
1525
+ the fitted model.
1526
+
1527
+ n_iter_ : int
1528
+ Actual number of iterations.
1529
+
1530
+ n_features_in_ : int
1531
+ Number of features seen during :term:`fit`.
1532
+
1533
+ .. versionadded:: 0.24
1534
+
1535
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1536
+ Names of features seen during :term:`fit`. Defined only when `X`
1537
+ has feature names that are all strings.
1538
+
1539
+ .. versionadded:: 1.0
1540
+
1541
+ See Also
1542
+ --------
1543
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
1544
+ MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1545
+ PCA : Principal component analysis.
1546
+ SparseCoder : Find a sparse representation of data from a fixed,
1547
+ precomputed dictionary.
1548
+ SparsePCA : Sparse Principal Components Analysis.
1549
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
1550
+
1551
+ References
1552
+ ----------
1553
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1554
+ factorizations" <10.1587/transfun.E92.A.708>`
1555
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1556
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1557
+
1558
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1559
+ beta-divergence" <10.1162/NECO_a_00168>`
1560
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1561
+
1562
+ Examples
1563
+ --------
1564
+ >>> import numpy as np
1565
+ >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
1566
+ >>> from sklearn.decomposition import NMF
1567
+ >>> model = NMF(n_components=2, init='random', random_state=0)
1568
+ >>> W = model.fit_transform(X)
1569
+ >>> H = model.components_
1570
+ """
1571
+
1572
+ _parameter_constraints: dict = {
1573
+ **_BaseNMF._parameter_constraints,
1574
+ "solver": [StrOptions({"mu", "cd"})],
1575
+ "shuffle": ["boolean"],
1576
+ }
1577
+
1578
+ def __init__(
1579
+ self,
1580
+ n_components="warn",
1581
+ *,
1582
+ init=None,
1583
+ solver="cd",
1584
+ beta_loss="frobenius",
1585
+ tol=1e-4,
1586
+ max_iter=200,
1587
+ random_state=None,
1588
+ alpha_W=0.0,
1589
+ alpha_H="same",
1590
+ l1_ratio=0.0,
1591
+ verbose=0,
1592
+ shuffle=False,
1593
+ ):
1594
+ super().__init__(
1595
+ n_components=n_components,
1596
+ init=init,
1597
+ beta_loss=beta_loss,
1598
+ tol=tol,
1599
+ max_iter=max_iter,
1600
+ random_state=random_state,
1601
+ alpha_W=alpha_W,
1602
+ alpha_H=alpha_H,
1603
+ l1_ratio=l1_ratio,
1604
+ verbose=verbose,
1605
+ )
1606
+
1607
+ self.solver = solver
1608
+ self.shuffle = shuffle
1609
+
1610
+ def _check_params(self, X):
1611
+ super()._check_params(X)
1612
+
1613
+ # solver
1614
+ if self.solver != "mu" and self.beta_loss not in (2, "frobenius"):
1615
+ # 'mu' is the only solver that handles other beta losses than 'frobenius'
1616
+ raise ValueError(
1617
+ f"Invalid beta_loss parameter: solver {self.solver!r} does not handle "
1618
+ f"beta_loss = {self.beta_loss!r}"
1619
+ )
1620
+ if self.solver == "mu" and self.init == "nndsvd":
1621
+ warnings.warn(
1622
+ (
1623
+ "The multiplicative update ('mu') solver cannot update "
1624
+ "zeros present in the initialization, and so leads to "
1625
+ "poorer results when used jointly with init='nndsvd'. "
1626
+ "You may try init='nndsvda' or init='nndsvdar' instead."
1627
+ ),
1628
+ UserWarning,
1629
+ )
1630
+
1631
+ return self
1632
+
1633
+ @_fit_context(prefer_skip_nested_validation=True)
1634
+ def fit_transform(self, X, y=None, W=None, H=None):
1635
+ """Learn a NMF model for the data X and returns the transformed data.
1636
+
1637
+ This is more efficient than calling fit followed by transform.
1638
+
1639
+ Parameters
1640
+ ----------
1641
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1642
+ Training vector, where `n_samples` is the number of samples
1643
+ and `n_features` is the number of features.
1644
+
1645
+ y : Ignored
1646
+ Not used, present for API consistency by convention.
1647
+
1648
+ W : array-like of shape (n_samples, n_components), default=None
1649
+ If `init='custom'`, it is used as initial guess for the solution.
1650
+ If `None`, uses the initialisation method specified in `init`.
1651
+
1652
+ H : array-like of shape (n_components, n_features), default=None
1653
+ If `init='custom'`, it is used as initial guess for the solution.
1654
+ If `None`, uses the initialisation method specified in `init`.
1655
+
1656
+ Returns
1657
+ -------
1658
+ W : ndarray of shape (n_samples, n_components)
1659
+ Transformed data.
1660
+ """
1661
+ X = self._validate_data(
1662
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
1663
+ )
1664
+
1665
+ with config_context(assume_finite=True):
1666
+ W, H, n_iter = self._fit_transform(X, W=W, H=H)
1667
+
1668
+ self.reconstruction_err_ = _beta_divergence(
1669
+ X, W, H, self._beta_loss, square_root=True
1670
+ )
1671
+
1672
+ self.n_components_ = H.shape[0]
1673
+ self.components_ = H
1674
+ self.n_iter_ = n_iter
1675
+
1676
+ return W
1677
+
1678
+ def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
1679
+ """Learn a NMF model for the data X and returns the transformed data.
1680
+
1681
+ Parameters
1682
+ ----------
1683
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1684
+ Data matrix to be decomposed
1685
+
1686
+ y : Ignored
1687
+
1688
+ W : array-like of shape (n_samples, n_components), default=None
1689
+ If `init='custom'`, it is used as initial guess for the solution.
1690
+ If `update_H=False`, it is initialised as an array of zeros, unless
1691
+ `solver='mu'`, then it is filled with values calculated by
1692
+ `np.sqrt(X.mean() / self._n_components)`.
1693
+ If `None`, uses the initialisation method specified in `init`.
1694
+
1695
+ H : array-like of shape (n_components, n_features), default=None
1696
+ If `init='custom'`, it is used as initial guess for the solution.
1697
+ If `update_H=False`, it is used as a constant, to solve for W only.
1698
+ If `None`, uses the initialisation method specified in `init`.
1699
+
1700
+ update_H : bool, default=True
1701
+ If True, both W and H will be estimated from initial guesses,
1702
+ this corresponds to a call to the 'fit_transform' method.
1703
+ If False, only W will be estimated, this corresponds to a call
1704
+ to the 'transform' method.
1705
+
1706
+ Returns
1707
+ -------
1708
+ W : ndarray of shape (n_samples, n_components)
1709
+ Transformed data.
1710
+
1711
+ H : ndarray of shape (n_components, n_features)
1712
+ Factorization matrix, sometimes called 'dictionary'.
1713
+
1714
+ n_iter_ : int
1715
+ Actual number of iterations.
1716
+ """
1717
+ check_non_negative(X, "NMF (input X)")
1718
+
1719
+ # check parameters
1720
+ self._check_params(X)
1721
+
1722
+ if X.min() == 0 and self._beta_loss <= 0:
1723
+ raise ValueError(
1724
+ "When beta_loss <= 0 and X contains zeros, "
1725
+ "the solver may diverge. Please add small values "
1726
+ "to X, or use a positive beta_loss."
1727
+ )
1728
+
1729
+ # initialize or check W and H
1730
+ W, H = self._check_w_h(X, W, H, update_H)
1731
+
1732
+ # scale the regularization terms
1733
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
1734
+
1735
+ if self.solver == "cd":
1736
+ W, H, n_iter = _fit_coordinate_descent(
1737
+ X,
1738
+ W,
1739
+ H,
1740
+ self.tol,
1741
+ self.max_iter,
1742
+ l1_reg_W,
1743
+ l1_reg_H,
1744
+ l2_reg_W,
1745
+ l2_reg_H,
1746
+ update_H=update_H,
1747
+ verbose=self.verbose,
1748
+ shuffle=self.shuffle,
1749
+ random_state=self.random_state,
1750
+ )
1751
+ elif self.solver == "mu":
1752
+ W, H, n_iter, *_ = _fit_multiplicative_update(
1753
+ X,
1754
+ W,
1755
+ H,
1756
+ self._beta_loss,
1757
+ self.max_iter,
1758
+ self.tol,
1759
+ l1_reg_W,
1760
+ l1_reg_H,
1761
+ l2_reg_W,
1762
+ l2_reg_H,
1763
+ update_H,
1764
+ self.verbose,
1765
+ )
1766
+ else:
1767
+ raise ValueError("Invalid solver parameter '%s'." % self.solver)
1768
+
1769
+ if n_iter == self.max_iter and self.tol > 0:
1770
+ warnings.warn(
1771
+ "Maximum number of iterations %d reached. Increase "
1772
+ "it to improve convergence."
1773
+ % self.max_iter,
1774
+ ConvergenceWarning,
1775
+ )
1776
+
1777
+ return W, H, n_iter
1778
+
1779
+ def transform(self, X):
1780
+ """Transform the data X according to the fitted NMF model.
1781
+
1782
+ Parameters
1783
+ ----------
1784
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1785
+ Training vector, where `n_samples` is the number of samples
1786
+ and `n_features` is the number of features.
1787
+
1788
+ Returns
1789
+ -------
1790
+ W : ndarray of shape (n_samples, n_components)
1791
+ Transformed data.
1792
+ """
1793
+ check_is_fitted(self)
1794
+ X = self._validate_data(
1795
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False
1796
+ )
1797
+
1798
+ with config_context(assume_finite=True):
1799
+ W, *_ = self._fit_transform(X, H=self.components_, update_H=False)
1800
+
1801
+ return W
1802
+
1803
+
1804
+ class MiniBatchNMF(_BaseNMF):
1805
+ """Mini-Batch Non-Negative Matrix Factorization (NMF).
1806
+
1807
+ .. versionadded:: 1.1
1808
+
1809
+ Find two non-negative matrices, i.e. matrices with all non-negative elements,
1810
+ (`W`, `H`) whose product approximates the non-negative matrix `X`. This
1811
+ factorization can be used for example for dimensionality reduction, source
1812
+ separation or topic extraction.
1813
+
1814
+ The objective function is:
1815
+
1816
+ .. math::
1817
+
1818
+ L(W, H) &= 0.5 * ||X - WH||_{loss}^2
1819
+
1820
+ &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
1821
+
1822
+ &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
1823
+
1824
+ &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
1825
+
1826
+ &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2
1827
+
1828
+ Where:
1829
+
1830
+ :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)
1831
+
1832
+ :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
1833
+
1834
+ The generic norm :math:`||X - WH||_{loss}^2` may represent
1835
+ the Frobenius norm or another supported beta-divergence loss.
1836
+ The choice between options is controlled by the `beta_loss` parameter.
1837
+
1838
+ The objective function is minimized with an alternating minimization of `W`
1839
+ and `H`.
1840
+
1841
+ Note that the transformed data is named `W` and the components matrix is
1842
+ named `H`. In the NMF literature, the naming convention is usually the opposite
1843
+ since the data matrix `X` is transposed.
1844
+
1845
+ Read more in the :ref:`User Guide <MiniBatchNMF>`.
1846
+
1847
+ Parameters
1848
+ ----------
1849
+ n_components : int or {'auto'} or None, default=None
1850
+ Number of components. If `n_components` is not set, all features
1851
+ are kept.
1852
+ If `n_components='auto'`, the number of components is automatically inferred
1853
+ from W or H shapes.
1854
+
1855
+ .. versionchanged:: 1.4
1856
+ Added `'auto'` value.
1857
+
1858
+ init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
1859
+ Method used to initialize the procedure.
1860
+ Valid options:
1861
+
1862
+ - `None`: 'nndsvda' if `n_components <= min(n_samples, n_features)`,
1863
+ otherwise random.
1864
+
1865
+ - `'random'`: non-negative random matrices, scaled with:
1866
+ `sqrt(X.mean() / n_components)`
1867
+
1868
+ - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)
1869
+ initialization (better for sparseness).
1870
+
1871
+ - `'nndsvda'`: NNDSVD with zeros filled with the average of X
1872
+ (better when sparsity is not desired).
1873
+
1874
+ - `'nndsvdar'`: NNDSVD with zeros filled with small random values
1875
+ (generally faster, less accurate alternative to NNDSVDa
1876
+ for when sparsity is not desired).
1877
+
1878
+ - `'custom'`: Use custom matrices `W` and `H` which must both be provided.
1879
+
1880
+ batch_size : int, default=1024
1881
+ Number of samples in each mini-batch. Large batch sizes
1882
+ give better long-term convergence at the cost of a slower start.
1883
+
1884
+ beta_loss : float or {'frobenius', 'kullback-leibler', \
1885
+ 'itakura-saito'}, default='frobenius'
1886
+ Beta divergence to be minimized, measuring the distance between `X`
1887
+ and the dot product `WH`. Note that values different from 'frobenius'
1888
+ (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
1889
+ fits. Note that for `beta_loss <= 0` (or 'itakura-saito'), the input
1890
+ matrix `X` cannot contain zeros.
1891
+
1892
+ tol : float, default=1e-4
1893
+ Control early stopping based on the norm of the differences in `H`
1894
+ between 2 steps. To disable early stopping based on changes in `H`, set
1895
+ `tol` to 0.0.
1896
+
1897
+ max_no_improvement : int, default=10
1898
+ Control early stopping based on the consecutive number of mini batches
1899
+ that do not yield an improvement on the smoothed cost function.
1900
+ To disable convergence detection based on cost function, set
1901
+ `max_no_improvement` to None.
1902
+
1903
+ max_iter : int, default=200
1904
+ Maximum number of iterations over the complete dataset before
1905
+ timing out.
1906
+
1907
+ alpha_W : float, default=0.0
1908
+ Constant that multiplies the regularization terms of `W`. Set it to zero
1909
+ (default) to have no regularization on `W`.
1910
+
1911
+ alpha_H : float or "same", default="same"
1912
+ Constant that multiplies the regularization terms of `H`. Set it to zero to
1913
+ have no regularization on `H`. If "same" (default), it takes the same value as
1914
+ `alpha_W`.
1915
+
1916
+ l1_ratio : float, default=0.0
1917
+ The regularization mixing parameter, with 0 <= l1_ratio <= 1.
1918
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty
1919
+ (aka Frobenius Norm).
1920
+ For l1_ratio = 1 it is an elementwise L1 penalty.
1921
+ For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
1922
+
1923
+ forget_factor : float, default=0.7
1924
+ Amount of rescaling of past information. Its value could be 1 with
1925
+ finite datasets. Choosing values < 1 is recommended with online
1926
+ learning, as more recent batches will weigh more than past batches.
1927
+
1928
+ fresh_restarts : bool, default=False
1929
+ Whether to completely solve for W at each step. Doing fresh restarts will likely
1930
+ lead to a better solution for the same number of iterations, but it is much slower.
1931
+
1932
+ fresh_restarts_max_iter : int, default=30
1933
+ Maximum number of iterations when solving for W at each step. Only used when
1934
+ doing fresh restarts. These iterations may be stopped early based on a small
1935
+ change of W controlled by `tol`.
1936
+
1937
+ transform_max_iter : int, default=None
1938
+ Maximum number of iterations when solving for W at transform time.
1939
+ If None, it defaults to `max_iter`.
1940
+
1941
+ random_state : int, RandomState instance or None, default=None
1942
+ Used for initialisation (when ``init`` == 'nndsvdar' or
1943
+ 'random'), and in Coordinate Descent. Pass an int for reproducible
1944
+ results across multiple function calls.
1945
+ See :term:`Glossary <random_state>`.
1946
+
1947
+ verbose : bool, default=False
1948
+ Whether to be verbose.
1949
+
1950
+ Attributes
1951
+ ----------
1952
+ components_ : ndarray of shape (n_components, n_features)
1953
+ Factorization matrix, sometimes called 'dictionary'.
1954
+
1955
+ n_components_ : int
1956
+ The number of components. It is the same as the `n_components` parameter
1957
+ if it was given. Otherwise, it will be the same as the number of
1958
+ features.
1959
+
1960
+ reconstruction_err_ : float
1961
+ Frobenius norm of the matrix difference, or beta-divergence, between
1962
+ the training data `X` and the reconstructed data `WH` from
1963
+ the fitted model.
1964
+
1965
+ n_iter_ : int
1966
+ Actual number of started iterations over the whole dataset.
1967
+
1968
+ n_steps_ : int
1969
+ Number of mini-batches processed.
1970
+
1971
+ n_features_in_ : int
1972
+ Number of features seen during :term:`fit`.
1973
+
1974
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1975
+ Names of features seen during :term:`fit`. Defined only when `X`
1976
+ has feature names that are all strings.
1977
+
1978
+ See Also
1979
+ --------
1980
+ NMF : Non-negative matrix factorization.
1981
+ MiniBatchDictionaryLearning : Finds a dictionary that can best be used to represent
1982
+ data using a sparse code.
1983
+
1984
+ References
1985
+ ----------
1986
+ .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
1987
+ factorizations" <10.1587/transfun.E92.A.708>`
1988
+ Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
1989
+ of electronics, communications and computer sciences 92.3: 708-721, 2009.
1990
+
1991
+ .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
1992
+ beta-divergence" <10.1162/NECO_a_00168>`
1993
+ Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
1994
+
1995
+ .. [3] :doi:`"Online algorithms for nonnegative matrix factorization with the
1996
+ Itakura-Saito divergence" <10.1109/ASPAA.2011.6082314>`
1997
+ Lefevre, A., Bach, F., Fevotte, C. (2011). WASPA.
1998
+
1999
+ Examples
2000
+ --------
2001
+ >>> import numpy as np
2002
+ >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
2003
+ >>> from sklearn.decomposition import MiniBatchNMF
2004
+ >>> model = MiniBatchNMF(n_components=2, init='random', random_state=0)
2005
+ >>> W = model.fit_transform(X)
2006
+ >>> H = model.components_
2007
+ """
2008
+
2009
+ _parameter_constraints: dict = {
2010
+ **_BaseNMF._parameter_constraints,
2011
+ "max_no_improvement": [Interval(Integral, 1, None, closed="left"), None],
2012
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
2013
+ "forget_factor": [Interval(Real, 0, 1, closed="both")],
2014
+ "fresh_restarts": ["boolean"],
2015
+ "fresh_restarts_max_iter": [Interval(Integral, 1, None, closed="left")],
2016
+ "transform_max_iter": [Interval(Integral, 1, None, closed="left"), None],
2017
+ }
2018
+
2019
+ def __init__(
2020
+ self,
2021
+ n_components="warn",
2022
+ *,
2023
+ init=None,
2024
+ batch_size=1024,
2025
+ beta_loss="frobenius",
2026
+ tol=1e-4,
2027
+ max_no_improvement=10,
2028
+ max_iter=200,
2029
+ alpha_W=0.0,
2030
+ alpha_H="same",
2031
+ l1_ratio=0.0,
2032
+ forget_factor=0.7,
2033
+ fresh_restarts=False,
2034
+ fresh_restarts_max_iter=30,
2035
+ transform_max_iter=None,
2036
+ random_state=None,
2037
+ verbose=0,
2038
+ ):
2039
+ super().__init__(
2040
+ n_components=n_components,
2041
+ init=init,
2042
+ beta_loss=beta_loss,
2043
+ tol=tol,
2044
+ max_iter=max_iter,
2045
+ random_state=random_state,
2046
+ alpha_W=alpha_W,
2047
+ alpha_H=alpha_H,
2048
+ l1_ratio=l1_ratio,
2049
+ verbose=verbose,
2050
+ )
2051
+
2052
+ self.max_no_improvement = max_no_improvement
2053
+ self.batch_size = batch_size
2054
+ self.forget_factor = forget_factor
2055
+ self.fresh_restarts = fresh_restarts
2056
+ self.fresh_restarts_max_iter = fresh_restarts_max_iter
2057
+ self.transform_max_iter = transform_max_iter
2058
+
2059
+ def _check_params(self, X):
2060
+ super()._check_params(X)
2061
+
2062
+ # batch_size
2063
+ self._batch_size = min(self.batch_size, X.shape[0])
2064
+
2065
+ # forget_factor
2066
+ self._rho = self.forget_factor ** (self._batch_size / X.shape[0])
2067
+
2068
+ # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
2069
+ if self._beta_loss < 1:
2070
+ self._gamma = 1.0 / (2.0 - self._beta_loss)
2071
+ elif self._beta_loss > 2:
2072
+ self._gamma = 1.0 / (self._beta_loss - 1.0)
2073
+ else:
2074
+ self._gamma = 1.0
2075
+
2076
+ # transform_max_iter
2077
+ self._transform_max_iter = (
2078
+ self.max_iter
2079
+ if self.transform_max_iter is None
2080
+ else self.transform_max_iter
2081
+ )
2082
+
2083
+ return self
2084
+
2085
+ def _solve_W(self, X, H, max_iter):
2086
+ """Minimize the objective function w.r.t W.
2087
+
2088
+ Update W with H being fixed, until convergence. This is the heart
2089
+ of `transform` but it's also used during `fit` when doing fresh restarts.
2090
+ """
2091
+ avg = np.sqrt(X.mean() / self._n_components)
2092
+ W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
2093
+ W_buffer = W.copy()
2094
+
2095
+ # Get scaled regularization terms. Done for each minibatch to take into account
2096
+ # variable sizes of minibatches.
2097
+ l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X)
2098
+
2099
+ for _ in range(max_iter):
2100
+ W, *_ = _multiplicative_update_w(
2101
+ X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
2102
+ )
2103
+
2104
+ W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
2105
+ if self.tol > 0 and W_diff <= self.tol:
2106
+ break
2107
+
2108
+ W_buffer[:] = W
2109
+
2110
+ return W
2111
+
2112
+ def _minibatch_step(self, X, W, H, update_H):
2113
+ """Perform the update of W and H for one minibatch."""
2114
+ batch_size = X.shape[0]
2115
+
2116
+ # get scaled regularization terms. Done for each minibatch to take into account
2117
+ # variable sizes of minibatches.
2118
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
2119
+
2120
+ # update W
2121
+ if self.fresh_restarts or W is None:
2122
+ W = self._solve_W(X, H, self.fresh_restarts_max_iter)
2123
+ else:
2124
+ W, *_ = _multiplicative_update_w(
2125
+ X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
2126
+ )
2127
+
2128
+ # necessary for stability with beta_loss < 1
2129
+ if self._beta_loss < 1:
2130
+ W[W < np.finfo(np.float64).eps] = 0.0
2131
+
2132
+ batch_cost = (
2133
+ _beta_divergence(X, W, H, self._beta_loss)
2134
+ + l1_reg_W * W.sum()
2135
+ + l1_reg_H * H.sum()
2136
+ + l2_reg_W * (W**2).sum()
2137
+ + l2_reg_H * (H**2).sum()
2138
+ ) / batch_size
2139
+
2140
+ # update H (only at fit or fit_transform)
2141
+ if update_H:
2142
+ H[:] = _multiplicative_update_h(
2143
+ X,
2144
+ W,
2145
+ H,
2146
+ beta_loss=self._beta_loss,
2147
+ l1_reg_H=l1_reg_H,
2148
+ l2_reg_H=l2_reg_H,
2149
+ gamma=self._gamma,
2150
+ A=self._components_numerator,
2151
+ B=self._components_denominator,
2152
+ rho=self._rho,
2153
+ )
2154
+
2155
+ # necessary for stability with beta_loss < 1
2156
+ if self._beta_loss <= 1:
2157
+ H[H < np.finfo(np.float64).eps] = 0.0
2158
+
2159
+ return batch_cost
2160
+
2161
+ def _minibatch_convergence(
2162
+ self, X, batch_cost, H, H_buffer, n_samples, step, n_steps
2163
+ ):
2164
+ """Helper function to encapsulate the early stopping logic"""
2165
+ batch_size = X.shape[0]
2166
+
2167
+ # counts steps starting from 1 for user friendly verbose mode.
2168
+ step = step + 1
2169
+
2170
+ # Ignore first iteration because H is not updated yet.
2171
+ if step == 1:
2172
+ if self.verbose:
2173
+ print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
2174
+ return False
2175
+
2176
+ # Compute an Exponentially Weighted Average of the cost function to
2177
+ # monitor the convergence while discarding minibatch-local stochastic
2178
+ # variability: https://en.wikipedia.org/wiki/Moving_average
2179
+ if self._ewa_cost is None:
2180
+ self._ewa_cost = batch_cost
2181
+ else:
2182
+ alpha = batch_size / (n_samples + 1)
2183
+ alpha = min(alpha, 1)
2184
+ self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
2185
+
2186
+ # Log progress to be able to monitor convergence
2187
+ if self.verbose:
2188
+ print(
2189
+ f"Minibatch step {step}/{n_steps}: mean batch cost: "
2190
+ f"{batch_cost}, ewa cost: {self._ewa_cost}"
2191
+ )
2192
+
2193
+ # Early stopping based on change of H
2194
+ H_diff = linalg.norm(H - H_buffer) / linalg.norm(H)
2195
+ if self.tol > 0 and H_diff <= self.tol:
2196
+ if self.verbose:
2197
+ print(f"Converged (small H change) at step {step}/{n_steps}")
2198
+ return True
2199
+
2200
+ # Early stopping heuristic due to lack of improvement on smoothed
2201
+ # cost function
2202
+ if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
2203
+ self._no_improvement = 0
2204
+ self._ewa_cost_min = self._ewa_cost
2205
+ else:
2206
+ self._no_improvement += 1
2207
+
2208
+ if (
2209
+ self.max_no_improvement is not None
2210
+ and self._no_improvement >= self.max_no_improvement
2211
+ ):
2212
+ if self.verbose:
2213
+ print(
2214
+ "Converged (lack of improvement in objective function) "
2215
+ f"at step {step}/{n_steps}"
2216
+ )
2217
+ return True
2218
+
2219
+ return False
2220
+
2221
+ @_fit_context(prefer_skip_nested_validation=True)
2222
+ def fit_transform(self, X, y=None, W=None, H=None):
2223
+ """Learn a NMF model for the data X and returns the transformed data.
2224
+
2225
+ This is more efficient than calling fit followed by transform.
2226
+
2227
+ Parameters
2228
+ ----------
2229
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2230
+ Data matrix to be decomposed.
2231
+
2232
+ y : Ignored
2233
+ Not used, present here for API consistency by convention.
2234
+
2235
+ W : array-like of shape (n_samples, n_components), default=None
2236
+ If `init='custom'`, it is used as initial guess for the solution.
2237
+ If `None`, uses the initialisation method specified in `init`.
2238
+
2239
+ H : array-like of shape (n_components, n_features), default=None
2240
+ If `init='custom'`, it is used as initial guess for the solution.
2241
+ If `None`, uses the initialisation method specified in `init`.
2242
+
2243
+ Returns
2244
+ -------
2245
+ W : ndarray of shape (n_samples, n_components)
2246
+ Transformed data.
2247
+ """
2248
+ X = self._validate_data(
2249
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
2250
+ )
2251
+
2252
+ with config_context(assume_finite=True):
2253
+ W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H)
2254
+
2255
+ self.reconstruction_err_ = _beta_divergence(
2256
+ X, W, H, self._beta_loss, square_root=True
2257
+ )
2258
+
2259
+ self.n_components_ = H.shape[0]
2260
+ self.components_ = H
2261
+ self.n_iter_ = n_iter
2262
+ self.n_steps_ = n_steps
2263
+
2264
+ return W
2265
+
2266
+ def _fit_transform(self, X, W=None, H=None, update_H=True):
2267
+ """Learn a NMF model for the data X and returns the transformed data.
2268
+
2269
+ Parameters
2270
+ ----------
2271
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
2272
+ Data matrix to be decomposed.
2273
+
2274
+ W : array-like of shape (n_samples, n_components), default=None
2275
+ If `init='custom'`, it is used as initial guess for the solution.
2276
+ If `update_H=False`, it is initialised as an array of zeros, unless
2277
+ `solver='mu'`, then it is filled with values calculated by
2278
+ `np.sqrt(X.mean() / self._n_components)`.
2279
+ If `None`, uses the initialisation method specified in `init`.
2280
+
2281
+ H : array-like of shape (n_components, n_features), default=None
2282
+ If `init='custom'`, it is used as initial guess for the solution.
2283
+ If `update_H=False`, it is used as a constant, to solve for W only.
2284
+ If `None`, uses the initialisation method specified in `init`.
2285
+
2286
+ update_H : bool, default=True
2287
+ If True, both W and H will be estimated from initial guesses,
2288
+ this corresponds to a call to the `fit_transform` method.
2289
+ If False, only W will be estimated, this corresponds to a call
2290
+ to the `transform` method.
2291
+
2292
+ Returns
2293
+ -------
2294
+ W : ndarray of shape (n_samples, n_components)
2295
+ Transformed data.
2296
+
2297
+ H : ndarray of shape (n_components, n_features)
2298
+ Factorization matrix, sometimes called 'dictionary'.
2299
+
2300
+ n_iter : int
2301
+ Actual number of started iterations over the whole dataset.
2302
+
2303
+ n_steps : int
2304
+ Number of mini-batches processed.
2305
+ """
2306
+ check_non_negative(X, "MiniBatchNMF (input X)")
2307
+ self._check_params(X)
2308
+
2309
+ if X.min() == 0 and self._beta_loss <= 0:
2310
+ raise ValueError(
2311
+ "When beta_loss <= 0 and X contains zeros, "
2312
+ "the solver may diverge. Please add small values "
2313
+ "to X, or use a positive beta_loss."
2314
+ )
2315
+
2316
+ n_samples = X.shape[0]
2317
+
2318
+ # initialize or check W and H
2319
+ W, H = self._check_w_h(X, W, H, update_H)
2320
+ H_buffer = H.copy()
2321
+
2322
+ # Initialize auxiliary matrices
2323
+ self._components_numerator = H.copy()
2324
+ self._components_denominator = np.ones(H.shape, dtype=H.dtype)
2325
+
2326
+ # Attributes to monitor the convergence
2327
+ self._ewa_cost = None
2328
+ self._ewa_cost_min = None
2329
+ self._no_improvement = 0
2330
+
2331
+ batches = gen_batches(n_samples, self._batch_size)
2332
+ batches = itertools.cycle(batches)
2333
+ n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
2334
+ n_steps = self.max_iter * n_steps_per_iter
2335
+
2336
+ for i, batch in zip(range(n_steps), batches):
2337
+ batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H)
2338
+
2339
+ if update_H and self._minibatch_convergence(
2340
+ X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps
2341
+ ):
2342
+ break
2343
+
2344
+ H_buffer[:] = H
2345
+
2346
+ if self.fresh_restarts:
2347
+ W = self._solve_W(X, H, self._transform_max_iter)
2348
+
2349
+ n_steps = i + 1
2350
+ n_iter = int(np.ceil(n_steps / n_steps_per_iter))
2351
+
2352
+ if n_iter == self.max_iter and self.tol > 0:
2353
+ warnings.warn(
2354
+ (
2355
+ f"Maximum number of iterations {self.max_iter} reached. "
2356
+ "Increase it to improve convergence."
2357
+ ),
2358
+ ConvergenceWarning,
2359
+ )
2360
+
2361
+ return W, H, n_iter, n_steps
2362
+
2363
+ def transform(self, X):
2364
+ """Transform the data X according to the fitted MiniBatchNMF model.
2365
+
2366
+ Parameters
2367
+ ----------
2368
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2369
+ Data matrix to be transformed by the model.
2370
+
2371
+ Returns
2372
+ -------
2373
+ W : ndarray of shape (n_samples, n_components)
2374
+ Transformed data.
2375
+ """
2376
+ check_is_fitted(self)
2377
+ X = self._validate_data(
2378
+ X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False
2379
+ )
2380
+
2381
+ W = self._solve_W(X, self.components_, self._transform_max_iter)
2382
+
2383
+ return W
2384
+
2385
+ @_fit_context(prefer_skip_nested_validation=True)
2386
+ def partial_fit(self, X, y=None, W=None, H=None):
2387
+ """Update the model using the data in `X` as a mini-batch.
2388
+
2389
+ This method is expected to be called several times consecutively
2390
+ on different chunks of a dataset so as to implement out-of-core
2391
+ or online learning.
2392
+
2393
+ This is especially useful when the whole dataset is too big to fit in
2394
+ memory at once (see :ref:`scaling_strategies`).
2395
+
2396
+ Parameters
2397
+ ----------
2398
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2399
+ Data matrix to be decomposed.
2400
+
2401
+ y : Ignored
2402
+ Not used, present here for API consistency by convention.
2403
+
2404
+ W : array-like of shape (n_samples, n_components), default=None
2405
+ If `init='custom'`, it is used as initial guess for the solution.
2406
+ Only used for the first call to `partial_fit`.
2407
+
2408
+ H : array-like of shape (n_components, n_features), default=None
2409
+ If `init='custom'`, it is used as initial guess for the solution.
2410
+ Only used for the first call to `partial_fit`.
2411
+
2412
+ Returns
2413
+ -------
2414
+ self
2415
+ Returns the instance itself.
2416
+ """
2417
+ has_components = hasattr(self, "components_")
2418
+
2419
+ X = self._validate_data(
2420
+ X,
2421
+ accept_sparse=("csr", "csc"),
2422
+ dtype=[np.float64, np.float32],
2423
+ reset=not has_components,
2424
+ )
2425
+
2426
+ if not has_components:
2427
+ # This instance has not been fitted yet (fit or partial_fit)
2428
+ self._check_params(X)
2429
+ _, H = self._check_w_h(X, W=W, H=H, update_H=True)
2430
+
2431
+ self._components_numerator = H.copy()
2432
+ self._components_denominator = np.ones(H.shape, dtype=H.dtype)
2433
+ self.n_steps_ = 0
2434
+ else:
2435
+ H = self.components_
2436
+
2437
+ self._minibatch_step(X, None, H, update_H=True)
2438
+
2439
+ self.n_components_ = H.shape[0]
2440
+ self.components_ = H
2441
+ self.n_steps_ += 1
2442
+
2443
+ return self
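The `partial_fit` method above is the hook for out-of-core learning: the estimator keeps running numerator/denominator statistics for `H` and refines them one mini-batch at a time, while `transform` later solves for `W` with the learned `components_` held fixed. A minimal driving sketch, not part of the diff (the synthetic non-negative data and the chunk size are illustrative assumptions):

```python
# Minimal sketch: out-of-core fitting with MiniBatchNMF.partial_fit.
# The random non-negative data and the chunk size are illustrative assumptions.
import numpy as np
from sklearn.decomposition import MiniBatchNMF
from sklearn.utils import gen_batches

rng = np.random.RandomState(0)
X = np.abs(rng.randn(1000, 20))  # NMF requires non-negative input

nmf = MiniBatchNMF(n_components=5, batch_size=128, random_state=0)
for batch in gen_batches(X.shape[0], 200):  # feed the data chunk by chunk
    nmf.partial_fit(X[batch])

W = nmf.transform(X)   # solves for W with components_ (H) held fixed
H = nmf.components_    # learned dictionary, shape (5, 20)
print(W.shape, H.shape)
```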
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (307 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_pca.py ADDED
@@ -0,0 +1,747 @@
1
+ """ Principal Component Analysis.
2
+ """
3
+
4
+ # Author: Alexandre Gramfort <[email protected]>
5
+ # Olivier Grisel <[email protected]>
6
+ # Mathieu Blondel <[email protected]>
7
+ # Denis A. Engemann <[email protected]>
8
+ # Michael Eickenberg <[email protected]>
9
+ # Giorgio Patrini <[email protected]>
10
+ #
11
+ # License: BSD 3 clause
12
+
13
+ from math import log, sqrt
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ from scipy import linalg
18
+ from scipy.sparse import issparse
19
+ from scipy.sparse.linalg import svds
20
+ from scipy.special import gammaln
21
+
22
+ from ..base import _fit_context
23
+ from ..utils import check_random_state
24
+ from ..utils._arpack import _init_arpack_v0
25
+ from ..utils._array_api import _convert_to_numpy, get_namespace
26
+ from ..utils._param_validation import Interval, RealNotInt, StrOptions
27
+ from ..utils.extmath import fast_logdet, randomized_svd, stable_cumsum, svd_flip
28
+ from ..utils.sparsefuncs import _implicit_column_offset, mean_variance_axis
29
+ from ..utils.validation import check_is_fitted
30
+ from ._base import _BasePCA
31
+
32
+
33
+ def _assess_dimension(spectrum, rank, n_samples):
34
+ """Compute the log-likelihood of a rank ``rank`` dataset.
35
+
36
+ The dataset is assumed to be embedded in Gaussian noise of shape (n,
37
+ dimf) having spectrum ``spectrum``. This implements the method of
38
+ T. P. Minka.
39
+
40
+ Parameters
41
+ ----------
42
+ spectrum : ndarray of shape (n_features,)
43
+ Data spectrum.
44
+ rank : int
45
+ Tested rank value. It should be strictly lower than n_features,
46
+ otherwise the method isn't specified (division by zero in equation
47
+ (31) from the paper).
48
+ n_samples : int
49
+ Number of samples.
50
+
51
+ Returns
52
+ -------
53
+ ll : float
54
+ The log-likelihood.
55
+
56
+ References
57
+ ----------
58
+ This implements the method of `Thomas P. Minka:
59
+ Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
60
+ <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
61
+ """
62
+ xp, _ = get_namespace(spectrum)
63
+
64
+ n_features = spectrum.shape[0]
65
+ if not 1 <= rank < n_features:
66
+ raise ValueError("the tested rank should be in [1, n_features - 1]")
67
+
68
+ eps = 1e-15
69
+
70
+ if spectrum[rank - 1] < eps:
71
+ # When the tested rank is associated with a small eigenvalue, there's
72
+ # no point in computing the log-likelihood: it's going to be very
73
+ # small and won't be the max anyway. Also, it can lead to numerical
74
+ # issues below when computing pa, in particular in log((spectrum[i] -
75
+ # spectrum[j]) because this will take the log of something very small.
76
+ return -xp.inf
77
+
78
+ pu = -rank * log(2.0)
79
+ for i in range(1, rank + 1):
80
+ pu += (
81
+ gammaln((n_features - i + 1) / 2.0)
82
+ - log(xp.pi) * (n_features - i + 1) / 2.0
83
+ )
84
+
85
+ pl = xp.sum(xp.log(spectrum[:rank]))
86
+ pl = -pl * n_samples / 2.0
87
+
88
+ v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank))
89
+ pv = -log(v) * n_samples * (n_features - rank) / 2.0
90
+
91
+ m = n_features * rank - rank * (rank + 1.0) / 2.0
92
+ pp = log(2.0 * xp.pi) * (m + rank) / 2.0
93
+
94
+ pa = 0.0
95
+ spectrum_ = xp.asarray(spectrum, copy=True)
96
+ spectrum_[rank:n_features] = v
97
+ for i in range(rank):
98
+ for j in range(i + 1, spectrum.shape[0]):
99
+ pa += log(
100
+ (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])
101
+ ) + log(n_samples)
102
+
103
+ ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0
104
+
105
+ return ll
106
+
107
+
108
+ def _infer_dimension(spectrum, n_samples):
109
+ """Infers the dimension of a dataset with a given spectrum.
110
+
111
+ The returned value will be in [1, n_features - 1].
112
+ """
113
+ xp, _ = get_namespace(spectrum)
114
+
115
+ ll = xp.empty_like(spectrum)
116
+ ll[0] = -xp.inf # we don't want to return n_components = 0
117
+ for rank in range(1, spectrum.shape[0]):
118
+ ll[rank] = _assess_dimension(spectrum, rank, n_samples)
119
+ return xp.argmax(ll)
120
+
121
+
122
+ class PCA(_BasePCA):
123
+ """Principal component analysis (PCA).
124
+
125
+ Linear dimensionality reduction using Singular Value Decomposition of the
126
+ data to project it to a lower dimensional space. The input data is centered
127
+ but not scaled for each feature before applying the SVD.
128
+
129
+ It uses the LAPACK implementation of the full SVD or a randomized truncated
130
+ SVD by the method of Halko et al. 2009, depending on the shape of the input
131
+ data and the number of components to extract.
132
+
133
+ It can also use the scipy.sparse.linalg ARPACK implementation of the
134
+ truncated SVD.
135
+
136
+ Notice that this class does not support sparse input. See
137
+ :class:`TruncatedSVD` for an alternative with sparse data.
138
+
139
+ For a usage example, see
140
+ :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py`
141
+
142
+ Read more in the :ref:`User Guide <PCA>`.
143
+
144
+ Parameters
145
+ ----------
146
+ n_components : int, float or 'mle', default=None
147
+ Number of components to keep.
148
+ if n_components is not set all components are kept::
149
+
150
+ n_components == min(n_samples, n_features)
151
+
152
+ If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
153
+ MLE is used to guess the dimension. Use of ``n_components == 'mle'``
154
+ will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
155
+
156
+ If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
157
+ number of components such that the amount of variance that needs to be
158
+ explained is greater than the percentage specified by n_components.
159
+
160
+ If ``svd_solver == 'arpack'``, the number of components must be
161
+ strictly less than the minimum of n_features and n_samples.
162
+
163
+ Hence, the None case results in::
164
+
165
+ n_components == min(n_samples, n_features) - 1
166
+
167
+ copy : bool, default=True
168
+ If False, data passed to fit are overwritten and running
169
+ fit(X).transform(X) will not yield the expected results,
170
+ use fit_transform(X) instead.
171
+
172
+ whiten : bool, default=False
173
+ When True (False by default) the `components_` vectors are multiplied
174
+ by the square root of n_samples and then divided by the singular values
175
+ to ensure uncorrelated outputs with unit component-wise variances.
176
+
177
+ Whitening will remove some information from the transformed signal
178
+ (the relative variance scales of the components) but can sometime
179
+ improve the predictive accuracy of the downstream estimators by
180
+ making their data respect some hard-wired assumptions.
181
+
182
+ svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto'
183
+ If auto :
184
+ The solver is selected by a default policy based on `X.shape` and
185
+ `n_components`: if the input data is larger than 500x500 and the
186
+ number of components to extract is lower than 80% of the smallest
187
+ dimension of the data, then the more efficient 'randomized'
188
+ method is enabled. Otherwise the exact full SVD is computed and
189
+ optionally truncated afterwards.
190
+ If full :
191
+ run exact full SVD calling the standard LAPACK solver via
192
+ `scipy.linalg.svd` and select the components by postprocessing
193
+ If arpack :
194
+ run SVD truncated to n_components calling ARPACK solver via
195
+ `scipy.sparse.linalg.svds`. It requires strictly
196
+ 0 < n_components < min(X.shape)
197
+ If randomized :
198
+ run randomized SVD by the method of Halko et al.
199
+
200
+ .. versionadded:: 0.18.0
201
+
202
+ tol : float, default=0.0
203
+ Tolerance for singular values computed by svd_solver == 'arpack'.
204
+ Must be of range [0.0, infinity).
205
+
206
+ .. versionadded:: 0.18.0
207
+
208
+ iterated_power : int or 'auto', default='auto'
209
+ Number of iterations for the power method computed by
210
+ svd_solver == 'randomized'.
211
+ Must be of range [0, infinity).
212
+
213
+ .. versionadded:: 0.18.0
214
+
215
+ n_oversamples : int, default=10
216
+ This parameter is only relevant when `svd_solver="randomized"`.
217
+ It corresponds to the additional number of random vectors to sample the
218
+ range of `X` so as to ensure proper conditioning. See
219
+ :func:`~sklearn.utils.extmath.randomized_svd` for more details.
220
+
221
+ .. versionadded:: 1.1
222
+
223
+ power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
224
+ Power iteration normalizer for randomized SVD solver.
225
+ Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
226
+ for more details.
227
+
228
+ .. versionadded:: 1.1
229
+
230
+ random_state : int, RandomState instance or None, default=None
231
+ Used when the 'arpack' or 'randomized' solvers are used. Pass an int
232
+ for reproducible results across multiple function calls.
233
+ See :term:`Glossary <random_state>`.
234
+
235
+ .. versionadded:: 0.18.0
236
+
237
+ Attributes
238
+ ----------
239
+ components_ : ndarray of shape (n_components, n_features)
240
+ Principal axes in feature space, representing the directions of
241
+ maximum variance in the data. Equivalently, the right singular
242
+ vectors of the centered input data, parallel to its eigenvectors.
243
+ The components are sorted by decreasing ``explained_variance_``.
244
+
245
+ explained_variance_ : ndarray of shape (n_components,)
246
+ The amount of variance explained by each of the selected components.
247
+ The variance estimation uses `n_samples - 1` degrees of freedom.
248
+
249
+ Equal to n_components largest eigenvalues
250
+ of the covariance matrix of X.
251
+
252
+ .. versionadded:: 0.18
253
+
254
+ explained_variance_ratio_ : ndarray of shape (n_components,)
255
+ Percentage of variance explained by each of the selected components.
256
+
257
+ If ``n_components`` is not set then all components are stored and the
258
+ sum of the ratios is equal to 1.0.
259
+
260
+ singular_values_ : ndarray of shape (n_components,)
261
+ The singular values corresponding to each of the selected components.
262
+ The singular values are equal to the 2-norms of the ``n_components``
263
+ variables in the lower-dimensional space.
264
+
265
+ .. versionadded:: 0.19
266
+
267
+ mean_ : ndarray of shape (n_features,)
268
+ Per-feature empirical mean, estimated from the training set.
269
+
270
+ Equal to `X.mean(axis=0)`.
271
+
272
+ n_components_ : int
273
+ The estimated number of components. When n_components is set
274
+ to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
275
+ number is estimated from input data. Otherwise it equals the parameter
276
+ n_components, or the lesser value of n_features and n_samples
277
+ if n_components is None.
278
+
279
+ n_samples_ : int
280
+ Number of samples in the training data.
281
+
282
+ noise_variance_ : float
283
+ The estimated noise covariance following the Probabilistic PCA model
284
+ from Tipping and Bishop 1999. See "Pattern Recognition and
285
+ Machine Learning" by C. Bishop, 12.2.1 p. 574 or
286
+ http://www.miketipping.com/papers/met-mppca.pdf. It is required to
287
+ compute the estimated data covariance and score samples.
288
+
289
+ Equal to the average of (min(n_features, n_samples) - n_components)
290
+ smallest eigenvalues of the covariance matrix of X.
291
+
292
+ n_features_in_ : int
293
+ Number of features seen during :term:`fit`.
294
+
295
+ .. versionadded:: 0.24
296
+
297
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
298
+ Names of features seen during :term:`fit`. Defined only when `X`
299
+ has feature names that are all strings.
300
+
301
+ .. versionadded:: 1.0
302
+
303
+ See Also
304
+ --------
305
+ KernelPCA : Kernel Principal Component Analysis.
306
+ SparsePCA : Sparse Principal Component Analysis.
307
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
308
+ IncrementalPCA : Incremental Principal Component Analysis.
309
+
310
+ References
311
+ ----------
312
+ For n_components == 'mle', this class uses the method from:
313
+ `Minka, T. P.. "Automatic choice of dimensionality for PCA".
314
+ In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
315
+
316
+ Implements the probabilistic PCA model from:
317
+ `Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
318
+ component analysis". Journal of the Royal Statistical Society:
319
+ Series B (Statistical Methodology), 61(3), 611-622.
320
+ <http://www.miketipping.com/papers/met-mppca.pdf>`_
321
+ via the score and score_samples methods.
322
+
323
+ For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
324
+
325
+ For svd_solver == 'randomized', see:
326
+ :doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
327
+ "Finding structure with randomness: Probabilistic algorithms for
328
+ constructing approximate matrix decompositions".
329
+ SIAM review, 53(2), 217-288.
330
+ <10.1137/090771806>`
331
+ and also
332
+ :doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
333
+ "A randomized algorithm for the decomposition of matrices".
334
+ Applied and Computational Harmonic Analysis, 30(1), 47-68.
335
+ <10.1016/j.acha.2010.02.003>`
336
+
337
+ Examples
338
+ --------
339
+ >>> import numpy as np
340
+ >>> from sklearn.decomposition import PCA
341
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
342
+ >>> pca = PCA(n_components=2)
343
+ >>> pca.fit(X)
344
+ PCA(n_components=2)
345
+ >>> print(pca.explained_variance_ratio_)
346
+ [0.9924... 0.0075...]
347
+ >>> print(pca.singular_values_)
348
+ [6.30061... 0.54980...]
349
+
350
+ >>> pca = PCA(n_components=2, svd_solver='full')
351
+ >>> pca.fit(X)
352
+ PCA(n_components=2, svd_solver='full')
353
+ >>> print(pca.explained_variance_ratio_)
354
+ [0.9924... 0.00755...]
355
+ >>> print(pca.singular_values_)
356
+ [6.30061... 0.54980...]
357
+
358
+ >>> pca = PCA(n_components=1, svd_solver='arpack')
359
+ >>> pca.fit(X)
360
+ PCA(n_components=1, svd_solver='arpack')
361
+ >>> print(pca.explained_variance_ratio_)
362
+ [0.99244...]
363
+ >>> print(pca.singular_values_)
364
+ [6.30061...]
365
+ """
366
+
367
+ _parameter_constraints: dict = {
368
+ "n_components": [
369
+ Interval(Integral, 0, None, closed="left"),
370
+ Interval(RealNotInt, 0, 1, closed="neither"),
371
+ StrOptions({"mle"}),
372
+ None,
373
+ ],
374
+ "copy": ["boolean"],
375
+ "whiten": ["boolean"],
376
+ "svd_solver": [StrOptions({"auto", "full", "arpack", "randomized"})],
377
+ "tol": [Interval(Real, 0, None, closed="left")],
378
+ "iterated_power": [
379
+ StrOptions({"auto"}),
380
+ Interval(Integral, 0, None, closed="left"),
381
+ ],
382
+ "n_oversamples": [Interval(Integral, 1, None, closed="left")],
383
+ "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
384
+ "random_state": ["random_state"],
385
+ }
386
+
387
+ def __init__(
388
+ self,
389
+ n_components=None,
390
+ *,
391
+ copy=True,
392
+ whiten=False,
393
+ svd_solver="auto",
394
+ tol=0.0,
395
+ iterated_power="auto",
396
+ n_oversamples=10,
397
+ power_iteration_normalizer="auto",
398
+ random_state=None,
399
+ ):
400
+ self.n_components = n_components
401
+ self.copy = copy
402
+ self.whiten = whiten
403
+ self.svd_solver = svd_solver
404
+ self.tol = tol
405
+ self.iterated_power = iterated_power
406
+ self.n_oversamples = n_oversamples
407
+ self.power_iteration_normalizer = power_iteration_normalizer
408
+ self.random_state = random_state
409
+
410
+ @_fit_context(prefer_skip_nested_validation=True)
411
+ def fit(self, X, y=None):
412
+ """Fit the model with X.
413
+
414
+ Parameters
415
+ ----------
416
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
417
+ Training data, where `n_samples` is the number of samples
418
+ and `n_features` is the number of features.
419
+
420
+ y : Ignored
421
+ Ignored.
422
+
423
+ Returns
424
+ -------
425
+ self : object
426
+ Returns the instance itself.
427
+ """
428
+ self._fit(X)
429
+ return self
430
+
431
+ @_fit_context(prefer_skip_nested_validation=True)
432
+ def fit_transform(self, X, y=None):
433
+ """Fit the model with X and apply the dimensionality reduction on X.
434
+
435
+ Parameters
436
+ ----------
437
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
438
+ Training data, where `n_samples` is the number of samples
439
+ and `n_features` is the number of features.
440
+
441
+ y : Ignored
442
+ Ignored.
443
+
444
+ Returns
445
+ -------
446
+ X_new : ndarray of shape (n_samples, n_components)
447
+ Transformed values.
448
+
449
+ Notes
450
+ -----
451
+ This method returns a Fortran-ordered array. To convert it to a
452
+ C-ordered array, use 'np.ascontiguousarray'.
453
+ """
454
+ U, S, Vt = self._fit(X)
455
+ U = U[:, : self.n_components_]
456
+
457
+ if self.whiten:
458
+ # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
459
+ U *= sqrt(X.shape[0] - 1)
460
+ else:
461
+ # X_new = X * V = U * S * Vt * V = U * S
462
+ U *= S[: self.n_components_]
463
+
464
+ return U
465
+
466
+ def _fit(self, X):
467
+ """Dispatch to the right submethod depending on the chosen solver."""
468
+ xp, is_array_api_compliant = get_namespace(X)
469
+
470
+ # Raise an error for sparse input and unsupported svd_solver
471
+ if issparse(X) and self.svd_solver != "arpack":
472
+ raise TypeError(
473
+ 'PCA only support sparse inputs with the "arpack" solver, while '
474
+ f'"{self.svd_solver}" was passed. See TruncatedSVD for a possible'
475
+ " alternative."
476
+ )
477
+ # Raise an error for non-Numpy input and arpack solver.
478
+ if self.svd_solver == "arpack" and is_array_api_compliant:
479
+ raise ValueError(
480
+ "PCA with svd_solver='arpack' is not supported for Array API inputs."
481
+ )
482
+
483
+ X = self._validate_data(
484
+ X,
485
+ dtype=[xp.float64, xp.float32],
486
+ accept_sparse=("csr", "csc"),
487
+ ensure_2d=True,
488
+ copy=self.copy,
489
+ )
490
+
491
+ # Handle n_components==None
492
+ if self.n_components is None:
493
+ if self.svd_solver != "arpack":
494
+ n_components = min(X.shape)
495
+ else:
496
+ n_components = min(X.shape) - 1
497
+ else:
498
+ n_components = self.n_components
499
+
500
+ # Handle svd_solver
501
+ self._fit_svd_solver = self.svd_solver
502
+ if self._fit_svd_solver == "auto":
503
+ # Small problem or n_components == 'mle', just call full PCA
504
+ if max(X.shape) <= 500 or n_components == "mle":
505
+ self._fit_svd_solver = "full"
506
+ elif 1 <= n_components < 0.8 * min(X.shape):
507
+ self._fit_svd_solver = "randomized"
508
+ # This is also the case of n_components in (0,1)
509
+ else:
510
+ self._fit_svd_solver = "full"
511
+
512
+ # Call different fits for either full or truncated SVD
513
+ if self._fit_svd_solver == "full":
514
+ return self._fit_full(X, n_components)
515
+ elif self._fit_svd_solver in ["arpack", "randomized"]:
516
+ return self._fit_truncated(X, n_components, self._fit_svd_solver)
517
+
518
+ def _fit_full(self, X, n_components):
519
+ """Fit the model by computing full SVD on X."""
520
+ xp, is_array_api_compliant = get_namespace(X)
521
+
522
+ n_samples, n_features = X.shape
523
+
524
+ if n_components == "mle":
525
+ if n_samples < n_features:
526
+ raise ValueError(
527
+ "n_components='mle' is only supported if n_samples >= n_features"
528
+ )
529
+ elif not 0 <= n_components <= min(n_samples, n_features):
530
+ raise ValueError(
531
+ "n_components=%r must be between 0 and "
532
+ "min(n_samples, n_features)=%r with "
533
+ "svd_solver='full'" % (n_components, min(n_samples, n_features))
534
+ )
535
+
536
+ # Center data
537
+ self.mean_ = xp.mean(X, axis=0)
538
+ X -= self.mean_
539
+
540
+ if not is_array_api_compliant:
541
+ # Use scipy.linalg with NumPy/SciPy inputs for the sake of not
542
+ # introducing unanticipated behavior changes. In the long run we
543
+ # could instead decide to always use xp.linalg.svd for all inputs,
544
+ # but that would make this code rely on numpy's SVD instead of
545
+ # scipy's. It's not 100% clear whether they use the same LAPACK
546
+ # solver by default though (assuming both are built against the
547
+ # same BLAS).
548
+ U, S, Vt = linalg.svd(X, full_matrices=False)
549
+ else:
550
+ U, S, Vt = xp.linalg.svd(X, full_matrices=False)
551
+ # flip eigenvectors' sign to enforce deterministic output
552
+ U, Vt = svd_flip(U, Vt)
553
+
554
+ components_ = Vt
555
+
556
+ # Get variance explained by singular values
557
+ explained_variance_ = (S**2) / (n_samples - 1)
558
+ total_var = xp.sum(explained_variance_)
559
+ explained_variance_ratio_ = explained_variance_ / total_var
560
+ singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
561
+
562
+ # Postprocess the number of components required
563
+ if n_components == "mle":
564
+ n_components = _infer_dimension(explained_variance_, n_samples)
565
+ elif 0 < n_components < 1.0:
566
+ # number of components for which the cumulated explained
567
+ # variance percentage is superior to the desired threshold
568
+ # side='right' ensures that number of features selected
569
+ # their variance is always greater than n_components float
570
+ # passed. More discussion in issue: #15669
571
+ if is_array_api_compliant:
572
+ # Convert to numpy as xp.cumsum and xp.searchsorted are not
573
+ # part of the Array API standard yet:
574
+ #
575
+ # https://github.com/data-apis/array-api/issues/597
576
+ # https://github.com/data-apis/array-api/issues/688
577
+ #
578
+ # Furthermore, it's not always safe to call them for namespaces
579
+ # that already implement them: for instance as
580
+ # cupy.searchsorted does not accept a float as second argument.
581
+ explained_variance_ratio_np = _convert_to_numpy(
582
+ explained_variance_ratio_, xp=xp
583
+ )
584
+ else:
585
+ explained_variance_ratio_np = explained_variance_ratio_
586
+ ratio_cumsum = stable_cumsum(explained_variance_ratio_np)
587
+ n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1
588
+
589
+ # Compute noise covariance using Probabilistic PCA model
590
+ # The sigma2 maximum likelihood (cf. eq. 12.46)
591
+ if n_components < min(n_features, n_samples):
592
+ self.noise_variance_ = xp.mean(explained_variance_[n_components:])
593
+ else:
594
+ self.noise_variance_ = 0.0
595
+
596
+ self.n_samples_ = n_samples
597
+ self.components_ = components_[:n_components, :]
598
+ self.n_components_ = n_components
599
+ self.explained_variance_ = explained_variance_[:n_components]
600
+ self.explained_variance_ratio_ = explained_variance_ratio_[:n_components]
601
+ self.singular_values_ = singular_values_[:n_components]
602
+
603
+ return U, S, Vt
604
+
605
+ def _fit_truncated(self, X, n_components, svd_solver):
606
+ """Fit the model by computing truncated SVD (by ARPACK or randomized)
607
+ on X.
608
+ """
609
+ xp, _ = get_namespace(X)
610
+
611
+ n_samples, n_features = X.shape
612
+
613
+ if isinstance(n_components, str):
614
+ raise ValueError(
615
+ "n_components=%r cannot be a string with svd_solver='%s'"
616
+ % (n_components, svd_solver)
617
+ )
618
+ elif not 1 <= n_components <= min(n_samples, n_features):
619
+ raise ValueError(
620
+ "n_components=%r must be between 1 and "
621
+ "min(n_samples, n_features)=%r with "
622
+ "svd_solver='%s'"
623
+ % (n_components, min(n_samples, n_features), svd_solver)
624
+ )
625
+ elif svd_solver == "arpack" and n_components == min(n_samples, n_features):
626
+ raise ValueError(
627
+ "n_components=%r must be strictly less than "
628
+ "min(n_samples, n_features)=%r with "
629
+ "svd_solver='%s'"
630
+ % (n_components, min(n_samples, n_features), svd_solver)
631
+ )
632
+
633
+ random_state = check_random_state(self.random_state)
634
+
635
+ # Center data
636
+ total_var = None
637
+ if issparse(X):
638
+ self.mean_, var = mean_variance_axis(X, axis=0)
639
+ total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1
640
+ X = _implicit_column_offset(X, self.mean_)
641
+ else:
642
+ self.mean_ = xp.mean(X, axis=0)
643
+ X -= self.mean_
644
+
645
+ if svd_solver == "arpack":
646
+ v0 = _init_arpack_v0(min(X.shape), random_state)
647
+ U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0)
648
+ # svds doesn't abide by scipy.linalg.svd/randomized_svd
649
+ # conventions, so reverse its outputs.
650
+ S = S[::-1]
651
+ # flip eigenvectors' sign to enforce deterministic output
652
+ U, Vt = svd_flip(U[:, ::-1], Vt[::-1])
653
+
654
+ elif svd_solver == "randomized":
655
+ # sign flipping is done inside
656
+ U, S, Vt = randomized_svd(
657
+ X,
658
+ n_components=n_components,
659
+ n_oversamples=self.n_oversamples,
660
+ n_iter=self.iterated_power,
661
+ power_iteration_normalizer=self.power_iteration_normalizer,
662
+ flip_sign=True,
663
+ random_state=random_state,
664
+ )
665
+
666
+ self.n_samples_ = n_samples
667
+ self.components_ = Vt
668
+ self.n_components_ = n_components
669
+
670
+ # Get variance explained by singular values
671
+ self.explained_variance_ = (S**2) / (n_samples - 1)
672
+
673
+ # Workaround in-place variance calculation since at the time numpy
674
+ # did not have a way to calculate variance in-place.
675
+ #
676
+ # TODO: update this code to either:
677
+ # * Use the array-api variance calculation, unless memory usage suffers
678
+ # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api
679
+ # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991
680
+ if total_var is None:
681
+ N = X.shape[0] - 1
682
+ X **= 2
683
+ total_var = xp.sum(X) / N
684
+
685
+ self.explained_variance_ratio_ = self.explained_variance_ / total_var
686
+ self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
687
+
688
+ if self.n_components_ < min(n_features, n_samples):
689
+ self.noise_variance_ = total_var - xp.sum(self.explained_variance_)
690
+ self.noise_variance_ /= min(n_features, n_samples) - n_components
691
+ else:
692
+ self.noise_variance_ = 0.0
693
+
694
+ return U, S, Vt
695
+
696
+ def score_samples(self, X):
697
+ """Return the log-likelihood of each sample.
698
+
699
+ See "Pattern Recognition and Machine Learning"
700
+ by C. Bishop, 12.2.1 p. 574
701
+ or http://www.miketipping.com/papers/met-mppca.pdf
702
+
703
+ Parameters
704
+ ----------
705
+ X : array-like of shape (n_samples, n_features)
706
+ The data.
707
+
708
+ Returns
709
+ -------
710
+ ll : ndarray of shape (n_samples,)
711
+ Log-likelihood of each sample under the current model.
712
+ """
713
+ check_is_fitted(self)
714
+ xp, _ = get_namespace(X)
715
+ X = self._validate_data(X, dtype=[xp.float64, xp.float32], reset=False)
716
+ Xr = X - self.mean_
717
+ n_features = X.shape[1]
718
+ precision = self.get_precision()
719
+ log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1)
720
+ log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
721
+ return log_like
722
+
723
+ def score(self, X, y=None):
724
+ """Return the average log-likelihood of all samples.
725
+
726
+ See. "Pattern Recognition and Machine Learning"
727
+ by C. Bishop, 12.2.1 p. 574
728
+ or http://www.miketipping.com/papers/met-mppca.pdf
729
+
730
+ Parameters
731
+ ----------
732
+ X : array-like of shape (n_samples, n_features)
733
+ The data.
734
+
735
+ y : Ignored
736
+ Ignored.
737
+
738
+ Returns
739
+ -------
740
+ ll : float
741
+ Average log-likelihood of the samples under the current model.
742
+ """
743
+ xp, _ = get_namespace(X)
744
+ return float(xp.mean(self.score_samples(X)))
745
+
746
+ def _more_tags(self):
747
+ return {"preserves_dtype": [np.float64, np.float32], "array_api_support": True}
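As the `n_components` docstring above explains, passing a float in `(0, 1)` together with `svd_solver='full'` keeps the smallest number of components whose cumulative explained-variance ratio exceeds the threshold (the `stable_cumsum` / `searchsorted` step in `_fit_full`). A minimal usage sketch, not part of the diff; the synthetic low-rank data is an illustrative assumption:

```python
# Minimal sketch: choosing n_components by an explained-variance threshold.
# The synthetic low-rank data below is an illustrative assumption.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
latent = rng.randn(300, 3)                    # three underlying directions
X = latent @ rng.randn(3, 10) + 0.01 * rng.randn(300, 10)

pca = PCA(n_components=0.95, svd_solver="full").fit(X)
print(pca.n_components_)                       # components kept to reach 95%
print(pca.explained_variance_ratio_.cumsum())  # cumulative ratio of the kept components
```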
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py ADDED
@@ -0,0 +1,551 @@
1
+ """Matrix factorization with Sparse PCA."""
2
+ # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Integral, Real
6
+
7
+ import numpy as np
8
+
9
+ from ..base import (
10
+ BaseEstimator,
11
+ ClassNamePrefixFeaturesOutMixin,
12
+ TransformerMixin,
13
+ _fit_context,
14
+ )
15
+ from ..linear_model import ridge_regression
16
+ from ..utils import check_random_state
17
+ from ..utils._param_validation import Hidden, Interval, StrOptions
18
+ from ..utils.extmath import svd_flip
19
+ from ..utils.validation import check_array, check_is_fitted
20
+ from ._dict_learning import MiniBatchDictionaryLearning, dict_learning
21
+
22
+
23
+ class _BaseSparsePCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
24
+ """Base class for SparsePCA and MiniBatchSparsePCA"""
25
+
26
+ _parameter_constraints: dict = {
27
+ "n_components": [None, Interval(Integral, 1, None, closed="left")],
28
+ "alpha": [Interval(Real, 0.0, None, closed="left")],
29
+ "ridge_alpha": [Interval(Real, 0.0, None, closed="left")],
30
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
31
+ "tol": [Interval(Real, 0.0, None, closed="left")],
32
+ "method": [StrOptions({"lars", "cd"})],
33
+ "n_jobs": [Integral, None],
34
+ "verbose": ["verbose"],
35
+ "random_state": ["random_state"],
36
+ }
37
+
38
+ def __init__(
39
+ self,
40
+ n_components=None,
41
+ *,
42
+ alpha=1,
43
+ ridge_alpha=0.01,
44
+ max_iter=1000,
45
+ tol=1e-8,
46
+ method="lars",
47
+ n_jobs=None,
48
+ verbose=False,
49
+ random_state=None,
50
+ ):
51
+ self.n_components = n_components
52
+ self.alpha = alpha
53
+ self.ridge_alpha = ridge_alpha
54
+ self.max_iter = max_iter
55
+ self.tol = tol
56
+ self.method = method
57
+ self.n_jobs = n_jobs
58
+ self.verbose = verbose
59
+ self.random_state = random_state
60
+
61
+ @_fit_context(prefer_skip_nested_validation=True)
62
+ def fit(self, X, y=None):
63
+ """Fit the model from data in X.
64
+
65
+ Parameters
66
+ ----------
67
+ X : array-like of shape (n_samples, n_features)
68
+ Training vector, where `n_samples` is the number of samples
69
+ and `n_features` is the number of features.
70
+
71
+ y : Ignored
72
+ Not used, present here for API consistency by convention.
73
+
74
+ Returns
75
+ -------
76
+ self : object
77
+ Returns the instance itself.
78
+ """
79
+ random_state = check_random_state(self.random_state)
80
+ X = self._validate_data(X)
81
+
82
+ self.mean_ = X.mean(axis=0)
83
+ X = X - self.mean_
84
+
85
+ if self.n_components is None:
86
+ n_components = X.shape[1]
87
+ else:
88
+ n_components = self.n_components
89
+
90
+ return self._fit(X, n_components, random_state)
91
+
92
+ def transform(self, X):
93
+ """Least Squares projection of the data onto the sparse components.
94
+
95
+ To avoid instability issues in case the system is under-determined,
96
+ regularization can be applied (Ridge regression) via the
97
+ `ridge_alpha` parameter.
98
+
99
+ Note that Sparse PCA components' orthogonality is not enforced as in PCA,
100
+ hence one cannot use a simple linear projection.
101
+
102
+ Parameters
103
+ ----------
104
+ X : ndarray of shape (n_samples, n_features)
105
+ Test data to be transformed, must have the same number of
106
+ features as the data used to train the model.
107
+
108
+ Returns
109
+ -------
110
+ X_new : ndarray of shape (n_samples, n_components)
111
+ Transformed data.
112
+ """
113
+ check_is_fitted(self)
114
+
115
+ X = self._validate_data(X, reset=False)
116
+ X = X - self.mean_
117
+
118
+ U = ridge_regression(
119
+ self.components_.T, X.T, self.ridge_alpha, solver="cholesky"
120
+ )
121
+
122
+ return U
123
+
124
+ def inverse_transform(self, X):
125
+ """Transform data from the latent space to the original space.
126
+
127
+ This inversion is an approximation due to the loss of information
128
+ induced by the forward decomposition.
129
+
130
+ .. versionadded:: 1.2
131
+
132
+ Parameters
133
+ ----------
134
+ X : ndarray of shape (n_samples, n_components)
135
+ Data in the latent space.
136
+
137
+ Returns
138
+ -------
139
+ X_original : ndarray of shape (n_samples, n_features)
140
+ Reconstructed data in the original space.
141
+ """
142
+ check_is_fitted(self)
143
+ X = check_array(X)
144
+
145
+ return (X @ self.components_) + self.mean_
146
+
147
+ @property
148
+ def _n_features_out(self):
149
+ """Number of transformed output features."""
150
+ return self.components_.shape[0]
151
+
152
+ def _more_tags(self):
153
+ return {
154
+ "preserves_dtype": [np.float64, np.float32],
155
+ }
156
+
157
+
158
+ class SparsePCA(_BaseSparsePCA):
159
+ """Sparse Principal Components Analysis (SparsePCA).
160
+
161
+ Finds the set of sparse components that can optimally reconstruct
162
+ the data. The amount of sparseness is controllable by the coefficient
163
+ of the L1 penalty, given by the parameter alpha.
164
+
165
+ Read more in the :ref:`User Guide <SparsePCA>`.
166
+
167
+ Parameters
168
+ ----------
169
+ n_components : int, default=None
170
+ Number of sparse atoms to extract. If None, then ``n_components``
171
+ is set to ``n_features``.
172
+
173
+ alpha : float, default=1
174
+ Sparsity controlling parameter. Higher values lead to sparser
175
+ components.
176
+
177
+ ridge_alpha : float, default=0.01
178
+ Amount of ridge shrinkage to apply in order to improve
179
+ conditioning when calling the transform method.
180
+
181
+ max_iter : int, default=1000
182
+ Maximum number of iterations to perform.
183
+
184
+ tol : float, default=1e-8
185
+ Tolerance for the stopping condition.
186
+
187
+ method : {'lars', 'cd'}, default='lars'
188
+ Method to be used for optimization.
189
+ lars: uses the least angle regression method to solve the lasso problem
190
+ (linear_model.lars_path)
191
+ cd: uses the coordinate descent method to compute the
192
+ Lasso solution (linear_model.Lasso). Lars will be faster if
193
+ the estimated components are sparse.
194
+
195
+ n_jobs : int, default=None
196
+ Number of parallel jobs to run.
197
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
198
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
199
+ for more details.
200
+
201
+ U_init : ndarray of shape (n_samples, n_components), default=None
202
+ Initial values for the loadings for warm restart scenarios. Only used
203
+ if `U_init` and `V_init` are not None.
204
+
205
+ V_init : ndarray of shape (n_components, n_features), default=None
206
+ Initial values for the components for warm restart scenarios. Only used
207
+ if `U_init` and `V_init` are not None.
208
+
209
+ verbose : int or bool, default=False
210
+ Controls the verbosity; the higher, the more messages. Defaults to 0.
211
+
212
+ random_state : int, RandomState instance or None, default=None
213
+ Used during dictionary learning. Pass an int for reproducible results
214
+ across multiple function calls.
215
+ See :term:`Glossary <random_state>`.
216
+
217
+ Attributes
218
+ ----------
219
+ components_ : ndarray of shape (n_components, n_features)
220
+ Sparse components extracted from the data.
221
+
222
+ error_ : ndarray
223
+ Vector of errors at each iteration.
224
+
225
+ n_components_ : int
226
+ Estimated number of components.
227
+
228
+ .. versionadded:: 0.23
229
+
230
+ n_iter_ : int
231
+ Number of iterations run.
232
+
233
+ mean_ : ndarray of shape (n_features,)
234
+ Per-feature empirical mean, estimated from the training set.
235
+ Equal to ``X.mean(axis=0)``.
236
+
237
+ n_features_in_ : int
238
+ Number of features seen during :term:`fit`.
239
+
240
+ .. versionadded:: 0.24
241
+
242
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
243
+ Names of features seen during :term:`fit`. Defined only when `X`
244
+ has feature names that are all strings.
245
+
246
+ .. versionadded:: 1.0
247
+
248
+ See Also
249
+ --------
250
+ PCA : Principal Component Analysis implementation.
251
+ MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less
252
+ accurate.
253
+ DictionaryLearning : Generic dictionary learning problem using a sparse code.
254
+
255
+ Examples
256
+ --------
257
+ >>> import numpy as np
258
+ >>> from sklearn.datasets import make_friedman1
259
+ >>> from sklearn.decomposition import SparsePCA
260
+ >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
261
+ >>> transformer = SparsePCA(n_components=5, random_state=0)
262
+ >>> transformer.fit(X)
263
+ SparsePCA(...)
264
+ >>> X_transformed = transformer.transform(X)
265
+ >>> X_transformed.shape
266
+ (200, 5)
267
+ >>> # most values in the components_ are zero (sparsity)
268
+ >>> np.mean(transformer.components_ == 0)
269
+ 0.9666...
270
+ """
271
+
272
+ _parameter_constraints: dict = {
273
+ **_BaseSparsePCA._parameter_constraints,
274
+ "U_init": [None, np.ndarray],
275
+ "V_init": [None, np.ndarray],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ n_components=None,
281
+ *,
282
+ alpha=1,
283
+ ridge_alpha=0.01,
284
+ max_iter=1000,
285
+ tol=1e-8,
286
+ method="lars",
287
+ n_jobs=None,
288
+ U_init=None,
289
+ V_init=None,
290
+ verbose=False,
291
+ random_state=None,
292
+ ):
293
+ super().__init__(
294
+ n_components=n_components,
295
+ alpha=alpha,
296
+ ridge_alpha=ridge_alpha,
297
+ max_iter=max_iter,
298
+ tol=tol,
299
+ method=method,
300
+ n_jobs=n_jobs,
301
+ verbose=verbose,
302
+ random_state=random_state,
303
+ )
304
+ self.U_init = U_init
305
+ self.V_init = V_init
306
+
307
+ def _fit(self, X, n_components, random_state):
308
+ """Specialized `fit` for SparsePCA."""
309
+
310
+ code_init = self.V_init.T if self.V_init is not None else None
311
+ dict_init = self.U_init.T if self.U_init is not None else None
312
+ code, dictionary, E, self.n_iter_ = dict_learning(
313
+ X.T,
314
+ n_components,
315
+ alpha=self.alpha,
316
+ tol=self.tol,
317
+ max_iter=self.max_iter,
318
+ method=self.method,
319
+ n_jobs=self.n_jobs,
320
+ verbose=self.verbose,
321
+ random_state=random_state,
322
+ code_init=code_init,
323
+ dict_init=dict_init,
324
+ return_n_iter=True,
325
+ )
326
+ # flip eigenvectors' sign to enforce deterministic output
327
+ code, dictionary = svd_flip(code, dictionary, u_based_decision=False)
328
+ self.components_ = code.T
329
+ components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
330
+ components_norm[components_norm == 0] = 1
331
+ self.components_ /= components_norm
332
+ self.n_components_ = len(self.components_)
333
+
334
+ self.error_ = E
335
+ return self
336
+
337
+
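# Illustrative check of the normalization performed in `_fit` above: every
# non-zero row of `components_` ends up with unit L2 norm, while all-zero rows
# are left untouched (a small sketch, not part of the library's test suite).
import numpy as np
from sklearn.datasets import make_friedman1
from sklearn.decomposition import SparsePCA

X, _ = make_friedman1(n_samples=50, n_features=10, random_state=0)
spca = SparsePCA(n_components=3, random_state=0).fit(X)
norms = np.linalg.norm(spca.components_, axis=1)
assert np.allclose(norms[norms > 0], 1.0)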
338
+ class MiniBatchSparsePCA(_BaseSparsePCA):
339
+ """Mini-batch Sparse Principal Components Analysis.
340
+
341
+ Finds the set of sparse components that can optimally reconstruct
342
+ the data. The amount of sparseness is controllable by the coefficient
343
+ of the L1 penalty, given by the parameter alpha.
344
+
345
+ For an example comparing sparse PCA to PCA, see
346
+ :ref:`sphx_glr_auto_examples_decomposition_plot_faces_decomposition.py`
347
+
348
+ Read more in the :ref:`User Guide <SparsePCA>`.
349
+
350
+ Parameters
351
+ ----------
352
+ n_components : int, default=None
353
+ Number of sparse atoms to extract. If None, then ``n_components``
354
+ is set to ``n_features``.
355
+
356
+ alpha : float, default=1
357
+ Sparsity controlling parameter. Higher values lead to sparser
358
+ components.
359
+
360
+ ridge_alpha : float, default=0.01
361
+ Amount of ridge shrinkage to apply in order to improve
362
+ conditioning when calling the transform method.
363
+
364
+ max_iter : int, default=1_000
365
+ Maximum number of iterations over the complete dataset before
366
+ stopping independently of any early stopping criterion heuristics.
367
+
368
+ .. versionadded:: 1.2
369
+
370
+ .. deprecated:: 1.4
371
+ `max_iter=None` is deprecated in 1.4 and will be removed in 1.6.
372
+ Use the default value (i.e. `1_000`) instead.
373
+
374
+ callback : callable, default=None
375
+ Callable that gets invoked every five iterations.
376
+
377
+ batch_size : int, default=3
378
+ The number of features to take in each mini batch.
379
+
380
+ verbose : int or bool, default=False
381
+ Controls the verbosity; the higher, the more messages. Defaults to 0.
382
+
383
+ shuffle : bool, default=True
384
+ Whether to shuffle the data before splitting it in batches.
385
+
386
+ n_jobs : int, default=None
387
+ Number of parallel jobs to run.
388
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
389
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
390
+ for more details.
391
+
392
+ method : {'lars', 'cd'}, default='lars'
393
+ Method to be used for optimization.
394
+ lars: uses the least angle regression method to solve the lasso problem
395
+ (linear_model.lars_path)
396
+ cd: uses the coordinate descent method to compute the
397
+ Lasso solution (linear_model.Lasso). Lars will be faster if
398
+ the estimated components are sparse.
399
+
400
+ random_state : int, RandomState instance or None, default=None
401
+ Used for random shuffling when ``shuffle`` is set to ``True``,
402
+ during online dictionary learning. Pass an int for reproducible results
403
+ across multiple function calls.
404
+ See :term:`Glossary <random_state>`.
405
+
406
+ tol : float, default=1e-3
407
+ Control early stopping based on the norm of the differences in the
408
+ dictionary between 2 steps.
409
+
410
+ To disable early stopping based on changes in the dictionary, set
411
+ `tol` to 0.0.
412
+
413
+ .. versionadded:: 1.1
414
+
415
+ max_no_improvement : int or None, default=10
416
+ Control early stopping based on the consecutive number of mini batches
417
+ that do not yield an improvement on the smoothed cost function.
418
+
419
+ To disable convergence detection based on cost function, set
420
+ `max_no_improvement` to `None`.
421
+
422
+ .. versionadded:: 1.1
423
+
424
+ Attributes
425
+ ----------
426
+ components_ : ndarray of shape (n_components, n_features)
427
+ Sparse components extracted from the data.
428
+
429
+ n_components_ : int
430
+ Estimated number of components.
431
+
432
+ .. versionadded:: 0.23
433
+
434
+ n_iter_ : int
435
+ Number of iterations run.
436
+
437
+ mean_ : ndarray of shape (n_features,)
438
+ Per-feature empirical mean, estimated from the training set.
439
+ Equal to ``X.mean(axis=0)``.
440
+
441
+ n_features_in_ : int
442
+ Number of features seen during :term:`fit`.
443
+
444
+ .. versionadded:: 0.24
445
+
446
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
447
+ Names of features seen during :term:`fit`. Defined only when `X`
448
+ has feature names that are all strings.
449
+
450
+ .. versionadded:: 1.0
451
+
452
+ See Also
453
+ --------
454
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
455
+ IncrementalPCA : Incremental principal components analysis.
456
+ PCA : Principal component analysis.
457
+ SparsePCA : Sparse Principal Components Analysis.
458
+ TruncatedSVD : Dimensionality reduction using truncated SVD.
459
+
460
+ Examples
461
+ --------
462
+ >>> import numpy as np
463
+ >>> from sklearn.datasets import make_friedman1
464
+ >>> from sklearn.decomposition import MiniBatchSparsePCA
465
+ >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
466
+ >>> transformer = MiniBatchSparsePCA(n_components=5, batch_size=50,
467
+ ... max_iter=10, random_state=0)
468
+ >>> transformer.fit(X)
469
+ MiniBatchSparsePCA(...)
470
+ >>> X_transformed = transformer.transform(X)
471
+ >>> X_transformed.shape
472
+ (200, 5)
473
+ >>> # most values in the components_ are zero (sparsity)
474
+ >>> np.mean(transformer.components_ == 0)
475
+ 0.9...
476
+ """
477
+
478
+ _parameter_constraints: dict = {
479
+ **_BaseSparsePCA._parameter_constraints,
480
+ "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)],
481
+ "callback": [None, callable],
482
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
483
+ "shuffle": ["boolean"],
484
+ "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
485
+ }
486
+
487
+ def __init__(
488
+ self,
489
+ n_components=None,
490
+ *,
491
+ alpha=1,
492
+ ridge_alpha=0.01,
493
+ max_iter=1_000,
494
+ callback=None,
495
+ batch_size=3,
496
+ verbose=False,
497
+ shuffle=True,
498
+ n_jobs=None,
499
+ method="lars",
500
+ random_state=None,
501
+ tol=1e-3,
502
+ max_no_improvement=10,
503
+ ):
504
+ super().__init__(
505
+ n_components=n_components,
506
+ alpha=alpha,
507
+ ridge_alpha=ridge_alpha,
508
+ max_iter=max_iter,
509
+ tol=tol,
510
+ method=method,
511
+ n_jobs=n_jobs,
512
+ verbose=verbose,
513
+ random_state=random_state,
514
+ )
515
+ self.callback = callback
516
+ self.batch_size = batch_size
517
+ self.shuffle = shuffle
518
+ self.max_no_improvement = max_no_improvement
519
+
520
+ def _fit(self, X, n_components, random_state):
521
+ """Specialized `fit` for MiniBatchSparsePCA."""
522
+
523
+ transform_algorithm = "lasso_" + self.method
524
+ est = MiniBatchDictionaryLearning(
525
+ n_components=n_components,
526
+ alpha=self.alpha,
527
+ max_iter=self.max_iter,
528
+ dict_init=None,
529
+ batch_size=self.batch_size,
530
+ shuffle=self.shuffle,
531
+ n_jobs=self.n_jobs,
532
+ fit_algorithm=self.method,
533
+ random_state=random_state,
534
+ transform_algorithm=transform_algorithm,
535
+ transform_alpha=self.alpha,
536
+ verbose=self.verbose,
537
+ callback=self.callback,
538
+ tol=self.tol,
539
+ max_no_improvement=self.max_no_improvement,
540
+ )
541
+ est.set_output(transform="default")
542
+ est.fit(X.T)
543
+
544
+ self.components_, self.n_iter_ = est.transform(X.T).T, est.n_iter_
545
+
546
+ components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
547
+ components_norm[components_norm == 0] = 1
548
+ self.components_ /= components_norm
549
+ self.n_components_ = len(self.components_)
550
+
551
+ return self
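# Illustrative sketch: as implemented in `_BaseSparsePCA.transform` above, the
# projection onto the (non-orthogonal) sparse components is a ridge regression
# rather than a plain dot product, and the equivalence can be checked directly
# with the public scikit-learn API (a small sketch, not part of this module).
import numpy as np
from sklearn.datasets import make_friedman1
from sklearn.decomposition import MiniBatchSparsePCA
from sklearn.linear_model import ridge_regression

X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
est = MiniBatchSparsePCA(n_components=5, batch_size=50, max_iter=10,
                         random_state=0).fit(X)
Xc = X - est.mean_
U = ridge_regression(est.components_.T, Xc.T, est.ridge_alpha, solver="cholesky")
assert np.allclose(U, est.transform(X))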
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py ADDED
@@ -0,0 +1,319 @@
1
+ """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
2
+ """
3
+
4
+ # Author: Lars Buitinck
5
+ # Olivier Grisel <[email protected]>
6
+ # Michael Becker <[email protected]>
7
+ # License: 3-clause BSD.
8
+
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ import scipy.sparse as sp
13
+ from scipy.sparse.linalg import svds
14
+
15
+ from ..base import (
16
+ BaseEstimator,
17
+ ClassNamePrefixFeaturesOutMixin,
18
+ TransformerMixin,
19
+ _fit_context,
20
+ )
21
+ from ..utils import check_array, check_random_state
22
+ from ..utils._arpack import _init_arpack_v0
23
+ from ..utils._param_validation import Interval, StrOptions
24
+ from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
25
+ from ..utils.sparsefuncs import mean_variance_axis
26
+ from ..utils.validation import check_is_fitted
27
+
28
+ __all__ = ["TruncatedSVD"]
29
+
30
+
31
+ class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
32
+ """Dimensionality reduction using truncated SVD (aka LSA).
33
+
34
+ This transformer performs linear dimensionality reduction by means of
35
+ truncated singular value decomposition (SVD). Contrary to PCA, this
36
+ estimator does not center the data before computing the singular value
37
+ decomposition. This means it can work with sparse matrices
38
+ efficiently.
39
+
40
+ In particular, truncated SVD works on term count/tf-idf matrices as
41
+ returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In
42
+ that context, it is known as latent semantic analysis (LSA).
43
+
44
+ This estimator supports two algorithms: a fast randomized SVD solver, and
45
+ a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or
46
+ `X.T * X`, whichever is more efficient.
47
+
48
+ Read more in the :ref:`User Guide <LSA>`.
49
+
50
+ Parameters
51
+ ----------
52
+ n_components : int, default=2
53
+ Desired dimensionality of output data.
54
+ If algorithm='arpack', must be strictly less than the number of features.
55
+ If algorithm='randomized', must be less than or equal to the number of features.
56
+ The default value is useful for visualisation. For LSA, a value of
57
+ 100 is recommended.
58
+
59
+ algorithm : {'arpack', 'randomized'}, default='randomized'
60
+ SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
61
+ (scipy.sparse.linalg.svds), or "randomized" for the randomized
62
+ algorithm due to Halko (2009).
63
+
64
+ n_iter : int, default=5
65
+ Number of iterations for randomized SVD solver. Not used by ARPACK. The
66
+ default is larger than the default in
67
+ :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse
68
+ matrices that may have a large, slowly decaying spectrum.
69
+
70
+ n_oversamples : int, default=10
71
+ Number of oversamples for randomized SVD solver. Not used by ARPACK.
72
+ See :func:`~sklearn.utils.extmath.randomized_svd` for a complete
73
+ description.
74
+
75
+ .. versionadded:: 1.1
76
+
77
+ power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
78
+ Power iteration normalizer for randomized SVD solver.
79
+ Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
80
+ for more details.
81
+
82
+ .. versionadded:: 1.1
83
+
84
+ random_state : int, RandomState instance or None, default=None
85
+ Used during randomized svd. Pass an int for reproducible results across
86
+ multiple function calls.
87
+ See :term:`Glossary <random_state>`.
88
+
89
+ tol : float, default=0.0
90
+ Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
91
+ SVD solver.
92
+
93
+ Attributes
94
+ ----------
95
+ components_ : ndarray of shape (n_components, n_features)
96
+ The right singular vectors of the input data.
97
+
98
+ explained_variance_ : ndarray of shape (n_components,)
99
+ The variance of the training samples transformed by a projection to
100
+ each component.
101
+
102
+ explained_variance_ratio_ : ndarray of shape (n_components,)
103
+ Percentage of variance explained by each of the selected components.
104
+
105
+ singular_values_ : ndarray of shape (n_components,)
106
+ The singular values corresponding to each of the selected components.
107
+ The singular values are equal to the 2-norms of the ``n_components``
108
+ variables in the lower-dimensional space.
109
+
110
+ n_features_in_ : int
111
+ Number of features seen during :term:`fit`.
112
+
113
+ .. versionadded:: 0.24
114
+
115
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
116
+ Names of features seen during :term:`fit`. Defined only when `X`
117
+ has feature names that are all strings.
118
+
119
+ .. versionadded:: 1.0
120
+
121
+ See Also
122
+ --------
123
+ DictionaryLearning : Find a dictionary that sparsely encodes data.
124
+ FactorAnalysis : A simple linear generative model with
125
+ Gaussian latent variables.
126
+ IncrementalPCA : Incremental principal components analysis.
127
+ KernelPCA : Kernel Principal component analysis.
128
+ NMF : Non-Negative Matrix Factorization.
129
+ PCA : Principal component analysis.
130
+
131
+ Notes
132
+ -----
133
+ SVD suffers from a problem called "sign indeterminacy", which means the
134
+ sign of the ``components_`` and the output from transform depend on the
135
+ algorithm and random state. To work around this, fit instances of this
136
+ class to data once, then keep the instance around to do transformations.
137
+
138
+ References
139
+ ----------
140
+ :arxiv:`Halko, et al. (2009). "Finding structure with randomness:
141
+ Stochastic algorithms for constructing approximate matrix decompositions"
142
+ <0909.4061>`
143
+
144
+ Examples
145
+ --------
146
+ >>> from sklearn.decomposition import TruncatedSVD
147
+ >>> from scipy.sparse import csr_matrix
148
+ >>> import numpy as np
149
+ >>> np.random.seed(0)
150
+ >>> X_dense = np.random.rand(100, 100)
151
+ >>> X_dense[:, 2 * np.arange(50)] = 0
152
+ >>> X = csr_matrix(X_dense)
153
+ >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
154
+ >>> svd.fit(X)
155
+ TruncatedSVD(n_components=5, n_iter=7, random_state=42)
156
+ >>> print(svd.explained_variance_ratio_)
157
+ [0.0157... 0.0512... 0.0499... 0.0479... 0.0453...]
158
+ >>> print(svd.explained_variance_ratio_.sum())
159
+ 0.2102...
160
+ >>> print(svd.singular_values_)
161
+ [35.2410... 4.5981... 4.5420... 4.4486... 4.3288...]
162
+ """
163
+
164
+ _parameter_constraints: dict = {
165
+ "n_components": [Interval(Integral, 1, None, closed="left")],
166
+ "algorithm": [StrOptions({"arpack", "randomized"})],
167
+ "n_iter": [Interval(Integral, 0, None, closed="left")],
168
+ "n_oversamples": [Interval(Integral, 1, None, closed="left")],
169
+ "power_iteration_normalizer": [StrOptions({"auto", "OR", "LU", "none"})],
170
+ "random_state": ["random_state"],
171
+ "tol": [Interval(Real, 0, None, closed="left")],
172
+ }
173
+
174
+ def __init__(
175
+ self,
176
+ n_components=2,
177
+ *,
178
+ algorithm="randomized",
179
+ n_iter=5,
180
+ n_oversamples=10,
181
+ power_iteration_normalizer="auto",
182
+ random_state=None,
183
+ tol=0.0,
184
+ ):
185
+ self.algorithm = algorithm
186
+ self.n_components = n_components
187
+ self.n_iter = n_iter
188
+ self.n_oversamples = n_oversamples
189
+ self.power_iteration_normalizer = power_iteration_normalizer
190
+ self.random_state = random_state
191
+ self.tol = tol
192
+
193
+ def fit(self, X, y=None):
194
+ """Fit model on training data X.
195
+
196
+ Parameters
197
+ ----------
198
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
199
+ Training data.
200
+
201
+ y : Ignored
202
+ Not used, present here for API consistency by convention.
203
+
204
+ Returns
205
+ -------
206
+ self : object
207
+ Returns the transformer object.
208
+ """
209
+ self.fit_transform(X)
210
+ return self
211
+
212
+ @_fit_context(prefer_skip_nested_validation=True)
213
+ def fit_transform(self, X, y=None):
214
+ """Fit model to X and perform dimensionality reduction on X.
215
+
216
+ Parameters
217
+ ----------
218
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
219
+ Training data.
220
+
221
+ y : Ignored
222
+ Not used, present here for API consistency by convention.
223
+
224
+ Returns
225
+ -------
226
+ X_new : ndarray of shape (n_samples, n_components)
227
+ Reduced version of X. This will always be a dense array.
228
+ """
229
+ X = self._validate_data(X, accept_sparse=["csr", "csc"], ensure_min_features=2)
230
+ random_state = check_random_state(self.random_state)
231
+
232
+ if self.algorithm == "arpack":
233
+ v0 = _init_arpack_v0(min(X.shape), random_state)
234
+ U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0)
235
+ # svds doesn't abide by scipy.linalg.svd/randomized_svd
236
+ # conventions, so reverse its outputs.
237
+ Sigma = Sigma[::-1]
238
+ U, VT = svd_flip(U[:, ::-1], VT[::-1])
239
+
240
+ elif self.algorithm == "randomized":
241
+ if self.n_components > X.shape[1]:
242
+ raise ValueError(
243
+ f"n_components({self.n_components}) must be <="
244
+ f" n_features({X.shape[1]})."
245
+ )
246
+ U, Sigma, VT = randomized_svd(
247
+ X,
248
+ self.n_components,
249
+ n_iter=self.n_iter,
250
+ n_oversamples=self.n_oversamples,
251
+ power_iteration_normalizer=self.power_iteration_normalizer,
252
+ random_state=random_state,
253
+ )
254
+
255
+ self.components_ = VT
256
+
257
+ # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T,
258
+ # X @ V is not the same as U @ Sigma
259
+ if self.algorithm == "randomized" or (
260
+ self.algorithm == "arpack" and self.tol > 0
261
+ ):
262
+ X_transformed = safe_sparse_dot(X, self.components_.T)
263
+ else:
264
+ X_transformed = U * Sigma
265
+
266
+ # Calculate explained variance & explained variance ratio
267
+ self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
268
+ if sp.issparse(X):
269
+ _, full_var = mean_variance_axis(X, axis=0)
270
+ full_var = full_var.sum()
271
+ else:
272
+ full_var = np.var(X, axis=0).sum()
273
+ self.explained_variance_ratio_ = exp_var / full_var
274
+ self.singular_values_ = Sigma # Store the singular values.
275
+
276
+ return X_transformed
277
+
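# Illustrative check of the variance bookkeeping above for dense input:
# explained_variance_ratio_ equals the per-component variance of X_transformed
# divided by the total per-feature variance of X (small sketch using only the
# public API).
import numpy as np
from sklearn.decomposition import TruncatedSVD

rng = np.random.RandomState(0)
X = rng.rand(50, 8)
svd = TruncatedSVD(n_components=3, random_state=0)
Xt = svd.fit_transform(X)
ratio = np.var(Xt, axis=0) / np.var(X, axis=0).sum()
assert np.allclose(ratio, svd.explained_variance_ratio_)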
278
+ def transform(self, X):
279
+ """Perform dimensionality reduction on X.
280
+
281
+ Parameters
282
+ ----------
283
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
284
+ New data.
285
+
286
+ Returns
287
+ -------
288
+ X_new : ndarray of shape (n_samples, n_components)
289
+ Reduced version of X. This will always be a dense array.
290
+ """
291
+ check_is_fitted(self)
292
+ X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False)
293
+ return safe_sparse_dot(X, self.components_.T)
294
+
295
+ def inverse_transform(self, X):
296
+ """Transform X back to its original space.
297
+
298
+ Returns an array X_original whose transform would be X.
299
+
300
+ Parameters
301
+ ----------
302
+ X : array-like of shape (n_samples, n_components)
303
+ New data.
304
+
305
+ Returns
306
+ -------
307
+ X_original : ndarray of shape (n_samples, n_features)
308
+ Note that this is always a dense array.
309
+ """
310
+ X = check_array(X)
311
+ return np.dot(X, self.components_)
312
+
313
+ def _more_tags(self):
314
+ return {"preserves_dtype": [np.float64, np.float32]}
315
+
316
+ @property
317
+ def _n_features_out(self):
318
+ """Number of transformed output features."""
319
+ return self.components_.shape[0]
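# Illustrative LSA usage, as described in the TruncatedSVD docstring above:
# apply the estimator directly to a sparse tf-idf matrix (the toy corpus is
# made up for the example).
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

docs = [
    "the cat sat on the mat",
    "the dog sat on the log",
    "cats and dogs and mats",
]
X_tfidf = TfidfVectorizer().fit_transform(docs)   # sparse (3, n_terms)
svd = TruncatedSVD(n_components=2, random_state=0)
X_lsa = svd.fit_transform(X_tfidf)                # dense (3, 2)
print(X_lsa.shape, svd.explained_variance_ratio_.sum())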
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py ADDED
@@ -0,0 +1,451 @@
1
+ """
2
+ Test the fastica algorithm.
3
+ """
4
+ import itertools
5
+ import os
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytest
10
+ from scipy import stats
11
+
12
+ from sklearn.decomposition import PCA, FastICA, fastica
13
+ from sklearn.decomposition._fastica import _gs_decorrelation
14
+ from sklearn.exceptions import ConvergenceWarning
15
+ from sklearn.utils._testing import assert_allclose
16
+
17
+
18
+ def center_and_norm(x, axis=-1):
19
+ """Centers and norms x **in place**
20
+
21
+ Parameters
22
+ -----------
23
+ x: ndarray
24
+ Array with an axis of observations (statistical units) measured on
25
+ random variables.
26
+ axis: int, optional
27
+ Axis along which the mean and variance are calculated.
28
+ """
29
+ x = np.rollaxis(x, axis)
30
+ x -= x.mean(axis=0)
31
+ x /= x.std(axis=0)
32
+
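# Illustrative check (not one of the tests below): because np.rollaxis returns
# a view, the in-place operations above standardize the original array along
# the requested axis.
import numpy as np
a = np.random.RandomState(0).randn(3, 100)
center_and_norm(a)  # default axis=-1: each row of `a` is standardized
assert np.allclose(a.mean(axis=1), 0) and np.allclose(a.std(axis=1), 1)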
33
+
34
+ def test_gs():
35
+ # Test gram schmidt orthonormalization
36
+ # generate a random orthogonal matrix
37
+ rng = np.random.RandomState(0)
38
+ W, _, _ = np.linalg.svd(rng.randn(10, 10))
39
+ w = rng.randn(10)
40
+ _gs_decorrelation(w, W, 10)
41
+ assert (w**2).sum() < 1.0e-10
42
+ w = rng.randn(10)
43
+ u = _gs_decorrelation(w, W, 5)
44
+ tmp = np.dot(u, W.T)
45
+ assert (tmp[:5] ** 2).sum() < 1.0e-10
46
+
47
+
48
+ def test_fastica_attributes_dtypes(global_dtype):
49
+ rng = np.random.RandomState(0)
50
+ X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
51
+ fica = FastICA(
52
+ n_components=5, max_iter=1000, whiten="unit-variance", random_state=0
53
+ ).fit(X)
54
+ assert fica.components_.dtype == global_dtype
55
+ assert fica.mixing_.dtype == global_dtype
56
+ assert fica.mean_.dtype == global_dtype
57
+ assert fica.whitening_.dtype == global_dtype
58
+
59
+
60
+ def test_fastica_return_dtypes(global_dtype):
61
+ rng = np.random.RandomState(0)
62
+ X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
63
+ k_, mixing_, s_ = fastica(
64
+ X, max_iter=1000, whiten="unit-variance", random_state=rng
65
+ )
66
+ assert k_.dtype == global_dtype
67
+ assert mixing_.dtype == global_dtype
68
+ assert s_.dtype == global_dtype
69
+
70
+
71
+ @pytest.mark.parametrize("add_noise", [True, False])
72
+ def test_fastica_simple(add_noise, global_random_seed, global_dtype):
73
+ if (
74
+ global_random_seed == 20
75
+ and global_dtype == np.float32
76
+ and not add_noise
77
+ and os.getenv("DISTRIB") == "ubuntu"
78
+ ):
79
+ pytest.xfail(
80
+ "FastICA instability with Ubuntu Atlas build with float32 "
81
+ "global_dtype. For more details, see "
82
+ "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119" # noqa
83
+ )
84
+
85
+ # Test the FastICA algorithm on very simple data.
86
+ rng = np.random.RandomState(global_random_seed)
87
+ n_samples = 1000
88
+ # Generate two sources:
89
+ s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
90
+ s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed)
91
+ s = np.c_[s1, s2].T
92
+ center_and_norm(s)
93
+ s = s.astype(global_dtype)
94
+ s1, s2 = s
95
+
96
+ # Mixing angle
97
+ phi = 0.6
98
+ mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
99
+ mixing = mixing.astype(global_dtype)
100
+ m = np.dot(mixing, s)
101
+
102
+ if add_noise:
103
+ m += 0.1 * rng.randn(2, 1000)
104
+
105
+ center_and_norm(m)
106
+
107
+ # function as fun arg
108
+ def g_test(x):
109
+ return x**3, (3 * x**2).mean(axis=-1)
110
+
111
+ algos = ["parallel", "deflation"]
112
+ nls = ["logcosh", "exp", "cube", g_test]
113
+ whitening = ["arbitrary-variance", "unit-variance", False]
114
+ for algo, nl, whiten in itertools.product(algos, nls, whitening):
115
+ if whiten:
116
+ k_, mixing_, s_ = fastica(
117
+ m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng
118
+ )
119
+ with pytest.raises(ValueError):
120
+ fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo)
121
+ else:
122
+ pca = PCA(n_components=2, whiten=True, random_state=rng)
123
+ X = pca.fit_transform(m.T)
124
+ k_, mixing_, s_ = fastica(
125
+ X, fun=nl, algorithm=algo, whiten=False, random_state=rng
126
+ )
127
+ with pytest.raises(ValueError):
128
+ fastica(X, fun=np.tanh, algorithm=algo)
129
+ s_ = s_.T
130
+ # Check that the mixing model described in the docstring holds:
131
+ if whiten:
132
+ # XXX: exact reconstruction to standard relative tolerance is not
133
+ # possible. This is probably expected when add_noise is True but we
134
+ # also need a non-trivial atol in float32 when add_noise is False.
135
+ #
136
+ # Note that the 2 sources are non-Gaussian in this test.
137
+ atol = 1e-5 if global_dtype == np.float32 else 0
138
+ assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol)
139
+
140
+ center_and_norm(s_)
141
+ s1_, s2_ = s_
142
+ # Check to see if the sources have been estimated
143
+ # in the wrong order
144
+ if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
145
+ s2_, s1_ = s_
146
+ s1_ *= np.sign(np.dot(s1_, s1))
147
+ s2_ *= np.sign(np.dot(s2_, s2))
148
+
149
+ # Check that we have estimated the original sources
150
+ if not add_noise:
151
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2)
152
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2)
153
+ else:
154
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1)
155
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1)
156
+
157
+ # Test FastICA class
158
+ _, _, sources_fun = fastica(
159
+ m.T, fun=nl, algorithm=algo, random_state=global_random_seed
160
+ )
161
+ ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed)
162
+ sources = ica.fit_transform(m.T)
163
+ assert ica.components_.shape == (2, 2)
164
+ assert sources.shape == (1000, 2)
165
+
166
+ assert_allclose(sources_fun, sources)
167
+ # Set atol to account for the different magnitudes of the elements in sources
168
+ # (from 1e-4 to 1e1).
169
+ atol = np.max(np.abs(sources)) * (1e-5 if global_dtype == np.float32 else 1e-7)
170
+ assert_allclose(sources, ica.transform(m.T), atol=atol)
171
+
172
+ assert ica.mixing_.shape == (2, 2)
173
+
174
+ ica = FastICA(fun=np.tanh, algorithm=algo)
175
+ with pytest.raises(ValueError):
176
+ ica.fit(m.T)
177
+
178
+
179
+ def test_fastica_nowhiten():
180
+ m = [[0, 1], [1, 0]]
181
+
182
+ # test for issue #697
183
+ ica = FastICA(n_components=1, whiten=False, random_state=0)
184
+ warn_msg = "Ignoring n_components with whiten=False."
185
+ with pytest.warns(UserWarning, match=warn_msg):
186
+ ica.fit(m)
187
+ assert hasattr(ica, "mixing_")
188
+
189
+
190
+ def test_fastica_convergence_fail():
191
+ # Test the FastICA algorithm on very simple data
192
+ # (see test_non_square_fastica).
193
+ # Ensure a ConvergenceWarning raised if the tolerance is sufficiently low.
194
+ rng = np.random.RandomState(0)
195
+
196
+ n_samples = 1000
197
+ # Generate two sources:
198
+ t = np.linspace(0, 100, n_samples)
199
+ s1 = np.sin(t)
200
+ s2 = np.ceil(np.sin(np.pi * t))
201
+ s = np.c_[s1, s2].T
202
+ center_and_norm(s)
203
+
204
+ # Mixing matrix
205
+ mixing = rng.randn(6, 2)
206
+ m = np.dot(mixing, s)
207
+
208
+ # Do fastICA with tolerance 0. to ensure failing convergence
209
+ warn_msg = (
210
+ "FastICA did not converge. Consider increasing tolerance "
211
+ "or the maximum number of iterations."
212
+ )
213
+ with pytest.warns(ConvergenceWarning, match=warn_msg):
214
+ ica = FastICA(
215
+ algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.0
216
+ )
217
+ ica.fit(m.T)
218
+
219
+
220
+ @pytest.mark.parametrize("add_noise", [True, False])
221
+ def test_non_square_fastica(add_noise):
222
+ # Test the FastICA algorithm on very simple data.
223
+ rng = np.random.RandomState(0)
224
+
225
+ n_samples = 1000
226
+ # Generate two sources:
227
+ t = np.linspace(0, 100, n_samples)
228
+ s1 = np.sin(t)
229
+ s2 = np.ceil(np.sin(np.pi * t))
230
+ s = np.c_[s1, s2].T
231
+ center_and_norm(s)
232
+ s1, s2 = s
233
+
234
+ # Mixing matrix
235
+ mixing = rng.randn(6, 2)
236
+ m = np.dot(mixing, s)
237
+
238
+ if add_noise:
239
+ m += 0.1 * rng.randn(6, n_samples)
240
+
241
+ center_and_norm(m)
242
+
243
+ k_, mixing_, s_ = fastica(
244
+ m.T, n_components=2, whiten="unit-variance", random_state=rng
245
+ )
246
+ s_ = s_.T
247
+
248
+ # Check that the mixing model described in the docstring holds:
249
+ assert_allclose(s_, np.dot(np.dot(mixing_, k_), m))
250
+
251
+ center_and_norm(s_)
252
+ s1_, s2_ = s_
253
+ # Check to see if the sources have been estimated
254
+ # in the wrong order
255
+ if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
256
+ s2_, s1_ = s_
257
+ s1_ *= np.sign(np.dot(s1_, s1))
258
+ s2_ *= np.sign(np.dot(s2_, s2))
259
+
260
+ # Check that we have estimated the original sources
261
+ if not add_noise:
262
+ assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3)
263
+ assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3)
264
+
265
+
266
+ def test_fit_transform(global_random_seed, global_dtype):
267
+ """Test unit variance of transformed data using FastICA algorithm.
268
+
269
+ Check that `fit_transform` gives the same result as applying
270
+ `fit` and then `transform`.
271
+
272
+ Bug #13056
273
+ """
274
+ # multivariate uniform data in [0, 1]
275
+ rng = np.random.RandomState(global_random_seed)
276
+ X = rng.random_sample((100, 10)).astype(global_dtype)
277
+ max_iter = 300
278
+ for whiten, n_components in [["unit-variance", 5], [False, None]]:
279
+ n_components_ = n_components if n_components is not None else X.shape[1]
280
+
281
+ ica = FastICA(
282
+ n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
283
+ )
284
+ with warnings.catch_warnings():
285
+ # make sure that numerical errors do not cause sqrt of negative
286
+ # values
287
+ warnings.simplefilter("error", RuntimeWarning)
288
+ # XXX: for some seeds, the model does not converge.
289
+ # However this is not what we test here.
290
+ warnings.simplefilter("ignore", ConvergenceWarning)
291
+ Xt = ica.fit_transform(X)
292
+ assert ica.components_.shape == (n_components_, 10)
293
+ assert Xt.shape == (X.shape[0], n_components_)
294
+
295
+ ica2 = FastICA(
296
+ n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
297
+ )
298
+ with warnings.catch_warnings():
299
+ # make sure that numerical errors do not cause sqrt of negative
300
+ # values
301
+ warnings.simplefilter("error", RuntimeWarning)
302
+ warnings.simplefilter("ignore", ConvergenceWarning)
303
+ ica2.fit(X)
304
+ assert ica2.components_.shape == (n_components_, 10)
305
+ Xt2 = ica2.transform(X)
306
+
307
+ # XXX: we have to set atol for this test to pass for all seeds when
308
+ # fitting with float32 data. Is this revealing a bug?
309
+ if global_dtype:
310
+ atol = np.abs(Xt2).mean() / 1e6
311
+ else:
312
+ atol = 0.0 # the default rtol is enough for float64 data
313
+ assert_allclose(Xt, Xt2, atol=atol)
314
+
315
+
316
+ @pytest.mark.filterwarnings("ignore:Ignoring n_components with whiten=False.")
317
+ @pytest.mark.parametrize(
318
+ "whiten, n_components, expected_mixing_shape",
319
+ [
320
+ ("arbitrary-variance", 5, (10, 5)),
321
+ ("arbitrary-variance", 10, (10, 10)),
322
+ ("unit-variance", 5, (10, 5)),
323
+ ("unit-variance", 10, (10, 10)),
324
+ (False, 5, (10, 10)),
325
+ (False, 10, (10, 10)),
326
+ ],
327
+ )
328
+ def test_inverse_transform(
329
+ whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype
330
+ ):
331
+ # Test FastICA.inverse_transform
332
+ n_samples = 100
333
+ rng = np.random.RandomState(global_random_seed)
334
+ X = rng.random_sample((n_samples, 10)).astype(global_dtype)
335
+
336
+ ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten)
337
+ with warnings.catch_warnings():
338
+ # For some dataset (depending on the value of global_dtype) the model
339
+ # can fail to converge but this should not impact the definition of
340
+ # a valid inverse transform.
341
+ warnings.simplefilter("ignore", ConvergenceWarning)
342
+ Xt = ica.fit_transform(X)
343
+ assert ica.mixing_.shape == expected_mixing_shape
344
+ X2 = ica.inverse_transform(Xt)
345
+ assert X.shape == X2.shape
346
+
347
+ # reversibility test in non-reduction case
348
+ if n_components == X.shape[1]:
349
+ # XXX: we have to set atol for this test to pass for all seeds when
350
+ # fitting with float32 data. Is this revealing a bug?
351
+ if global_dtype:
352
+ # XXX: dividing by a smaller number makes
353
+ # tests fail for some seeds.
354
+ atol = np.abs(X2).mean() / 1e5
355
+ else:
356
+ atol = 0.0 # the default rtol is enough for float64 data
357
+ assert_allclose(X, X2, atol=atol)
358
+
359
+
360
+ def test_fastica_errors():
361
+ n_features = 3
362
+ n_samples = 10
363
+ rng = np.random.RandomState(0)
364
+ X = rng.random_sample((n_samples, n_features))
365
+ w_init = rng.randn(n_features + 1, n_features + 1)
366
+ with pytest.raises(ValueError, match=r"alpha must be in \[1,2\]"):
367
+ fastica(X, fun_args={"alpha": 0})
368
+ with pytest.raises(
369
+ ValueError, match="w_init has invalid shape.+" r"should be \(3L?, 3L?\)"
370
+ ):
371
+ fastica(X, w_init=w_init)
372
+
373
+
374
+ def test_fastica_whiten_unit_variance():
375
+ """Test unit variance of transformed data using FastICA algorithm.
376
+
377
+ Bug #13056
378
+ """
379
+ rng = np.random.RandomState(0)
380
+ X = rng.random_sample((100, 10))
381
+ n_components = X.shape[1]
382
+ ica = FastICA(n_components=n_components, whiten="unit-variance", random_state=0)
383
+ Xt = ica.fit_transform(X)
384
+
385
+ assert np.var(Xt) == pytest.approx(1.0)
386
+
387
+
388
+ @pytest.mark.parametrize("whiten", ["arbitrary-variance", "unit-variance", False])
389
+ @pytest.mark.parametrize("return_X_mean", [True, False])
390
+ @pytest.mark.parametrize("return_n_iter", [True, False])
391
+ def test_fastica_output_shape(whiten, return_X_mean, return_n_iter):
392
+ n_features = 3
393
+ n_samples = 10
394
+ rng = np.random.RandomState(0)
395
+ X = rng.random_sample((n_samples, n_features))
396
+
397
+ expected_len = 3 + return_X_mean + return_n_iter
398
+
399
+ out = fastica(
400
+ X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean
401
+ )
402
+
403
+ assert len(out) == expected_len
404
+ if not whiten:
405
+ assert out[0] is None
406
+
407
+
408
+ @pytest.mark.parametrize("add_noise", [True, False])
409
+ def test_fastica_simple_different_solvers(add_noise, global_random_seed):
410
+ """Test FastICA is consistent between whiten_solvers."""
411
+ rng = np.random.RandomState(global_random_seed)
412
+ n_samples = 1000
413
+ # Generate two sources:
414
+ s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
415
+ s2 = stats.t.rvs(1, size=n_samples, random_state=rng)
416
+ s = np.c_[s1, s2].T
417
+ center_and_norm(s)
418
+ s1, s2 = s
419
+
420
+ # Mixing angle
421
+ phi = rng.rand() * 2 * np.pi
422
+ mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
423
+ m = np.dot(mixing, s)
424
+
425
+ if add_noise:
426
+ m += 0.1 * rng.randn(2, 1000)
427
+
428
+ center_and_norm(m)
429
+
430
+ outs = {}
431
+ for solver in ("svd", "eigh"):
432
+ ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver=solver)
433
+ sources = ica.fit_transform(m.T)
434
+ outs[solver] = sources
435
+ assert ica.components_.shape == (2, 2)
436
+ assert sources.shape == (1000, 2)
437
+
438
+ # compared numbers are not all on the same magnitude. Using a small atol to
439
+ # make the test less brittle
440
+ assert_allclose(outs["eigh"], outs["svd"], atol=1e-12)
441
+
442
+
443
+ def test_fastica_eigh_low_rank_warning(global_random_seed):
444
+ """Test FastICA eigh solver raises warning for low-rank data."""
445
+ rng = np.random.RandomState(global_random_seed)
446
+ A = rng.randn(10, 2)
447
+ X = A @ A.T
448
+ ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh")
449
+ msg = "There are some small singular values"
450
+ with pytest.warns(UserWarning, match=msg):
451
+ ica.fit(X)
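# Illustrative sketch of the property exercised by `test_fit_transform` above,
# on a small synthetic mixture (the Laplace sources and mixing matrix are made
# up for the example).
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
S = rng.laplace(size=(500, 2))          # two independent non-Gaussian sources
A = np.array([[1.0, 0.5], [0.5, 1.0]])  # mixing matrix
X = S @ A.T                             # observed mixtures
ica = FastICA(n_components=2, whiten="unit-variance", random_state=0,
              max_iter=1000)
S_est = ica.fit_transform(X)
# transform() applies the learned unmixing to centered data, so it matches the
# sources returned by fit_transform up to floating point noise.
assert np.allclose(S_est, ica.transform(X), atol=np.abs(S_est).mean() / 1e6)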
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py ADDED
@@ -0,0 +1,1062 @@
1
+ import re
2
+ import sys
3
+ import warnings
4
+ from io import StringIO
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from scipy import linalg
9
+
10
+ from sklearn.base import clone
11
+ from sklearn.decomposition import NMF, MiniBatchNMF, non_negative_factorization
12
+ from sklearn.decomposition import _nmf as nmf # For testing internals
13
+ from sklearn.exceptions import ConvergenceWarning
14
+ from sklearn.utils._testing import (
15
+ assert_allclose,
16
+ assert_almost_equal,
17
+ assert_array_almost_equal,
18
+ assert_array_equal,
19
+ ignore_warnings,
20
+ )
21
+ from sklearn.utils.extmath import squared_norm
22
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
23
+
24
+
25
+ @pytest.mark.parametrize(
26
+ ["Estimator", "solver"],
27
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
28
+ )
29
+ def test_convergence_warning(Estimator, solver):
30
+ convergence_warning = (
31
+ "Maximum number of iterations 1 reached. Increase it to improve convergence."
32
+ )
33
+ A = np.ones((2, 2))
34
+ with pytest.warns(ConvergenceWarning, match=convergence_warning):
35
+ Estimator(max_iter=1, n_components="auto", **solver).fit(A)
36
+
37
+
38
+ def test_initialize_nn_output():
39
+ # Test that initialization does not return negative values
40
+ rng = np.random.mtrand.RandomState(42)
41
+ data = np.abs(rng.randn(10, 10))
42
+ for init in ("random", "nndsvd", "nndsvda", "nndsvdar"):
43
+ W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
44
+ assert not ((W < 0).any() or (H < 0).any())
45
+
46
+
47
+ # TODO(1.6): remove the warning filter for `n_components`
48
+ @pytest.mark.filterwarnings(
49
+ r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in"
50
+ r" the initialization",
51
+ "ignore:The default value of `n_components` will change",
52
+ )
53
+ def test_parameter_checking():
54
+ # Here we only check for invalid parameter values that are not already
55
+ # automatically tested in the common tests.
56
+
57
+ A = np.ones((2, 2))
58
+
59
+ msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0"
60
+ with pytest.raises(ValueError, match=msg):
61
+ NMF(solver="cd", beta_loss=1.0).fit(A)
62
+ msg = "Negative values in data passed to"
63
+ with pytest.raises(ValueError, match=msg):
64
+ NMF().fit(-A)
65
+ clf = NMF(2, tol=0.1).fit(A)
66
+ with pytest.raises(ValueError, match=msg):
67
+ clf.transform(-A)
68
+ with pytest.raises(ValueError, match=msg):
69
+ nmf._initialize_nmf(-A, 2, "nndsvd")
70
+
71
+ for init in ["nndsvd", "nndsvda", "nndsvdar"]:
72
+ msg = re.escape(
73
+ "init = '{}' can only be used when "
74
+ "n_components <= min(n_samples, n_features)".format(init)
75
+ )
76
+ with pytest.raises(ValueError, match=msg):
77
+ NMF(3, init=init).fit(A)
78
+ with pytest.raises(ValueError, match=msg):
79
+ MiniBatchNMF(3, init=init).fit(A)
80
+ with pytest.raises(ValueError, match=msg):
81
+ nmf._initialize_nmf(A, 3, init)
82
+
83
+
84
+ def test_initialize_close():
85
+ # Test NNDSVD error
86
+ # Test that _initialize_nmf error is less than the standard deviation of
87
+ # the entries in the matrix.
88
+ rng = np.random.mtrand.RandomState(42)
89
+ A = np.abs(rng.randn(10, 10))
90
+ W, H = nmf._initialize_nmf(A, 10, init="nndsvd")
91
+ error = linalg.norm(np.dot(W, H) - A)
92
+ sdev = linalg.norm(A - A.mean())
93
+ assert error <= sdev
94
+
95
+
96
+ def test_initialize_variants():
97
+ # Test NNDSVD variants correctness
98
+ # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
99
+ # 'nndsvd' only where the basic version has zeros.
100
+ rng = np.random.mtrand.RandomState(42)
101
+ data = np.abs(rng.randn(10, 10))
102
+ W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd")
103
+ Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda")
104
+ War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0)
105
+
106
+ for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
107
+ assert_almost_equal(evl[ref != 0], ref[ref != 0])
108
+
109
+
110
+ # ignore UserWarning raised when both solver='mu' and init='nndsvd'
111
+ @ignore_warnings(category=UserWarning)
112
+ @pytest.mark.parametrize(
113
+ ["Estimator", "solver"],
114
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
115
+ )
116
+ @pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random"))
117
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
118
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
119
+ def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H):
120
+ # Test that the decomposition does not contain negative values
121
+ A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)]
122
+ model = Estimator(
123
+ n_components=2,
124
+ init=init,
125
+ alpha_W=alpha_W,
126
+ alpha_H=alpha_H,
127
+ random_state=0,
128
+ **solver,
129
+ )
130
+ transf = model.fit_transform(A)
131
+ assert not ((model.components_ < 0).any() or (transf < 0).any())
132
+
133
+
134
+ @pytest.mark.parametrize(
135
+ ["Estimator", "solver"],
136
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
137
+ )
138
+ def test_nmf_fit_close(Estimator, solver):
139
+ rng = np.random.mtrand.RandomState(42)
140
+ # Test that the fit is not too far away
141
+ pnmf = Estimator(
142
+ 5,
143
+ init="nndsvdar",
144
+ random_state=0,
145
+ max_iter=600,
146
+ **solver,
147
+ )
148
+ X = np.abs(rng.randn(6, 5))
149
+ assert pnmf.fit(X).reconstruction_err_ < 0.1
150
+
151
+
152
+ def test_nmf_true_reconstruction():
153
+ # Test that the fit is not too far away from an exact solution
154
+ # (by construction)
155
+ n_samples = 15
156
+ n_features = 10
157
+ n_components = 5
158
+ beta_loss = 1
159
+ batch_size = 3
160
+ max_iter = 1000
161
+
162
+ rng = np.random.mtrand.RandomState(42)
163
+ W_true = np.zeros([n_samples, n_components])
164
+ W_array = np.abs(rng.randn(n_samples))
165
+ for j in range(n_components):
166
+ W_true[j % n_samples, j] = W_array[j % n_samples]
167
+ H_true = np.zeros([n_components, n_features])
168
+ H_array = np.abs(rng.randn(n_components))
169
+ for j in range(n_features):
170
+ H_true[j % n_components, j] = H_array[j % n_components]
171
+ X = np.dot(W_true, H_true)
172
+
173
+ model = NMF(
174
+ n_components=n_components,
175
+ solver="mu",
176
+ beta_loss=beta_loss,
177
+ max_iter=max_iter,
178
+ random_state=0,
179
+ )
180
+ transf = model.fit_transform(X)
181
+ X_calc = np.dot(transf, model.components_)
182
+
183
+ assert model.reconstruction_err_ < 0.1
184
+ assert_allclose(X, X_calc)
185
+
186
+ mbmodel = MiniBatchNMF(
187
+ n_components=n_components,
188
+ beta_loss=beta_loss,
189
+ batch_size=batch_size,
190
+ random_state=0,
191
+ max_iter=max_iter,
192
+ )
193
+ transf = mbmodel.fit_transform(X)
194
+ X_calc = np.dot(transf, mbmodel.components_)
195
+
196
+ assert mbmodel.reconstruction_err_ < 0.1
197
+ assert_allclose(X, X_calc, atol=1)
198
+
199
+
200
+ @pytest.mark.parametrize("solver", ["cd", "mu"])
201
+ def test_nmf_transform(solver):
202
+ # Test that fit_transform is equivalent to fit.transform for NMF
203
+ # Test that NMF.transform returns close values
204
+ rng = np.random.mtrand.RandomState(42)
205
+ A = np.abs(rng.randn(6, 5))
206
+ m = NMF(
207
+ solver=solver,
208
+ n_components=3,
209
+ init="random",
210
+ random_state=0,
211
+ tol=1e-6,
212
+ )
213
+ ft = m.fit_transform(A)
214
+ t = m.transform(A)
215
+ assert_allclose(ft, t, atol=1e-1)
216
+
217
+
218
+ def test_minibatch_nmf_transform():
219
+ # Test that fit_transform is equivalent to fit.transform for MiniBatchNMF
220
+ # Only guaranteed with fresh restarts
221
+ rng = np.random.mtrand.RandomState(42)
222
+ A = np.abs(rng.randn(6, 5))
223
+ m = MiniBatchNMF(
224
+ n_components=3,
225
+ random_state=0,
226
+ tol=1e-3,
227
+ fresh_restarts=True,
228
+ )
229
+ ft = m.fit_transform(A)
230
+ t = m.transform(A)
231
+ assert_allclose(ft, t)
232
+
233
+
234
+ @pytest.mark.parametrize(
235
+ ["Estimator", "solver"],
236
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
237
+ )
238
+ def test_nmf_transform_custom_init(Estimator, solver):
239
+ # Smoke test that checks if NMF.transform works with custom initialization
240
+ random_state = np.random.RandomState(0)
241
+ A = np.abs(random_state.randn(6, 5))
242
+ n_components = 4
243
+ avg = np.sqrt(A.mean() / n_components)
244
+ H_init = np.abs(avg * random_state.randn(n_components, 5))
245
+ W_init = np.abs(avg * random_state.randn(6, n_components))
246
+
247
+ m = Estimator(
248
+ n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver
249
+ )
250
+ m.fit_transform(A, W=W_init, H=H_init)
251
+ m.transform(A)
252
+
253
+
254
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
255
+ def test_nmf_inverse_transform(solver):
256
+ # Test that NMF.inverse_transform returns close values
257
+ random_state = np.random.RandomState(0)
258
+ A = np.abs(random_state.randn(6, 4))
259
+ m = NMF(
260
+ solver=solver,
261
+ n_components=4,
262
+ init="random",
263
+ random_state=0,
264
+ max_iter=1000,
265
+ )
266
+ ft = m.fit_transform(A)
267
+ A_new = m.inverse_transform(ft)
268
+ assert_array_almost_equal(A, A_new, decimal=2)
269
+
270
+
271
+ # TODO(1.6): remove the warning filter
272
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
273
+ def test_mbnmf_inverse_transform():
274
+ # Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform
275
+ # is close to the identity
276
+ rng = np.random.RandomState(0)
277
+ A = np.abs(rng.randn(6, 4))
278
+ nmf = MiniBatchNMF(
279
+ random_state=rng,
280
+ max_iter=500,
281
+ init="nndsvdar",
282
+ fresh_restarts=True,
283
+ )
284
+ ft = nmf.fit_transform(A)
285
+ A_new = nmf.inverse_transform(ft)
286
+ assert_allclose(A, A_new, rtol=1e-3, atol=1e-2)
287
+
288
+
289
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
290
+ def test_n_components_greater_n_features(Estimator):
291
+ # Smoke test for the case of more components than features.
292
+ rng = np.random.mtrand.RandomState(42)
293
+ A = np.abs(rng.randn(30, 10))
294
+ Estimator(n_components=15, random_state=0, tol=1e-2).fit(A)
295
+
296
+
297
+ @pytest.mark.parametrize(
298
+ ["Estimator", "solver"],
299
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
300
+ )
301
+ @pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
302
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
303
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
304
+ def test_nmf_sparse_input(Estimator, solver, sparse_container, alpha_W, alpha_H):
305
+ # Test that sparse matrices are accepted as input
306
+ rng = np.random.mtrand.RandomState(42)
307
+ A = np.abs(rng.randn(10, 10))
308
+ A[:, 2 * np.arange(5)] = 0
309
+ A_sparse = sparse_container(A)
310
+
311
+ est1 = Estimator(
312
+ n_components=5,
313
+ init="random",
314
+ alpha_W=alpha_W,
315
+ alpha_H=alpha_H,
316
+ random_state=0,
317
+ tol=0,
318
+ max_iter=100,
319
+ **solver,
320
+ )
321
+ est2 = clone(est1)
322
+
323
+ W1 = est1.fit_transform(A)
324
+ W2 = est2.fit_transform(A_sparse)
325
+ H1 = est1.components_
326
+ H2 = est2.components_
327
+
328
+ assert_allclose(W1, W2)
329
+ assert_allclose(H1, H2)
330
+
331
+
332
+ @pytest.mark.parametrize(
333
+ ["Estimator", "solver"],
334
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
335
+ )
336
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
337
+ def test_nmf_sparse_transform(Estimator, solver, csc_container):
338
+ # Test that transform works on sparse data. Issue #2124
339
+ rng = np.random.mtrand.RandomState(42)
340
+ A = np.abs(rng.randn(3, 2))
341
+ A[1, 1] = 0
342
+ A = csc_container(A)
343
+
344
+ model = Estimator(random_state=0, n_components=2, max_iter=400, **solver)
345
+ A_fit_tr = model.fit_transform(A)
346
+ A_tr = model.transform(A)
347
+ assert_allclose(A_fit_tr, A_tr, atol=1e-1)
348
+
349
+
350
+ # TODO(1.6): remove the warning filter
351
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
352
+ @pytest.mark.parametrize("init", ["random", "nndsvd"])
353
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
354
+ @pytest.mark.parametrize("alpha_W", (0.0, 1.0))
355
+ @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
356
+ def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H):
357
+ # Test that the function is called in the same way, either directly
358
+ # or through the NMF class
359
+ max_iter = 500
360
+ rng = np.random.mtrand.RandomState(42)
361
+ A = np.abs(rng.randn(10, 10))
362
+ A[:, 2 * np.arange(5)] = 0
363
+
364
+ W_nmf, H, _ = non_negative_factorization(
365
+ A,
366
+ init=init,
367
+ solver=solver,
368
+ max_iter=max_iter,
369
+ alpha_W=alpha_W,
370
+ alpha_H=alpha_H,
371
+ random_state=1,
372
+ tol=1e-2,
373
+ )
374
+ W_nmf_2, H, _ = non_negative_factorization(
375
+ A,
376
+ H=H,
377
+ update_H=False,
378
+ init=init,
379
+ solver=solver,
380
+ max_iter=max_iter,
381
+ alpha_W=alpha_W,
382
+ alpha_H=alpha_H,
383
+ random_state=1,
384
+ tol=1e-2,
385
+ )
386
+
387
+ model_class = NMF(
388
+ init=init,
389
+ solver=solver,
390
+ max_iter=max_iter,
391
+ alpha_W=alpha_W,
392
+ alpha_H=alpha_H,
393
+ random_state=1,
394
+ tol=1e-2,
395
+ )
396
+ W_cls = model_class.fit_transform(A)
397
+ W_cls_2 = model_class.transform(A)
398
+
399
+ assert_allclose(W_nmf, W_cls)
400
+ assert_allclose(W_nmf_2, W_cls_2)
401
+
402
+
403
+ def test_non_negative_factorization_checking():
404
+ # Note that the validity of parameter types and range of possible values
405
+ # for scalar numerical or str parameters is already checked in the common
406
+ # tests. Here we only check for problems that cannot be captured by simple
407
+ # declarative constraints on the valid parameter values.
408
+
409
+ A = np.ones((2, 2))
410
+ # Test parameters checking in public function
411
+ nnmf = non_negative_factorization
412
+ msg = re.escape("Negative values in data passed to NMF (input H)")
413
+ with pytest.raises(ValueError, match=msg):
414
+ nnmf(A, A, -A, 2, init="custom")
415
+ msg = re.escape("Negative values in data passed to NMF (input W)")
416
+ with pytest.raises(ValueError, match=msg):
417
+ nnmf(A, -A, A, 2, init="custom")
418
+ msg = re.escape("Array passed to NMF (input H) is full of zeros")
419
+ with pytest.raises(ValueError, match=msg):
420
+ nnmf(A, A, 0 * A, 2, init="custom")
421
+
422
+
423
+ def _beta_divergence_dense(X, W, H, beta):
424
+ """Compute the beta-divergence of X and W.H for dense array only.
425
+
426
+ Used as a reference for testing nmf._beta_divergence.
427
+ """
428
+ WH = np.dot(W, H)
429
+
430
+ if beta == 2:
431
+ return squared_norm(X - WH) / 2
432
+
433
+ WH_Xnonzero = WH[X != 0]
434
+ X_nonzero = X[X != 0]
435
+ np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
436
+
437
+ if beta == 1:
438
+ res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
439
+ res += WH.sum() - X.sum()
440
+
441
+ elif beta == 0:
442
+ div = X_nonzero / WH_Xnonzero
443
+ res = np.sum(div) - X.size - np.sum(np.log(div))
444
+ else:
445
+ res = (X_nonzero**beta).sum()
446
+ res += (beta - 1) * (WH**beta).sum()
447
+ res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
448
+ res /= beta * (beta - 1)
449
+
450
+ return res
451
+
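For reference, the quantity this dense helper reproduces is the generalized beta-divergence between X and the reconstruction WH. A standard formulation (stated here for positive entries; it is not quoted from the scikit-learn sources) is

\[
d_\beta(X \,\|\, WH) =
\begin{cases}
\dfrac{1}{\beta(\beta-1)} \sum_{ij} \Bigl( X_{ij}^{\beta} + (\beta-1)(WH)_{ij}^{\beta} - \beta\, X_{ij}\, (WH)_{ij}^{\beta-1} \Bigr), & \beta \notin \{0, 1\}, \\[1ex]
\sum_{ij} \Bigl( X_{ij} \log \dfrac{X_{ij}}{(WH)_{ij}} - X_{ij} + (WH)_{ij} \Bigr), & \beta = 1 \ \text{(generalized KL)}, \\[1ex]
\sum_{ij} \Bigl( \dfrac{X_{ij}}{(WH)_{ij}} - \log \dfrac{X_{ij}}{(WH)_{ij}} - 1 \Bigr), & \beta = 0 \ \text{(Itakura-Saito)},
\end{cases}
\]

and beta = 2 reduces to half the squared Frobenius norm, which is the early-return branch of the helper above.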
452
+
453
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
454
+ def test_beta_divergence(csr_container):
455
+ # Compare _beta_divergence with the reference _beta_divergence_dense
456
+ n_samples = 20
457
+ n_features = 10
458
+ n_components = 5
459
+ beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0]
460
+
461
+ # initialization
462
+ rng = np.random.mtrand.RandomState(42)
463
+ X = rng.randn(n_samples, n_features)
464
+ np.clip(X, 0, None, out=X)
465
+ X_csr = csr_container(X)
466
+ W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
467
+
468
+ for beta in beta_losses:
469
+ ref = _beta_divergence_dense(X, W, H, beta)
470
+ loss = nmf._beta_divergence(X, W, H, beta)
471
+ loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
472
+
473
+ assert_almost_equal(ref, loss, decimal=7)
474
+ assert_almost_equal(ref, loss_csr, decimal=7)
475
+
476
+
477
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
478
+ def test_special_sparse_dot(csr_container):
479
+ # Test the function that computes np.dot(W, H), only where X is non zero.
480
+ n_samples = 10
481
+ n_features = 5
482
+ n_components = 3
483
+ rng = np.random.mtrand.RandomState(42)
484
+ X = rng.randn(n_samples, n_features)
485
+ np.clip(X, 0, None, out=X)
486
+ X_csr = csr_container(X)
487
+
488
+ W = np.abs(rng.randn(n_samples, n_components))
489
+ H = np.abs(rng.randn(n_components, n_features))
490
+
491
+ WH_safe = nmf._special_sparse_dot(W, H, X_csr)
492
+ WH = nmf._special_sparse_dot(W, H, X)
493
+
494
+ # test that both results have same values, in X_csr nonzero elements
495
+ ii, jj = X_csr.nonzero()
496
+ WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
497
+ assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
498
+
499
+ # test that WH_safe and X_csr have the same sparse structure
500
+ assert_array_equal(WH_safe.indices, X_csr.indices)
501
+ assert_array_equal(WH_safe.indptr, X_csr.indptr)
502
+ assert_array_equal(WH_safe.shape, X_csr.shape)
503
+
504
+
505
+ @ignore_warnings(category=ConvergenceWarning)
506
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
507
+ def test_nmf_multiplicative_update_sparse(csr_container):
508
+ # Compare sparse and dense input in multiplicative update NMF
509
+ # Also test continuity of the results with respect to beta_loss parameter
510
+ n_samples = 20
511
+ n_features = 10
512
+ n_components = 5
513
+ alpha = 0.1
514
+ l1_ratio = 0.5
515
+ n_iter = 20
516
+
517
+ # initialization
518
+ rng = np.random.mtrand.RandomState(1337)
519
+ X = rng.randn(n_samples, n_features)
520
+ X = np.abs(X)
521
+ X_csr = csr_container(X)
522
+ W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
523
+
524
+ for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
525
+ # Reference with dense array X
526
+ W, H = W0.copy(), H0.copy()
527
+ W1, H1, _ = non_negative_factorization(
528
+ X,
529
+ W,
530
+ H,
531
+ n_components,
532
+ init="custom",
533
+ update_H=True,
534
+ solver="mu",
535
+ beta_loss=beta_loss,
536
+ max_iter=n_iter,
537
+ alpha_W=alpha,
538
+ l1_ratio=l1_ratio,
539
+ random_state=42,
540
+ )
541
+
542
+ # Compare with sparse X
543
+ W, H = W0.copy(), H0.copy()
544
+ W2, H2, _ = non_negative_factorization(
545
+ X_csr,
546
+ W,
547
+ H,
548
+ n_components,
549
+ init="custom",
550
+ update_H=True,
551
+ solver="mu",
552
+ beta_loss=beta_loss,
553
+ max_iter=n_iter,
554
+ alpha_W=alpha,
555
+ l1_ratio=l1_ratio,
556
+ random_state=42,
557
+ )
558
+
559
+ assert_allclose(W1, W2, atol=1e-7)
560
+ assert_allclose(H1, H2, atol=1e-7)
561
+
562
+ # Compare with almost same beta_loss, since some values have a specific
563
+ # behavior, but the results should be continuous w.r.t beta_loss
564
+ beta_loss -= 1.0e-5
565
+ W, H = W0.copy(), H0.copy()
566
+ W3, H3, _ = non_negative_factorization(
567
+ X_csr,
568
+ W,
569
+ H,
570
+ n_components,
571
+ init="custom",
572
+ update_H=True,
573
+ solver="mu",
574
+ beta_loss=beta_loss,
575
+ max_iter=n_iter,
576
+ alpha_W=alpha,
577
+ l1_ratio=l1_ratio,
578
+ random_state=42,
579
+ )
580
+
581
+ assert_allclose(W1, W3, atol=1e-4)
582
+ assert_allclose(H1, H3, atol=1e-4)
583
+
584
+
585
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
586
+ def test_nmf_negative_beta_loss(csr_container):
587
+ # Test that an error is raised if beta_loss < 0 and X contains zeros.
588
+ # Test that the output has no NaN values when the input contains zeros.
589
+ n_samples = 6
590
+ n_features = 5
591
+ n_components = 3
592
+
593
+ rng = np.random.mtrand.RandomState(42)
594
+ X = rng.randn(n_samples, n_features)
595
+ np.clip(X, 0, None, out=X)
596
+ X_csr = csr_container(X)
597
+
598
+ def _assert_nmf_no_nan(X, beta_loss):
599
+ W, H, _ = non_negative_factorization(
600
+ X,
601
+ init="random",
602
+ n_components=n_components,
603
+ solver="mu",
604
+ beta_loss=beta_loss,
605
+ random_state=0,
606
+ max_iter=1000,
607
+ )
608
+ assert not np.any(np.isnan(W))
609
+ assert not np.any(np.isnan(H))
610
+
611
+ msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
612
+ for beta_loss in (-0.6, 0.0):
613
+ with pytest.raises(ValueError, match=msg):
614
+ _assert_nmf_no_nan(X, beta_loss)
615
+ _assert_nmf_no_nan(X + 1e-9, beta_loss)
616
+
617
+ for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5):
618
+ _assert_nmf_no_nan(X, beta_loss)
619
+ _assert_nmf_no_nan(X_csr, beta_loss)
620
+
621
+
622
+ # TODO(1.6): remove the warning filter
623
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
624
+ @pytest.mark.parametrize("beta_loss", [-0.5, 0.0])
625
+ def test_minibatch_nmf_negative_beta_loss(beta_loss):
626
+ """Check that an error is raised if beta_loss < 0 and X contains zeros."""
627
+ rng = np.random.RandomState(0)
628
+ X = rng.normal(size=(6, 5))
629
+ X[X < 0] = 0
630
+
631
+ nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0)
632
+
633
+ msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
634
+ with pytest.raises(ValueError, match=msg):
635
+ nmf.fit(X)
636
+
637
+
638
+ @pytest.mark.parametrize(
639
+ ["Estimator", "solver"],
640
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
641
+ )
642
+ def test_nmf_regularization(Estimator, solver):
643
+ # Test the effect of L1 and L2 regularizations
644
+ n_samples = 6
645
+ n_features = 5
646
+ n_components = 3
647
+ rng = np.random.mtrand.RandomState(42)
648
+ X = np.abs(rng.randn(n_samples, n_features))
649
+
650
+ # L1 regularization should increase the number of zeros
651
+ l1_ratio = 1.0
652
+ regul = Estimator(
653
+ n_components=n_components,
654
+ alpha_W=0.5,
655
+ l1_ratio=l1_ratio,
656
+ random_state=42,
657
+ **solver,
658
+ )
659
+ model = Estimator(
660
+ n_components=n_components,
661
+ alpha_W=0.0,
662
+ l1_ratio=l1_ratio,
663
+ random_state=42,
664
+ **solver,
665
+ )
666
+
667
+ W_regul = regul.fit_transform(X)
668
+ W_model = model.fit_transform(X)
669
+
670
+ H_regul = regul.components_
671
+ H_model = model.components_
672
+
673
+ eps = np.finfo(np.float64).eps
674
+ W_regul_n_zeros = W_regul[W_regul <= eps].size
675
+ W_model_n_zeros = W_model[W_model <= eps].size
676
+ H_regul_n_zeros = H_regul[H_regul <= eps].size
677
+ H_model_n_zeros = H_model[H_model <= eps].size
678
+
679
+ assert W_regul_n_zeros > W_model_n_zeros
680
+ assert H_regul_n_zeros > H_model_n_zeros
681
+
682
+ # L2 regularization should decrease the sum of the squared norm
683
+ # of the matrices W and H
684
+ l1_ratio = 0.0
685
+ regul = Estimator(
686
+ n_components=n_components,
687
+ alpha_W=0.5,
688
+ l1_ratio=l1_ratio,
689
+ random_state=42,
690
+ **solver,
691
+ )
692
+ model = Estimator(
693
+ n_components=n_components,
694
+ alpha_W=0.0,
695
+ l1_ratio=l1_ratio,
696
+ random_state=42,
697
+ **solver,
698
+ )
699
+
700
+ W_regul = regul.fit_transform(X)
701
+ W_model = model.fit_transform(X)
702
+
703
+ H_regul = regul.components_
704
+ H_model = model.components_
705
+
706
+ assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > (
707
+ linalg.norm(W_regul)
708
+ ) ** 2.0 + (linalg.norm(H_regul)) ** 2.0
709
+
710
+
711
+ @ignore_warnings(category=ConvergenceWarning)
712
+ @pytest.mark.parametrize("solver", ("cd", "mu"))
713
+ def test_nmf_decreasing(solver):
714
+ # test that the objective function is decreasing at each iteration
715
+ n_samples = 20
716
+ n_features = 15
717
+ n_components = 10
718
+ alpha = 0.1
719
+ l1_ratio = 0.5
720
+ tol = 0.0
721
+
722
+ # initialization
723
+ rng = np.random.mtrand.RandomState(42)
724
+ X = rng.randn(n_samples, n_features)
725
+ np.abs(X, X)
726
+ W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
727
+
728
+ for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
729
+ if solver != "mu" and beta_loss != 2:
730
+ # not implemented
731
+ continue
732
+ W, H = W0.copy(), H0.copy()
733
+ previous_loss = None
734
+ for _ in range(30):
735
+ # one more iteration starting from the previous results
736
+ W, H, _ = non_negative_factorization(
737
+ X,
738
+ W,
739
+ H,
740
+ beta_loss=beta_loss,
741
+ init="custom",
742
+ n_components=n_components,
743
+ max_iter=1,
744
+ alpha_W=alpha,
745
+ solver=solver,
746
+ tol=tol,
747
+ l1_ratio=l1_ratio,
748
+ verbose=0,
749
+ random_state=0,
750
+ update_H=True,
751
+ )
752
+
753
+ loss = (
754
+ nmf._beta_divergence(X, W, H, beta_loss)
755
+ + alpha * l1_ratio * n_features * W.sum()
756
+ + alpha * l1_ratio * n_samples * H.sum()
757
+ + alpha * (1 - l1_ratio) * n_features * (W**2).sum()
758
+ + alpha * (1 - l1_ratio) * n_samples * (H**2).sum()
759
+ )
760
+ if previous_loss is not None:
761
+ assert previous_loss > loss
762
+ previous_loss = loss
763
+
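Written out, the `loss` tracked in the loop above is the beta-divergence plus the L1/L2 penalties exactly as the test computes them (the penalty on W is scaled by n_features, the penalty on H by n_samples):

\[
L(W, H) = d_\beta(X \,\|\, WH)
+ \alpha \rho \bigl( n_{\mathrm{features}} \lVert W \rVert_1 + n_{\mathrm{samples}} \lVert H \rVert_1 \bigr)
+ \alpha (1-\rho) \bigl( n_{\mathrm{features}} \lVert W \rVert_F^2 + n_{\mathrm{samples}} \lVert H \rVert_F^2 \bigr),
\]

with \(\alpha = 0.1\) and \(\rho = \texttt{l1\_ratio} = 0.5\) here; the assertion is simply that this objective decreases after every extra iteration.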
764
+
765
+ def test_nmf_underflow():
766
+ # Regression test for an underflow issue in _beta_divergence
767
+ rng = np.random.RandomState(0)
768
+ n_samples, n_features, n_components = 10, 2, 2
769
+ X = np.abs(rng.randn(n_samples, n_features)) * 10
770
+ W = np.abs(rng.randn(n_samples, n_components)) * 10
771
+ H = np.abs(rng.randn(n_components, n_features))
772
+
773
+ X[0, 0] = 0
774
+ ref = nmf._beta_divergence(X, W, H, beta=1.0)
775
+ X[0, 0] = 1e-323
776
+ res = nmf._beta_divergence(X, W, H, beta=1.0)
777
+ assert_almost_equal(res, ref)
778
+
779
+
780
+ # TODO(1.6): remove the warning filter
781
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
782
+ @pytest.mark.parametrize(
783
+ "dtype_in, dtype_out",
784
+ [
785
+ (np.float32, np.float32),
786
+ (np.float64, np.float64),
787
+ (np.int32, np.float64),
788
+ (np.int64, np.float64),
789
+ ],
790
+ )
791
+ @pytest.mark.parametrize(
792
+ ["Estimator", "solver"],
793
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
794
+ )
795
+ def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out):
796
+ # Check that NMF preserves dtype (float32 and float64)
797
+ X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False)
798
+ np.abs(X, out=X)
799
+
800
+ nmf = Estimator(
801
+ alpha_W=1.0,
802
+ alpha_H=1.0,
803
+ tol=1e-2,
804
+ random_state=0,
805
+ **solver,
806
+ )
807
+
808
+ assert nmf.fit(X).transform(X).dtype == dtype_out
809
+ assert nmf.fit_transform(X).dtype == dtype_out
810
+ assert nmf.components_.dtype == dtype_out
811
+
812
+
813
+ # TODO(1.6): remove the warning filter
814
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
815
+ @pytest.mark.parametrize(
816
+ ["Estimator", "solver"],
817
+ [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
818
+ )
819
+ def test_nmf_float32_float64_consistency(Estimator, solver):
820
+ # Check that the result of NMF is the same between float32 and float64
821
+ X = np.random.RandomState(0).randn(50, 7)
822
+ np.abs(X, out=X)
823
+ nmf32 = Estimator(random_state=0, tol=1e-3, **solver)
824
+ W32 = nmf32.fit_transform(X.astype(np.float32))
825
+ nmf64 = Estimator(random_state=0, tol=1e-3, **solver)
826
+ W64 = nmf64.fit_transform(X)
827
+
828
+ assert_allclose(W32, W64, atol=1e-5)
829
+
830
+
831
+ # TODO(1.6): remove the warning filter
832
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
833
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
834
+ def test_nmf_custom_init_dtype_error(Estimator):
835
+ # Check that an error is raised if custom H and/or W don't have the same
836
+ # dtype as X.
837
+ rng = np.random.RandomState(0)
838
+ X = rng.random_sample((20, 15))
839
+ H = rng.random_sample((15, 15)).astype(np.float32)
840
+ W = rng.random_sample((20, 15))
841
+
842
+ with pytest.raises(TypeError, match="should have the same dtype as X"):
843
+ Estimator(init="custom").fit(X, H=H, W=W)
844
+
845
+ with pytest.raises(TypeError, match="should have the same dtype as X"):
846
+ non_negative_factorization(X, H=H, update_H=False)
847
+
848
+
849
+ @pytest.mark.parametrize("beta_loss", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5])
850
+ def test_nmf_minibatchnmf_equivalence(beta_loss):
851
+ # Test that MiniBatchNMF is equivalent to NMF when batch_size = n_samples and
852
+ # forget_factor 0.0 (stopping criterion put aside)
853
+ rng = np.random.mtrand.RandomState(42)
854
+ X = np.abs(rng.randn(48, 5))
855
+
856
+ nmf = NMF(
857
+ n_components=5,
858
+ beta_loss=beta_loss,
859
+ solver="mu",
860
+ random_state=0,
861
+ tol=0,
862
+ )
863
+ mbnmf = MiniBatchNMF(
864
+ n_components=5,
865
+ beta_loss=beta_loss,
866
+ random_state=0,
867
+ tol=0,
868
+ max_no_improvement=None,
869
+ batch_size=X.shape[0],
870
+ forget_factor=0.0,
871
+ )
872
+ W = nmf.fit_transform(X)
873
+ mbW = mbnmf.fit_transform(X)
874
+ assert_allclose(W, mbW)
875
+
876
+
877
+ def test_minibatch_nmf_partial_fit():
878
+ # Check fit / partial_fit equivalence. Applicable only with fresh restarts.
879
+ rng = np.random.mtrand.RandomState(42)
880
+ X = np.abs(rng.randn(100, 5))
881
+
882
+ n_components = 5
883
+ batch_size = 10
884
+ max_iter = 2
885
+
886
+ mbnmf1 = MiniBatchNMF(
887
+ n_components=n_components,
888
+ init="custom",
889
+ random_state=0,
890
+ max_iter=max_iter,
891
+ batch_size=batch_size,
892
+ tol=0,
893
+ max_no_improvement=None,
894
+ fresh_restarts=False,
895
+ )
896
+ mbnmf2 = MiniBatchNMF(n_components=n_components, init="custom", random_state=0)
897
+
898
+ # Force the same init of H (W is recomputed anyway) to be able to compare results.
899
+ W, H = nmf._initialize_nmf(
900
+ X, n_components=n_components, init="random", random_state=0
901
+ )
902
+
903
+ mbnmf1.fit(X, W=W, H=H)
904
+ for i in range(max_iter):
905
+ for j in range(0, X.shape[0], batch_size):
906
+ mbnmf2.partial_fit(X[j : j + batch_size], W=W[:batch_size], H=H)
907
+
908
+ assert mbnmf1.n_steps_ == mbnmf2.n_steps_
909
+ assert_allclose(mbnmf1.components_, mbnmf2.components_)
910
+
911
+
912
+ def test_feature_names_out():
913
+ """Check feature names out for NMF."""
914
+ random_state = np.random.RandomState(0)
915
+ X = np.abs(random_state.randn(10, 4))
916
+ nmf = NMF(n_components=3).fit(X)
917
+
918
+ names = nmf.get_feature_names_out()
919
+ assert_array_equal([f"nmf{i}" for i in range(3)], names)
920
+
921
+
922
+ # TODO(1.6): remove the warning filter
923
+ @pytest.mark.filterwarnings("ignore:The default value of `n_components` will change")
924
+ def test_minibatch_nmf_verbose():
925
+ # Check verbose mode of MiniBatchNMF for better coverage.
926
+ A = np.random.RandomState(0).random_sample((100, 10))
927
+ nmf = MiniBatchNMF(tol=1e-2, random_state=0, verbose=1)
928
+ old_stdout = sys.stdout
929
+ sys.stdout = StringIO()
930
+ try:
931
+ nmf.fit(A)
932
+ finally:
933
+ sys.stdout = old_stdout
934
+
935
+
936
+ # TODO(1.5): remove this test
937
+ def test_NMF_inverse_transform_W_deprecation():
938
+ rng = np.random.mtrand.RandomState(42)
939
+ A = np.abs(rng.randn(6, 5))
940
+ est = NMF(
941
+ n_components=3,
942
+ init="random",
943
+ random_state=0,
944
+ tol=1e-6,
945
+ )
946
+ Xt = est.fit_transform(A)
947
+
948
+ with pytest.raises(TypeError, match="Missing required positional argument"):
949
+ est.inverse_transform()
950
+
951
+ with pytest.raises(ValueError, match="Please provide only"):
952
+ est.inverse_transform(Xt=Xt, W=Xt)
953
+
954
+ with warnings.catch_warnings(record=True):
955
+ warnings.simplefilter("error")
956
+ est.inverse_transform(Xt)
957
+
958
+ with pytest.warns(FutureWarning, match="Input argument `W` was renamed to `Xt`"):
959
+ est.inverse_transform(W=Xt)
960
+
961
+
962
+ @pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
963
+ def test_nmf_n_components_auto(Estimator):
964
+ # Check that n_components is correctly inferred
965
+ # from the provided custom initialization.
966
+ rng = np.random.RandomState(0)
967
+ X = rng.random_sample((6, 5))
968
+ W = rng.random_sample((6, 2))
969
+ H = rng.random_sample((2, 5))
970
+ est = Estimator(
971
+ n_components="auto",
972
+ init="custom",
973
+ random_state=0,
974
+ tol=1e-6,
975
+ )
976
+ est.fit_transform(X, W=W, H=H)
977
+ assert est._n_components == H.shape[0]
978
+
979
+
980
+ def test_nmf_non_negative_factorization_n_components_auto():
981
+ # Check that n_components is correctly inferred from the provided
982
+ # custom initialization.
983
+ rng = np.random.RandomState(0)
984
+ X = rng.random_sample((6, 5))
985
+ W_init = rng.random_sample((6, 2))
986
+ H_init = rng.random_sample((2, 5))
987
+ W, H, _ = non_negative_factorization(
988
+ X, W=W_init, H=H_init, init="custom", n_components="auto"
989
+ )
990
+ assert H.shape == H_init.shape
991
+ assert W.shape == W_init.shape
992
+
993
+
994
+ # TODO(1.6): remove
995
+ def test_nmf_n_components_default_value_warning():
996
+ rng = np.random.RandomState(0)
997
+ X = rng.random_sample((6, 5))
998
+ H = rng.random_sample((2, 5))
999
+ with pytest.warns(
1000
+ FutureWarning, match="The default value of `n_components` will change from"
1001
+ ):
1002
+ non_negative_factorization(X, H=H)
1003
+
1004
+
1005
+ def test_nmf_n_components_auto_no_h_update():
1006
+ # Tests that non_negative_factorization does not fail when setting
1007
+ # n_components="auto", and that the inferred n_components
1008
+ # value is the right one.
1009
+ rng = np.random.RandomState(0)
1010
+ X = rng.random_sample((6, 5))
1011
+ H_true = rng.random_sample((2, 5))
1012
+ W, H, _ = non_negative_factorization(
1013
+ X, H=H_true, n_components="auto", update_H=False
1014
+ ) # should not fail
1015
+ assert_allclose(H, H_true)
1016
+ assert W.shape == (X.shape[0], H_true.shape[0])
1017
+
1018
+
1019
+ def test_nmf_w_h_not_used_warning():
1020
+ # Check that warnings are raised if user provided W and H are not used
1021
+ # and initialization overrides value of W or H
1022
+ rng = np.random.RandomState(0)
1023
+ X = rng.random_sample((6, 5))
1024
+ W_init = rng.random_sample((6, 2))
1025
+ H_init = rng.random_sample((2, 5))
1026
+ with pytest.warns(
1027
+ RuntimeWarning,
1028
+ match="When init!='custom', provided W or H are ignored",
1029
+ ):
1030
+ non_negative_factorization(X, H=H_init, update_H=True, n_components="auto")
1031
+
1032
+ with pytest.warns(
1033
+ RuntimeWarning,
1034
+ match="When init!='custom', provided W or H are ignored",
1035
+ ):
1036
+ non_negative_factorization(
1037
+ X, W=W_init, H=H_init, update_H=True, n_components="auto"
1038
+ )
1039
+
1040
+ with pytest.warns(
1041
+ RuntimeWarning, match="When update_H=False, the provided initial W is not used."
1042
+ ):
1043
+ # When update_H is False, W is ignored regardless of init
1044
+ # TODO: use the provided W when init="custom".
1045
+ non_negative_factorization(
1046
+ X, W=W_init, H=H_init, update_H=False, n_components="auto"
1047
+ )
1048
+
1049
+
1050
+ def test_nmf_custom_init_shape_error():
1051
+ # Check that an informative error is raised when custom initialization does not
1052
+ # have the right shape
1053
+ rng = np.random.RandomState(0)
1054
+ X = rng.random_sample((6, 5))
1055
+ H = rng.random_sample((2, 5))
1056
+ nmf = NMF(n_components=2, init="custom", random_state=0)
1057
+
1058
+ with pytest.raises(ValueError, match="Array with wrong first dimension passed"):
1059
+ nmf.fit(X, H=H, W=rng.random_sample((5, 2)))
1060
+
1061
+ with pytest.raises(ValueError, match="Array with wrong second dimension passed"):
1062
+ nmf.fit(X, H=H, W=rng.random_sample((6, 3)))
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py ADDED
@@ -0,0 +1,987 @@
1
+ import re
2
+ import warnings
3
+
4
+ import numpy as np
5
+ import pytest
6
+ import scipy as sp
7
+ from numpy.testing import assert_array_equal
8
+
9
+ from sklearn import config_context, datasets
10
+ from sklearn.base import clone
11
+ from sklearn.datasets import load_iris, make_classification
12
+ from sklearn.decomposition import PCA
13
+ from sklearn.decomposition._pca import _assess_dimension, _infer_dimension
14
+ from sklearn.utils._array_api import (
15
+ _atol_for_type,
16
+ _convert_to_numpy,
17
+ yield_namespace_device_dtype_combinations,
18
+ )
19
+ from sklearn.utils._array_api import device as array_device
20
+ from sklearn.utils._testing import _array_api_for_tests, assert_allclose
21
+ from sklearn.utils.estimator_checks import (
22
+ _get_check_estimator_ids,
23
+ check_array_api_input_and_values,
24
+ )
25
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
26
+
27
+ iris = datasets.load_iris()
28
+ PCA_SOLVERS = ["full", "arpack", "randomized", "auto"]
29
+
30
+ # `SPARSE_M` and `SPARSE_N` could be larger, but be aware:
31
+ # * SciPy's generation of random sparse matrix can be costly
32
+ # * A (SPARSE_M, SPARSE_N) dense array is allocated to compare against
33
+ SPARSE_M, SPARSE_N = 1000, 300 # arbitrary
34
+ SPARSE_MAX_COMPONENTS = min(SPARSE_M, SPARSE_N)
35
+
36
+
37
+ def _check_fitted_pca_close(pca1, pca2, rtol):
38
+ assert_allclose(pca1.components_, pca2.components_, rtol=rtol)
39
+ assert_allclose(pca1.explained_variance_, pca2.explained_variance_, rtol=rtol)
40
+ assert_allclose(pca1.singular_values_, pca2.singular_values_, rtol=rtol)
41
+ assert_allclose(pca1.mean_, pca2.mean_, rtol=rtol)
42
+ assert_allclose(pca1.n_components_, pca2.n_components_, rtol=rtol)
43
+ assert_allclose(pca1.n_samples_, pca2.n_samples_, rtol=rtol)
44
+ assert_allclose(pca1.noise_variance_, pca2.noise_variance_, rtol=rtol)
45
+ assert_allclose(pca1.n_features_in_, pca2.n_features_in_, rtol=rtol)
46
+
47
+
48
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
49
+ @pytest.mark.parametrize("n_components", range(1, iris.data.shape[1]))
50
+ def test_pca(svd_solver, n_components):
51
+ X = iris.data
52
+ pca = PCA(n_components=n_components, svd_solver=svd_solver)
53
+
54
+ # check the shape of fit.transform
55
+ X_r = pca.fit(X).transform(X)
56
+ assert X_r.shape[1] == n_components
57
+
58
+ # check the equivalence of fit.transform and fit_transform
59
+ X_r2 = pca.fit_transform(X)
60
+ assert_allclose(X_r, X_r2)
61
+ X_r = pca.transform(X)
62
+ assert_allclose(X_r, X_r2)
63
+
64
+ # Test get_covariance and get_precision
65
+ cov = pca.get_covariance()
66
+ precision = pca.get_precision()
67
+ assert_allclose(np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-12)
68
+
69
+
70
+ @pytest.mark.parametrize("density", [0.01, 0.1, 0.30])
71
+ @pytest.mark.parametrize("n_components", [1, 2, 10])
72
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
73
+ @pytest.mark.parametrize("svd_solver", ["arpack"])
74
+ @pytest.mark.parametrize("scale", [1, 10, 100])
75
+ def test_pca_sparse(
76
+ global_random_seed, svd_solver, sparse_container, n_components, density, scale
77
+ ):
78
+ # Make sure any tolerance changes pass with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all"
79
+ rtol = 5e-07
80
+ transform_rtol = 3e-05
81
+
82
+ random_state = np.random.default_rng(global_random_seed)
83
+ X = sparse_container(
84
+ sp.sparse.random(
85
+ SPARSE_M,
86
+ SPARSE_N,
87
+ random_state=random_state,
88
+ density=density,
89
+ )
90
+ )
91
+ # Scale the data + vary the column means
92
+ scale_vector = random_state.random(X.shape[1]) * scale
93
+ X = X.multiply(scale_vector)
94
+
95
+ pca = PCA(
96
+ n_components=n_components,
97
+ svd_solver=svd_solver,
98
+ random_state=global_random_seed,
99
+ )
100
+ pca.fit(X)
101
+
102
+ Xd = X.toarray()
103
+ pcad = PCA(
104
+ n_components=n_components,
105
+ svd_solver=svd_solver,
106
+ random_state=global_random_seed,
107
+ )
108
+ pcad.fit(Xd)
109
+
110
+ # Fitted attributes equality
111
+ _check_fitted_pca_close(pca, pcad, rtol=rtol)
112
+
113
+ # Test transform
114
+ X2 = sparse_container(
115
+ sp.sparse.random(
116
+ SPARSE_M,
117
+ SPARSE_N,
118
+ random_state=random_state,
119
+ density=density,
120
+ )
121
+ )
122
+ X2d = X2.toarray()
123
+
124
+ assert_allclose(pca.transform(X2), pca.transform(X2d), rtol=transform_rtol)
125
+ assert_allclose(pca.transform(X2), pcad.transform(X2d), rtol=transform_rtol)
126
+
127
+
128
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
129
+ def test_pca_sparse_fit_transform(global_random_seed, sparse_container):
130
+ random_state = np.random.default_rng(global_random_seed)
131
+ X = sparse_container(
132
+ sp.sparse.random(
133
+ SPARSE_M,
134
+ SPARSE_N,
135
+ random_state=random_state,
136
+ density=0.01,
137
+ )
138
+ )
139
+ X2 = sparse_container(
140
+ sp.sparse.random(
141
+ SPARSE_M,
142
+ SPARSE_N,
143
+ random_state=random_state,
144
+ density=0.01,
145
+ )
146
+ )
147
+
148
+ pca_fit = PCA(n_components=10, svd_solver="arpack", random_state=global_random_seed)
149
+ pca_fit_transform = PCA(
150
+ n_components=10, svd_solver="arpack", random_state=global_random_seed
151
+ )
152
+
153
+ pca_fit.fit(X)
154
+ transformed_X = pca_fit_transform.fit_transform(X)
155
+
156
+ _check_fitted_pca_close(pca_fit, pca_fit_transform, rtol=1e-10)
157
+ assert_allclose(transformed_X, pca_fit_transform.transform(X), rtol=2e-9)
158
+ assert_allclose(transformed_X, pca_fit.transform(X), rtol=2e-9)
159
+ assert_allclose(pca_fit.transform(X2), pca_fit_transform.transform(X2), rtol=2e-9)
160
+
161
+
162
+ @pytest.mark.parametrize("svd_solver", ["randomized", "full", "auto"])
163
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
164
+ def test_sparse_pca_solver_error(global_random_seed, svd_solver, sparse_container):
165
+ random_state = np.random.RandomState(global_random_seed)
166
+ X = sparse_container(
167
+ sp.sparse.random(
168
+ SPARSE_M,
169
+ SPARSE_N,
170
+ random_state=random_state,
171
+ )
172
+ )
173
+ pca = PCA(n_components=30, svd_solver=svd_solver)
174
+ error_msg_pattern = (
175
+ f'PCA only support sparse inputs with the "arpack" solver, while "{svd_solver}"'
176
+ " was passed"
177
+ )
178
+ with pytest.raises(TypeError, match=error_msg_pattern):
179
+ pca.fit(X)
180
+
181
+
182
+ def test_no_empty_slice_warning():
183
+ # test if we avoid numpy warnings for computing over empty arrays
184
+ n_components = 10
185
+ n_features = n_components + 2 # anything > n_comps triggered it in 0.16
186
+ X = np.random.uniform(-1, 1, size=(n_components, n_features))
187
+ pca = PCA(n_components=n_components)
188
+ with warnings.catch_warnings():
189
+ warnings.simplefilter("error", RuntimeWarning)
190
+ pca.fit(X)
191
+
192
+
193
+ @pytest.mark.parametrize("copy", [True, False])
194
+ @pytest.mark.parametrize("solver", PCA_SOLVERS)
195
+ def test_whitening(solver, copy):
196
+ # Check that PCA output has unit-variance
197
+ rng = np.random.RandomState(0)
198
+ n_samples = 100
199
+ n_features = 80
200
+ n_components = 30
201
+ rank = 50
202
+
203
+ # some low rank data with correlated features
204
+ X = np.dot(
205
+ rng.randn(n_samples, rank),
206
+ np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features)),
207
+ )
208
+ # the component-wise variance of the first 50 features is 3 times the
209
+ # mean component-wise variance of the remaining 30 features
210
+ X[:, :50] *= 3
211
+
212
+ assert X.shape == (n_samples, n_features)
213
+
214
+ # the component-wise variance is thus highly varying:
215
+ assert X.std(axis=0).std() > 43.8
216
+
217
+ # whiten the data while projecting to the lower dim subspace
218
+ X_ = X.copy() # make sure we keep an original across iterations.
219
+ pca = PCA(
220
+ n_components=n_components,
221
+ whiten=True,
222
+ copy=copy,
223
+ svd_solver=solver,
224
+ random_state=0,
225
+ iterated_power=7,
226
+ )
227
+ # test fit_transform
228
+ X_whitened = pca.fit_transform(X_.copy())
229
+ assert X_whitened.shape == (n_samples, n_components)
230
+ X_whitened2 = pca.transform(X_)
231
+ assert_allclose(X_whitened, X_whitened2, rtol=5e-4)
232
+
233
+ assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(n_components))
234
+ assert_allclose(X_whitened.mean(axis=0), np.zeros(n_components), atol=1e-12)
235
+
236
+ X_ = X.copy()
237
+ pca = PCA(
238
+ n_components=n_components, whiten=False, copy=copy, svd_solver=solver
239
+ ).fit(X_.copy())
240
+ X_unwhitened = pca.transform(X_)
241
+ assert X_unwhitened.shape == (n_samples, n_components)
242
+
243
+ # in that case the output components still have varying variances
244
+ assert X_unwhitened.std(axis=0).std() == pytest.approx(74.1, rel=1e-1)
245
+ # we always center, so no test for non-centering.
246
+
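The unit sample variance asserted above comes from the whitening convention that, after centering, the transformed scores are the left singular vectors scaled by sqrt(n_samples - 1). A minimal sketch of that identity, assuming the "full" solver and comparing up to the arbitrary sign of each singular vector (only standard NumPy/scikit-learn calls are used):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(100, 5)

# SVD of the centered data
Xc = X - X.mean(axis=0)
U, S, Vt = np.linalg.svd(Xc, full_matrices=False)

k = 3
# whitened scores: left singular vectors rescaled to unit sample variance (ddof=1)
manual = U[:, :k] * np.sqrt(X.shape[0] - 1)
auto = PCA(n_components=k, whiten=True, svd_solver="full").fit_transform(X)

# singular-vector signs are arbitrary, so compare absolute values
np.testing.assert_allclose(np.abs(manual), np.abs(auto), rtol=1e-6, atol=1e-10)

Equivalently, dividing the unwhitened scores U S by the singular values (i.e. by sqrt(explained_variance_ * (n_samples - 1))) gives the same result, which is why the whitened output has unit variance per component.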
247
+
248
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
249
+ def test_pca_explained_variance_equivalence_solver(svd_solver):
250
+ rng = np.random.RandomState(0)
251
+ n_samples, n_features = 100, 80
252
+ X = rng.randn(n_samples, n_features)
253
+
254
+ pca_full = PCA(n_components=2, svd_solver="full")
255
+ pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=0)
256
+
257
+ pca_full.fit(X)
258
+ pca_other.fit(X)
259
+
260
+ assert_allclose(
261
+ pca_full.explained_variance_, pca_other.explained_variance_, rtol=5e-2
262
+ )
263
+ assert_allclose(
264
+ pca_full.explained_variance_ratio_,
265
+ pca_other.explained_variance_ratio_,
266
+ rtol=5e-2,
267
+ )
268
+
269
+
270
+ @pytest.mark.parametrize(
271
+ "X",
272
+ [
273
+ np.random.RandomState(0).randn(100, 80),
274
+ datasets.make_classification(100, 80, n_informative=78, random_state=0)[0],
275
+ ],
276
+ ids=["random-data", "correlated-data"],
277
+ )
278
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
279
+ def test_pca_explained_variance_empirical(X, svd_solver):
280
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=0)
281
+ X_pca = pca.fit_transform(X)
282
+ assert_allclose(pca.explained_variance_, np.var(X_pca, ddof=1, axis=0))
283
+
284
+ expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
285
+ expected_result = sorted(expected_result, reverse=True)[:2]
286
+ assert_allclose(pca.explained_variance_, expected_result, rtol=5e-3)
287
+
288
+
289
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
290
+ def test_pca_singular_values_consistency(svd_solver):
291
+ rng = np.random.RandomState(0)
292
+ n_samples, n_features = 100, 80
293
+ X = rng.randn(n_samples, n_features)
294
+
295
+ pca_full = PCA(n_components=2, svd_solver="full", random_state=rng)
296
+ pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
297
+
298
+ pca_full.fit(X)
299
+ pca_other.fit(X)
300
+
301
+ assert_allclose(pca_full.singular_values_, pca_other.singular_values_, rtol=5e-3)
302
+
303
+
304
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
305
+ def test_pca_singular_values(svd_solver):
306
+ rng = np.random.RandomState(0)
307
+ n_samples, n_features = 100, 80
308
+ X = rng.randn(n_samples, n_features)
309
+
310
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
311
+ X_trans = pca.fit_transform(X)
312
+
313
+ # compare to the Frobenius norm
314
+ assert_allclose(
315
+ np.sum(pca.singular_values_**2), np.linalg.norm(X_trans, "fro") ** 2
316
+ )
317
+ # Compare to the 2-norms of the score vectors
318
+ assert_allclose(pca.singular_values_, np.sqrt(np.sum(X_trans**2, axis=0)))
319
+
320
+ # set the singular values and see what we get back
321
+ n_samples, n_features = 100, 110
322
+ X = rng.randn(n_samples, n_features)
323
+
324
+ pca = PCA(n_components=3, svd_solver=svd_solver, random_state=rng)
325
+ X_trans = pca.fit_transform(X)
326
+ X_trans /= np.sqrt(np.sum(X_trans**2, axis=0))
327
+ X_trans[:, 0] *= 3.142
328
+ X_trans[:, 1] *= 2.718
329
+ X_hat = np.dot(X_trans, pca.components_)
330
+ pca.fit(X_hat)
331
+ assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0])
332
+
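Both assertions above are consequences of the SVD of the centered data \(X_c = U S V^\top\): the (unwhitened) PCA scores are \(X_c V = U S\), hence

\[
\lVert U S \rVert_F^2 = \sum_i s_i^2
\qquad\text{and}\qquad
\lVert (U S)_{:, i} \rVert_2 = s_i ,
\]

so the singular values can be read off the Frobenius norm and the column norms of the transformed data.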
333
+
334
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
335
+ def test_pca_check_projection(svd_solver):
336
+ # Test that the projection of data is correct
337
+ rng = np.random.RandomState(0)
338
+ n, p = 100, 3
339
+ X = rng.randn(n, p) * 0.1
340
+ X[:10] += np.array([3, 4, 5])
341
+ Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
342
+
343
+ Yt = PCA(n_components=2, svd_solver=svd_solver).fit(X).transform(Xt)
344
+ Yt /= np.sqrt((Yt**2).sum())
345
+
346
+ assert_allclose(np.abs(Yt[0][0]), 1.0, rtol=5e-3)
347
+
348
+
349
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
350
+ def test_pca_check_projection_list(svd_solver):
351
+ # Test that the projection of data is correct
352
+ X = [[1.0, 0.0], [0.0, 1.0]]
353
+ pca = PCA(n_components=1, svd_solver=svd_solver, random_state=0)
354
+ X_trans = pca.fit_transform(X)
355
+ assert X_trans.shape == (2, 1)
356
+ assert_allclose(X_trans.mean(), 0.00, atol=1e-12)
357
+ assert_allclose(X_trans.std(), 0.71, rtol=5e-3)
358
+
359
+
360
+ @pytest.mark.parametrize("svd_solver", ["full", "arpack", "randomized"])
361
+ @pytest.mark.parametrize("whiten", [False, True])
362
+ def test_pca_inverse(svd_solver, whiten):
363
+ # Test that the projection of data can be inverted
364
+ rng = np.random.RandomState(0)
365
+ n, p = 50, 3
366
+ X = rng.randn(n, p) # spherical data
367
+ X[:, 1] *= 0.00001 # make middle component relatively small
368
+ X += [5, 4, 3] # make a large mean
369
+
370
+ # sanity check that we can recover the original data from the transformed
371
+ # signal (since the data is almost of rank n_components)
372
+ pca = PCA(n_components=2, svd_solver=svd_solver, whiten=whiten).fit(X)
373
+ Y = pca.transform(X)
374
+ Y_inverse = pca.inverse_transform(Y)
375
+ assert_allclose(X, Y_inverse, rtol=5e-6)
376
+
377
+
378
+ @pytest.mark.parametrize(
379
+ "data", [np.array([[0, 1, 0], [1, 0, 0]]), np.array([[0, 1, 0], [1, 0, 0]]).T]
380
+ )
381
+ @pytest.mark.parametrize(
382
+ "svd_solver, n_components, err_msg",
383
+ [
384
+ ("arpack", 0, r"must be between 1 and min\(n_samples, n_features\)"),
385
+ ("randomized", 0, r"must be between 1 and min\(n_samples, n_features\)"),
386
+ ("arpack", 2, r"must be strictly less than min"),
387
+ (
388
+ "auto",
389
+ 3,
390
+ (
391
+ r"n_components=3 must be between 0 and min\(n_samples, "
392
+ r"n_features\)=2 with svd_solver='full'"
393
+ ),
394
+ ),
395
+ ],
396
+ )
397
+ def test_pca_validation(svd_solver, data, n_components, err_msg):
398
+ # Ensures that solver-specific extreme inputs for the n_components
399
+ # parameter raise errors
400
+ smallest_d = 2 # The smallest dimension
401
+ pca_fitted = PCA(n_components, svd_solver=svd_solver)
402
+
403
+ with pytest.raises(ValueError, match=err_msg):
404
+ pca_fitted.fit(data)
405
+
406
+ # Additional case for arpack
407
+ if svd_solver == "arpack":
408
+ n_components = smallest_d
409
+
410
+ err_msg = (
411
+ "n_components={}L? must be strictly less than "
412
+ r"min\(n_samples, n_features\)={}L? with "
413
+ "svd_solver='arpack'".format(n_components, smallest_d)
414
+ )
415
+ with pytest.raises(ValueError, match=err_msg):
416
+ PCA(n_components, svd_solver=svd_solver).fit(data)
417
+
418
+
419
+ @pytest.mark.parametrize(
420
+ "solver, n_components_",
421
+ [
422
+ ("full", min(iris.data.shape)),
423
+ ("arpack", min(iris.data.shape) - 1),
424
+ ("randomized", min(iris.data.shape)),
425
+ ],
426
+ )
427
+ @pytest.mark.parametrize("data", [iris.data, iris.data.T])
428
+ def test_n_components_none(data, solver, n_components_):
429
+ pca = PCA(svd_solver=solver)
430
+ pca.fit(data)
431
+ assert pca.n_components_ == n_components_
432
+
433
+
434
+ @pytest.mark.parametrize("svd_solver", ["auto", "full"])
435
+ def test_n_components_mle(svd_solver):
436
+ # Ensure that n_components == 'mle' doesn't raise error for auto/full
437
+ rng = np.random.RandomState(0)
438
+ n_samples, n_features = 600, 10
439
+ X = rng.randn(n_samples, n_features)
440
+ pca = PCA(n_components="mle", svd_solver=svd_solver)
441
+ pca.fit(X)
442
+ assert pca.n_components_ == 1
443
+
444
+
445
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
446
+ def test_n_components_mle_error(svd_solver):
447
+ # Ensure that n_components == 'mle' will raise an error for unsupported
448
+ # solvers
449
+ rng = np.random.RandomState(0)
450
+ n_samples, n_features = 600, 10
451
+ X = rng.randn(n_samples, n_features)
452
+ pca = PCA(n_components="mle", svd_solver=svd_solver)
453
+ err_msg = "n_components='mle' cannot be a string with svd_solver='{}'".format(
454
+ svd_solver
455
+ )
456
+ with pytest.raises(ValueError, match=err_msg):
457
+ pca.fit(X)
458
+
459
+
460
+ def test_pca_dim():
461
+ # Check automated dimensionality setting
462
+ rng = np.random.RandomState(0)
463
+ n, p = 100, 5
464
+ X = rng.randn(n, p) * 0.1
465
+ X[:10] += np.array([3, 4, 5, 1, 2])
466
+ pca = PCA(n_components="mle", svd_solver="full").fit(X)
467
+ assert pca.n_components == "mle"
468
+ assert pca.n_components_ == 1
469
+
470
+
471
+ def test_infer_dim_1():
472
+ # TODO: explain what this is testing
473
+ # Or at least use explicit variable names...
474
+ n, p = 1000, 5
475
+ rng = np.random.RandomState(0)
476
+ X = (
477
+ rng.randn(n, p) * 0.1
478
+ + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
479
+ + np.array([1, 0, 7, 4, 6])
480
+ )
481
+ pca = PCA(n_components=p, svd_solver="full")
482
+ pca.fit(X)
483
+ spect = pca.explained_variance_
484
+ ll = np.array([_assess_dimension(spect, k, n) for k in range(1, p)])
485
+ assert ll[1] > ll.max() - 0.01 * n
486
+
487
+
488
+ def test_infer_dim_2():
489
+ # TODO: explain what this is testing
490
+ # Or at least use explicit variable names...
491
+ n, p = 1000, 5
492
+ rng = np.random.RandomState(0)
493
+ X = rng.randn(n, p) * 0.1
494
+ X[:10] += np.array([3, 4, 5, 1, 2])
495
+ X[10:20] += np.array([6, 0, 7, 2, -1])
496
+ pca = PCA(n_components=p, svd_solver="full")
497
+ pca.fit(X)
498
+ spect = pca.explained_variance_
499
+ assert _infer_dimension(spect, n) > 1
500
+
501
+
502
+ def test_infer_dim_3():
503
+ n, p = 100, 5
504
+ rng = np.random.RandomState(0)
505
+ X = rng.randn(n, p) * 0.1
506
+ X[:10] += np.array([3, 4, 5, 1, 2])
507
+ X[10:20] += np.array([6, 0, 7, 2, -1])
508
+ X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
509
+ pca = PCA(n_components=p, svd_solver="full")
510
+ pca.fit(X)
511
+ spect = pca.explained_variance_
512
+ assert _infer_dimension(spect, n) > 2
513
+
514
+
515
+ @pytest.mark.parametrize(
516
+ "X, n_components, n_components_validated",
517
+ [
518
+ (iris.data, 0.95, 2), # row > col
519
+ (iris.data, 0.01, 1), # row > col
520
+ (np.random.RandomState(0).rand(5, 20), 0.5, 2),
521
+ ], # row < col
522
+ )
523
+ def test_infer_dim_by_explained_variance(X, n_components, n_components_validated):
524
+ pca = PCA(n_components=n_components, svd_solver="full")
525
+ pca.fit(X)
526
+ assert pca.n_components == pytest.approx(n_components)
527
+ assert pca.n_components_ == n_components_validated
528
+
529
+
530
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
531
+ def test_pca_score(svd_solver):
532
+ # Test that probabilistic PCA scoring yields a reasonable score
533
+ n, p = 1000, 3
534
+ rng = np.random.RandomState(0)
535
+ X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
536
+ pca = PCA(n_components=2, svd_solver=svd_solver)
537
+ pca.fit(X)
538
+
539
+ ll1 = pca.score(X)
540
+ h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1**2) * p
541
+ assert_allclose(ll1 / h, 1, rtol=5e-2)
542
+
543
+ ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5]))
544
+ assert ll1 > ll2
545
+
546
+ pca = PCA(n_components=2, whiten=True, svd_solver=svd_solver)
547
+ pca.fit(X)
548
+ ll2 = pca.score(X)
549
+ assert ll1 > ll2
550
+
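The baseline h above is the expected per-sample log-likelihood of an isotropic Gaussian evaluated under its own density: for p independent coordinates with standard deviation \(\sigma\),

\[
\mathbb{E}\bigl[\log \mathcal{N}(x; \mu, \sigma^2 I_p)\bigr]
= -\tfrac{p}{2} \log\bigl(2 \pi e \sigma^2\bigr),
\]

which with \(\sigma = 0.1\) and \(p = 3\) is the value that the averaged `pca.score(X)` is compared against (within the 5% tolerance).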
551
+
552
+ def test_pca_score3():
553
+ # Check that probabilistic PCA selects the right model
554
+ n, p = 200, 3
555
+ rng = np.random.RandomState(0)
556
+ Xl = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
557
+ Xt = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
558
+ ll = np.zeros(p)
559
+ for k in range(p):
560
+ pca = PCA(n_components=k, svd_solver="full")
561
+ pca.fit(Xl)
562
+ ll[k] = pca.score(Xt)
563
+
564
+ assert ll.argmax() == 1
565
+
566
+
567
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
568
+ def test_pca_sanity_noise_variance(svd_solver):
569
+ # Sanity check for the noise_variance_. For more details see
570
+ # https://github.com/scikit-learn/scikit-learn/issues/7568
571
+ # https://github.com/scikit-learn/scikit-learn/issues/8541
572
+ # https://github.com/scikit-learn/scikit-learn/issues/8544
573
+ X, _ = datasets.load_digits(return_X_y=True)
574
+ pca = PCA(n_components=30, svd_solver=svd_solver, random_state=0)
575
+ pca.fit(X)
576
+ assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0)
577
+
578
+
579
+ @pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
580
+ def test_pca_score_consistency_solvers(svd_solver):
581
+ # Check the consistency of score between solvers
582
+ X, _ = datasets.load_digits(return_X_y=True)
583
+ pca_full = PCA(n_components=30, svd_solver="full", random_state=0)
584
+ pca_other = PCA(n_components=30, svd_solver=svd_solver, random_state=0)
585
+ pca_full.fit(X)
586
+ pca_other.fit(X)
587
+ assert_allclose(pca_full.score(X), pca_other.score(X), rtol=5e-6)
588
+
589
+
590
+ # arpack raises ValueError for n_components == min(n_samples, n_features)
591
+ @pytest.mark.parametrize("svd_solver", ["full", "randomized"])
592
+ def test_pca_zero_noise_variance_edge_cases(svd_solver):
593
+ # ensure that noise_variance_ is 0 in edge cases
594
+ # when n_components == min(n_samples, n_features)
595
+ n, p = 100, 3
596
+ rng = np.random.RandomState(0)
597
+ X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
598
+
599
+ pca = PCA(n_components=p, svd_solver=svd_solver)
600
+ pca.fit(X)
601
+ assert pca.noise_variance_ == 0
602
+ # Non-regression test for gh-12489
603
+ # ensure no divide-by-zero error for n_components == n_features < n_samples
604
+ pca.score(X)
605
+
606
+ pca.fit(X.T)
607
+ assert pca.noise_variance_ == 0
608
+ # Non-regression test for gh-12489
609
+ # ensure no divide-by-zero error for n_components == n_samples < n_features
610
+ pca.score(X.T)
611
+
612
+
613
+ @pytest.mark.parametrize(
614
+ "data, n_components, expected_solver",
615
+ [ # case: n_components in (0,1) => 'full'
616
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 0.5, "full"),
617
+ # case: max(X.shape) <= 500 => 'full'
618
+ (np.random.RandomState(0).uniform(size=(10, 50)), 5, "full"),
619
+ # case: n_components >= .8 * min(X.shape) => 'full'
620
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 50, "full"),
621
+ # n_components >= 1 and n_components < .8*min(X.shape) => 'randomized'
622
+ (np.random.RandomState(0).uniform(size=(1000, 50)), 10, "randomized"),
623
+ ],
624
+ )
625
+ def test_pca_svd_solver_auto(data, n_components, expected_solver):
626
+ pca_auto = PCA(n_components=n_components, random_state=0)
627
+ pca_test = PCA(
628
+ n_components=n_components, svd_solver=expected_solver, random_state=0
629
+ )
630
+ pca_auto.fit(data)
631
+ pca_test.fit(data)
632
+ assert_allclose(pca_auto.components_, pca_test.components_)
633
+
634
+
635
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
636
+ def test_pca_deterministic_output(svd_solver):
637
+ rng = np.random.RandomState(0)
638
+ X = rng.rand(10, 10)
639
+
640
+ transformed_X = np.zeros((20, 2))
641
+ for i in range(20):
642
+ pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
643
+ transformed_X[i, :] = pca.fit_transform(X)[0]
644
+ assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
645
+
646
+
647
+ @pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
648
+ def test_pca_dtype_preservation(svd_solver):
649
+ check_pca_float_dtype_preservation(svd_solver)
650
+ check_pca_int_dtype_upcast_to_double(svd_solver)
651
+
652
+
653
+ def check_pca_float_dtype_preservation(svd_solver):
654
+ # Ensure that PCA does not upscale the dtype when input is float32
655
+ X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False)
656
+ X_32 = X_64.astype(np.float32)
657
+
658
+ pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64)
659
+ pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_32)
660
+
661
+ assert pca_64.components_.dtype == np.float64
662
+ assert pca_32.components_.dtype == np.float32
663
+ assert pca_64.transform(X_64).dtype == np.float64
664
+ assert pca_32.transform(X_32).dtype == np.float32
665
+
666
+ # the rtol is set such that the test passes on all platforms tested on
667
+ # conda-forge: PR#15775
668
+ # see: https://github.com/conda-forge/scikit-learn-feedstock/pull/113
669
+ assert_allclose(pca_64.components_, pca_32.components_, rtol=2e-4)
670
+
671
+
672
+ def check_pca_int_dtype_upcast_to_double(svd_solver):
673
+ # Ensure that all int types will be upcast to float64
674
+ X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
675
+ X_i64 = X_i64.astype(np.int64, copy=False)
676
+ X_i32 = X_i64.astype(np.int32, copy=False)
677
+
678
+ pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64)
679
+ pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32)
680
+
681
+ assert pca_64.components_.dtype == np.float64
682
+ assert pca_32.components_.dtype == np.float64
683
+ assert pca_64.transform(X_i64).dtype == np.float64
684
+ assert pca_32.transform(X_i32).dtype == np.float64
685
+
686
+ assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-4)
687
+
688
+
689
+ def test_pca_n_components_mostly_explained_variance_ratio():
690
+ # when n_components is the second highest cumulative sum of the
691
+ # explained_variance_ratio_, then n_components_ should equal the
692
+ # number of features in the dataset #15669
693
+ X, y = load_iris(return_X_y=True)
694
+ pca1 = PCA().fit(X, y)
695
+
696
+ n_components = pca1.explained_variance_ratio_.cumsum()[-2]
697
+ pca2 = PCA(n_components=n_components).fit(X, y)
698
+ assert pca2.n_components_ == X.shape[1]
699
+
700
+
701
+ def test_assess_dimension_bad_rank():
702
+ # Test error when tested rank not in [1, n_features - 1]
703
+ spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
704
+ n_samples = 10
705
+ for rank in (0, 5):
706
+ with pytest.raises(ValueError, match=r"should be in \[1, n_features - 1\]"):
707
+ _assess_dimension(spectrum, rank, n_samples)
708
+
709
+
710
+ def test_small_eigenvalues_mle():
711
+ # Test that ranks associated with tiny eigenvalues are given a log-likelihood of
712
+ # -inf. The inferred rank will be 1
713
+ spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
714
+
715
+ assert _assess_dimension(spectrum, rank=1, n_samples=10) > -np.inf
716
+
717
+ for rank in (2, 3):
718
+ assert _assess_dimension(spectrum, rank, 10) == -np.inf
719
+
720
+ assert _infer_dimension(spectrum, 10) == 1
721
+
722
+
723
+ def test_mle_redundant_data():
724
+ # Test 'mle' with pathological X: only one relevant feature should give a
725
+ # rank of 1
726
+ X, _ = datasets.make_classification(
727
+ n_features=20,
728
+ n_informative=1,
729
+ n_repeated=18,
730
+ n_redundant=1,
731
+ n_clusters_per_class=1,
732
+ random_state=42,
733
+ )
734
+ pca = PCA(n_components="mle").fit(X)
735
+ assert pca.n_components_ == 1
736
+
737
+
738
+ def test_fit_mle_too_few_samples():
739
+ # Tests that an error is raised when the number of samples is smaller
740
+ # than the number of features during an mle fit
741
+ X, _ = datasets.make_classification(n_samples=20, n_features=21, random_state=42)
742
+
743
+ pca = PCA(n_components="mle", svd_solver="full")
744
+ with pytest.raises(
745
+ ValueError,
746
+ match="n_components='mle' is only supported if n_samples >= n_features",
747
+ ):
748
+ pca.fit(X)
749
+
750
+
751
+ def test_mle_simple_case():
752
+ # non-regression test for issue
753
+ # https://github.com/scikit-learn/scikit-learn/issues/16730
754
+ n_samples, n_dim = 1000, 10
755
+ X = np.random.RandomState(0).randn(n_samples, n_dim)
756
+ X[:, -1] = np.mean(X[:, :-1], axis=-1) # true X dim is ndim - 1
757
+ pca_skl = PCA("mle", svd_solver="full")
758
+ pca_skl.fit(X)
759
+ assert pca_skl.n_components_ == n_dim - 1
760
+
761
+
762
+ def test_assess_dimension_rank_one():
763
+ # Make sure assess_dimension works properly on a matrix of rank 1
764
+ n_samples, n_features = 9, 6
765
+ X = np.ones((n_samples, n_features)) # rank 1 matrix
766
+ _, s, _ = np.linalg.svd(X, full_matrices=True)
767
+ # except for rank 1, all singular values are 0 or close to 0 (floating point)
768
+ assert_allclose(s[1:], np.zeros(n_features - 1), atol=1e-12)
769
+
770
+ assert np.isfinite(_assess_dimension(s, rank=1, n_samples=n_samples))
771
+ for rank in range(2, n_features):
772
+ assert _assess_dimension(s, rank, n_samples) == -np.inf
773
+
774
+
775
+ def test_pca_randomized_svd_n_oversamples():
776
+ """Check that exposing and setting `n_oversamples` will provide accurate results
777
+ even when `X` has a large number of features.
778
+
779
+ Non-regression test for:
780
+ https://github.com/scikit-learn/scikit-learn/issues/20589
781
+ """
782
+ rng = np.random.RandomState(0)
783
+ n_features = 100
784
+ X = rng.randn(1_000, n_features)
785
+
786
+ # The default value of `n_oversamples` will lead to inaccurate results
787
+ # We force it to the number of features.
788
+ pca_randomized = PCA(
789
+ n_components=1,
790
+ svd_solver="randomized",
791
+ n_oversamples=n_features,
792
+ random_state=0,
793
+ ).fit(X)
794
+ pca_full = PCA(n_components=1, svd_solver="full").fit(X)
795
+ pca_arpack = PCA(n_components=1, svd_solver="arpack", random_state=0).fit(X)
796
+
797
+ assert_allclose(np.abs(pca_full.components_), np.abs(pca_arpack.components_))
798
+ assert_allclose(np.abs(pca_randomized.components_), np.abs(pca_arpack.components_))
799
+
800
+
801
+ def test_feature_names_out():
802
+ """Check feature names out for PCA."""
803
+ pca = PCA(n_components=2).fit(iris.data)
804
+
805
+ names = pca.get_feature_names_out()
806
+ assert_array_equal([f"pca{i}" for i in range(2)], names)
807
+
808
+
809
+ @pytest.mark.parametrize("copy", [True, False])
810
+ def test_variance_correctness(copy):
811
+ """Check the accuracy of PCA's internal variance calculation"""
812
+ rng = np.random.RandomState(0)
813
+ X = rng.randn(1000, 200)
814
+ pca = PCA().fit(X)
815
+ pca_var = pca.explained_variance_ / pca.explained_variance_ratio_
816
+ true_var = np.var(X, ddof=1, axis=0).sum()
817
+ np.testing.assert_allclose(pca_var, true_var)
818
+
819
+
820
+ def check_array_api_get_precision(name, estimator, array_namespace, device, dtype_name):
821
+ xp = _array_api_for_tests(array_namespace, device)
822
+ iris_np = iris.data.astype(dtype_name)
823
+ iris_xp = xp.asarray(iris_np, device=device)
824
+
825
+ estimator.fit(iris_np)
826
+ precision_np = estimator.get_precision()
827
+ covariance_np = estimator.get_covariance()
828
+
829
+ with config_context(array_api_dispatch=True):
830
+ estimator_xp = clone(estimator).fit(iris_xp)
831
+ precision_xp = estimator_xp.get_precision()
832
+ assert precision_xp.shape == (4, 4)
833
+ assert precision_xp.dtype == iris_xp.dtype
834
+
835
+ assert_allclose(
836
+ _convert_to_numpy(precision_xp, xp=xp),
837
+ precision_np,
838
+ atol=_atol_for_type(dtype_name),
839
+ )
840
+ covariance_xp = estimator_xp.get_covariance()
841
+ assert covariance_xp.shape == (4, 4)
842
+ assert covariance_xp.dtype == iris_xp.dtype
843
+
844
+ assert_allclose(
845
+ _convert_to_numpy(covariance_xp, xp=xp),
846
+ covariance_np,
847
+ atol=_atol_for_type(dtype_name),
848
+ )
849
+
850
+
851
+ @pytest.mark.parametrize(
852
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
853
+ )
854
+ @pytest.mark.parametrize(
855
+ "check",
856
+ [check_array_api_input_and_values, check_array_api_get_precision],
857
+ ids=_get_check_estimator_ids,
858
+ )
859
+ @pytest.mark.parametrize(
860
+ "estimator",
861
+ [
862
+ PCA(n_components=2, svd_solver="full"),
863
+ PCA(n_components=0.1, svd_solver="full", whiten=True),
864
+ PCA(
865
+ n_components=2,
866
+ svd_solver="randomized",
867
+ power_iteration_normalizer="QR",
868
+ random_state=0, # how to use global_random_seed here?
869
+ ),
870
+ ],
871
+ ids=_get_check_estimator_ids,
872
+ )
873
+ def test_pca_array_api_compliance(
874
+ estimator, check, array_namespace, device, dtype_name
875
+ ):
876
+ name = estimator.__class__.__name__
877
+ check(name, estimator, array_namespace, device=device, dtype_name=dtype_name)
878
+
879
+
880
+ @pytest.mark.parametrize(
881
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
882
+ )
883
+ @pytest.mark.parametrize(
884
+ "check",
885
+ [check_array_api_get_precision],
886
+ ids=_get_check_estimator_ids,
887
+ )
888
+ @pytest.mark.parametrize(
889
+ "estimator",
890
+ [
891
+ # PCA with mle cannot use check_array_api_input_and_values because of
892
+ # rounding errors in the noisy (low variance) components. Even checking
893
+ # the shape of the `components_` is problematic because the number of
894
+ # components depends on trimming threshold of the mle algorithm which
895
+ # can depend on device-specific rounding errors.
896
+ PCA(n_components="mle", svd_solver="full"),
897
+ ],
898
+ ids=_get_check_estimator_ids,
899
+ )
900
+ def test_pca_mle_array_api_compliance(
901
+ estimator, check, array_namespace, device, dtype_name
902
+ ):
903
+ name = estimator.__class__.__name__
904
+ check(name, estimator, array_namespace, device=device, dtype_name=dtype_name)
905
+
906
+ # Simpler variant of the generic check_array_api_input checker tailored for
907
+ # the specific case of PCA with mle-trimmed components.
908
+ xp = _array_api_for_tests(array_namespace, device)
909
+
910
+ X, y = make_classification(random_state=42)
911
+ X = X.astype(dtype_name, copy=False)
912
+ atol = _atol_for_type(X.dtype)
913
+
914
+ est = clone(estimator)
915
+
916
+ X_xp = xp.asarray(X, device=device)
917
+ y_xp = xp.asarray(y, device=device)
918
+
919
+ est.fit(X, y)
920
+
921
+ components_np = est.components_
922
+ explained_variance_np = est.explained_variance_
923
+
924
+ est_xp = clone(est)
925
+ with config_context(array_api_dispatch=True):
926
+ est_xp.fit(X_xp, y_xp)
927
+ components_xp = est_xp.components_
928
+ assert array_device(components_xp) == array_device(X_xp)
929
+ components_xp_np = _convert_to_numpy(components_xp, xp=xp)
930
+
931
+ explained_variance_xp = est_xp.explained_variance_
932
+ assert array_device(explained_variance_xp) == array_device(X_xp)
933
+ explained_variance_xp_np = _convert_to_numpy(explained_variance_xp, xp=xp)
934
+
935
+ assert components_xp_np.dtype == components_np.dtype
936
+ assert components_xp_np.shape[1] == components_np.shape[1]
937
+ assert explained_variance_xp_np.dtype == explained_variance_np.dtype
938
+
939
+ # Check that the explained variance values match for the
940
+ # common components:
941
+ min_components = min(components_xp_np.shape[0], components_np.shape[0])
942
+ assert_allclose(
943
+ explained_variance_xp_np[:min_components],
944
+ explained_variance_np[:min_components],
945
+ atol=atol,
946
+ )
947
+
948
+ # If the number of components differ, check that the explained variance of
949
+ # the trimmed components is very small.
950
+ if components_xp_np.shape[0] != components_np.shape[0]:
951
+ reference_variance = explained_variance_np[-1]
952
+ extra_variance_np = explained_variance_np[min_components:]
953
+ extra_variance_xp_np = explained_variance_xp_np[min_components:]
954
+ assert all(np.abs(extra_variance_np - reference_variance) < atol)
955
+ assert all(np.abs(extra_variance_xp_np - reference_variance) < atol)
956
+
957
+
958
+ def test_array_api_error_and_warnings_on_unsupported_params():
959
+ pytest.importorskip("array_api_compat")
960
+ xp = pytest.importorskip("numpy.array_api")
961
+ iris_xp = xp.asarray(iris.data)
962
+
963
+ pca = PCA(n_components=2, svd_solver="arpack", random_state=0)
964
+ expected_msg = re.escape(
965
+ "PCA with svd_solver='arpack' is not supported for Array API inputs."
966
+ )
967
+ with pytest.raises(ValueError, match=expected_msg):
968
+ with config_context(array_api_dispatch=True):
969
+ pca.fit(iris_xp)
970
+
971
+ pca.set_params(svd_solver="randomized", power_iteration_normalizer="LU")
972
+ expected_msg = re.escape(
973
+ "Array API does not support LU factorization. Set"
974
+ " `power_iteration_normalizer='QR'` instead."
975
+ )
976
+ with pytest.raises(ValueError, match=expected_msg):
977
+ with config_context(array_api_dispatch=True):
978
+ pca.fit(iris_xp)
979
+
980
+ pca.set_params(svd_solver="randomized", power_iteration_normalizer="auto")
981
+ expected_msg = re.escape(
982
+ "Array API does not support LU factorization, falling back to QR instead. Set"
983
+ " `power_iteration_normalizer='QR'` explicitly to silence this warning."
984
+ )
985
+ with pytest.warns(UserWarning, match=expected_msg):
986
+ with config_context(array_api_dispatch=True):
987
+ pca.fit(iris_xp)
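
The `test_pca_randomized_svd_n_oversamples` test above captures the fix for issue 20589: with many features, the randomized solver needs extra oversampling to match the exact solvers. A minimal standalone sketch of the same comparison, assuming only NumPy and scikit-learn and run outside the test suite, could look like this:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(1_000, 100)  # wide-ish matrix, as in the test above

pca_full = PCA(n_components=1, svd_solver="full").fit(X)
pca_rand = PCA(
    n_components=1,
    svd_solver="randomized",
    n_oversamples=X.shape[1],  # oversample up to n_features for accuracy
    random_state=0,
).fit(X)

# Principal directions are defined up to a sign flip, so compare magnitudes.
np.testing.assert_allclose(
    np.abs(pca_rand.components_), np.abs(pca_full.components_), atol=1e-3
)
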
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ """
2
+ The :mod:`sklearn.feature_extraction` module deals with feature extraction
3
+ from raw data. It currently includes methods to extract features from text and
4
+ images.
5
+ """
6
+
7
+ from . import text
8
+ from ._dict_vectorizer import DictVectorizer
9
+ from ._hash import FeatureHasher
10
+ from .image import grid_to_graph, img_to_graph
11
+
12
+ __all__ = [
13
+ "DictVectorizer",
14
+ "image",
15
+ "img_to_graph",
16
+ "grid_to_graph",
17
+ "text",
18
+ "FeatureHasher",
19
+ ]
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (615 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc ADDED
Binary file (7.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc ADDED
Binary file (67.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py ADDED
@@ -0,0 +1,452 @@
1
+ # Authors: Lars Buitinck
2
+ # Dan Blanchard <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ from array import array
6
+ from collections.abc import Iterable, Mapping
7
+ from numbers import Number
8
+ from operator import itemgetter
9
+
10
+ import numpy as np
11
+ import scipy.sparse as sp
12
+
13
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
14
+ from ..utils import check_array
15
+ from ..utils.validation import check_is_fitted
16
+
17
+
18
+ class DictVectorizer(TransformerMixin, BaseEstimator):
19
+ """Transforms lists of feature-value mappings to vectors.
20
+
21
+ This transformer turns lists of mappings (dict-like objects) of feature
22
+ names to feature values into Numpy arrays or scipy.sparse matrices for use
23
+ with scikit-learn estimators.
24
+
25
+ When feature values are strings, this transformer will do a binary one-hot
26
+ (aka one-of-K) coding: one boolean-valued feature is constructed for each
27
+ of the possible string values that the feature can take on. For instance,
28
+ a feature "f" that can take on the values "ham" and "spam" will become two
29
+ features in the output, one signifying "f=ham", the other "f=spam".
30
+
31
+ If a feature value is a sequence or set of strings, this transformer
32
+ will iterate over the values and will count the occurrences of each string
33
+ value.
34
+
35
+ However, note that this transformer will only do a binary one-hot encoding
36
+ when feature values are of type string. If categorical features are
37
+ represented as numeric values such as int or iterables of strings, the
38
+ DictVectorizer can be followed by
39
+ :class:`~sklearn.preprocessing.OneHotEncoder` to complete
40
+ binary one-hot encoding.
41
+
42
+ Features that do not occur in a sample (mapping) will have a zero value
43
+ in the resulting array/matrix.
44
+
45
+ For an efficiency comparison of the different feature extractors, see
46
+ :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
47
+
48
+ Read more in the :ref:`User Guide <dict_feature_extraction>`.
49
+
50
+ Parameters
51
+ ----------
52
+ dtype : dtype, default=np.float64
53
+ The type of feature values. Passed to Numpy array/scipy.sparse matrix
54
+ constructors as the dtype argument.
55
+ separator : str, default="="
56
+ Separator string used when constructing new features for one-hot
57
+ coding.
58
+ sparse : bool, default=True
59
+ Whether transform should produce scipy.sparse matrices.
60
+ sort : bool, default=True
61
+ Whether ``feature_names_`` and ``vocabulary_`` should be
62
+ sorted when fitting.
63
+
64
+ Attributes
65
+ ----------
66
+ vocabulary_ : dict
67
+ A dictionary mapping feature names to feature indices.
68
+
69
+ feature_names_ : list
70
+ A list of length n_features containing the feature names (e.g., "f=ham"
71
+ and "f=spam").
72
+
73
+ See Also
74
+ --------
75
+ FeatureHasher : Performs vectorization using only a hash function.
76
+ sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical
77
+ features encoded as columns of arbitrary data types.
78
+
79
+ Examples
80
+ --------
81
+ >>> from sklearn.feature_extraction import DictVectorizer
82
+ >>> v = DictVectorizer(sparse=False)
83
+ >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
84
+ >>> X = v.fit_transform(D)
85
+ >>> X
86
+ array([[2., 0., 1.],
87
+ [0., 1., 3.]])
88
+ >>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0},
89
+ ... {'baz': 1.0, 'foo': 3.0}]
90
+ True
91
+ >>> v.transform({'foo': 4, 'unseen_feature': 3})
92
+ array([[0., 0., 4.]])
93
+ """
94
+
95
+ _parameter_constraints: dict = {
96
+ "dtype": "no_validation", # validation delegated to numpy,
97
+ "separator": [str],
98
+ "sparse": ["boolean"],
99
+ "sort": ["boolean"],
100
+ }
101
+
102
+ def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True):
103
+ self.dtype = dtype
104
+ self.separator = separator
105
+ self.sparse = sparse
106
+ self.sort = sort
107
+
108
+ def _add_iterable_element(
109
+ self,
110
+ f,
111
+ v,
112
+ feature_names,
113
+ vocab,
114
+ *,
115
+ fitting=True,
116
+ transforming=False,
117
+ indices=None,
118
+ values=None,
119
+ ):
120
+ """Add feature names for iterable of strings"""
121
+ for vv in v:
122
+ if isinstance(vv, str):
123
+ feature_name = "%s%s%s" % (f, self.separator, vv)
124
+ vv = 1
125
+ else:
126
+ raise TypeError(
127
+ f"Unsupported type {type(vv)} in iterable "
128
+ "value. Only iterables of string are "
129
+ "supported."
130
+ )
131
+ if fitting and feature_name not in vocab:
132
+ vocab[feature_name] = len(feature_names)
133
+ feature_names.append(feature_name)
134
+
135
+ if transforming and feature_name in vocab:
136
+ indices.append(vocab[feature_name])
137
+ values.append(self.dtype(vv))
138
+
139
+ @_fit_context(prefer_skip_nested_validation=True)
140
+ def fit(self, X, y=None):
141
+ """Learn a list of feature name -> indices mappings.
142
+
143
+ Parameters
144
+ ----------
145
+ X : Mapping or iterable over Mappings
146
+ Dict(s) or Mapping(s) from feature names (arbitrary Python
147
+ objects) to feature values (strings or convertible to dtype).
148
+
149
+ .. versionchanged:: 0.24
150
+ Accepts multiple string values for one categorical feature.
151
+
152
+ y : (ignored)
153
+ Ignored parameter.
154
+
155
+ Returns
156
+ -------
157
+ self : object
158
+ DictVectorizer class instance.
159
+ """
160
+ feature_names = []
161
+ vocab = {}
162
+
163
+ for x in X:
164
+ for f, v in x.items():
165
+ if isinstance(v, str):
166
+ feature_name = "%s%s%s" % (f, self.separator, v)
167
+ elif isinstance(v, Number) or (v is None):
168
+ feature_name = f
169
+ elif isinstance(v, Mapping):
170
+ raise TypeError(
171
+ f"Unsupported value type {type(v)} "
172
+ f"for {f}: {v}.\n"
173
+ "Mapping objects are not supported."
174
+ )
175
+ elif isinstance(v, Iterable):
176
+ feature_name = None
177
+ self._add_iterable_element(f, v, feature_names, vocab)
178
+
179
+ if feature_name is not None:
180
+ if feature_name not in vocab:
181
+ vocab[feature_name] = len(feature_names)
182
+ feature_names.append(feature_name)
183
+
184
+ if self.sort:
185
+ feature_names.sort()
186
+ vocab = {f: i for i, f in enumerate(feature_names)}
187
+
188
+ self.feature_names_ = feature_names
189
+ self.vocabulary_ = vocab
190
+
191
+ return self
192
+
193
+ def _transform(self, X, fitting):
194
+ # Sanity check: Python's array has no way of explicitly requesting the
195
+ # signed 32-bit integers that scipy.sparse needs, so we use the next
196
+ # best thing: typecode "i" (int). However, if that gives larger or
197
+ # smaller integers than 32-bit ones, np.frombuffer screws up.
198
+ assert array("i").itemsize == 4, (
199
+ "sizeof(int) != 4 on your platform; please report this at"
200
+ " https://github.com/scikit-learn/scikit-learn/issues and"
201
+ " include the output from platform.platform() in your bug report"
202
+ )
203
+
204
+ dtype = self.dtype
205
+ if fitting:
206
+ feature_names = []
207
+ vocab = {}
208
+ else:
209
+ feature_names = self.feature_names_
210
+ vocab = self.vocabulary_
211
+
212
+ transforming = True
213
+
214
+ # Process everything as sparse regardless of setting
215
+ X = [X] if isinstance(X, Mapping) else X
216
+
217
+ indices = array("i")
218
+ indptr = [0]
219
+ # XXX we could change values to an array.array as well, but it
220
+ # would require (heuristic) conversion of dtype to typecode...
221
+ values = []
222
+
223
+ # collect all the possible feature names and build sparse matrix at
224
+ # same time
225
+ for x in X:
226
+ for f, v in x.items():
227
+ if isinstance(v, str):
228
+ feature_name = "%s%s%s" % (f, self.separator, v)
229
+ v = 1
230
+ elif isinstance(v, Number) or (v is None):
231
+ feature_name = f
232
+ elif not isinstance(v, Mapping) and isinstance(v, Iterable):
233
+ feature_name = None
234
+ self._add_iterable_element(
235
+ f,
236
+ v,
237
+ feature_names,
238
+ vocab,
239
+ fitting=fitting,
240
+ transforming=transforming,
241
+ indices=indices,
242
+ values=values,
243
+ )
244
+ else:
245
+ raise TypeError(
246
+ f"Unsupported value type {type(v)} "
247
+ f"for {f}: {v}.\n"
248
+ f"{type(v)} objects are not supported."
249
+ )
250
+
251
+ if feature_name is not None:
252
+ if fitting and feature_name not in vocab:
253
+ vocab[feature_name] = len(feature_names)
254
+ feature_names.append(feature_name)
255
+
256
+ if feature_name in vocab:
257
+ indices.append(vocab[feature_name])
258
+ values.append(self.dtype(v))
259
+
260
+ indptr.append(len(indices))
261
+
262
+ if len(indptr) == 1:
263
+ raise ValueError("Sample sequence X is empty.")
264
+
265
+ indices = np.frombuffer(indices, dtype=np.intc)
266
+ shape = (len(indptr) - 1, len(vocab))
267
+
268
+ result_matrix = sp.csr_matrix(
269
+ (values, indices, indptr), shape=shape, dtype=dtype
270
+ )
271
+
272
+ # Sort everything if asked
273
+ if fitting and self.sort:
274
+ feature_names.sort()
275
+ map_index = np.empty(len(feature_names), dtype=np.int32)
276
+ for new_val, f in enumerate(feature_names):
277
+ map_index[new_val] = vocab[f]
278
+ vocab[f] = new_val
279
+ result_matrix = result_matrix[:, map_index]
280
+
281
+ if self.sparse:
282
+ result_matrix.sort_indices()
283
+ else:
284
+ result_matrix = result_matrix.toarray()
285
+
286
+ if fitting:
287
+ self.feature_names_ = feature_names
288
+ self.vocabulary_ = vocab
289
+
290
+ return result_matrix
291
+
292
+ @_fit_context(prefer_skip_nested_validation=True)
293
+ def fit_transform(self, X, y=None):
294
+ """Learn a list of feature name -> indices mappings and transform X.
295
+
296
+ Like fit(X) followed by transform(X), but does not require
297
+ materializing X in memory.
298
+
299
+ Parameters
300
+ ----------
301
+ X : Mapping or iterable over Mappings
302
+ Dict(s) or Mapping(s) from feature names (arbitrary Python
303
+ objects) to feature values (strings or convertible to dtype).
304
+
305
+ .. versionchanged:: 0.24
306
+ Accepts multiple string values for one categorical feature.
307
+
308
+ y : (ignored)
309
+ Ignored parameter.
310
+
311
+ Returns
312
+ -------
313
+ Xa : {array, sparse matrix}
314
+ Feature vectors; always 2-d.
315
+ """
316
+ return self._transform(X, fitting=True)
317
+
318
+ def inverse_transform(self, X, dict_type=dict):
319
+ """Transform array or sparse matrix X back to feature mappings.
320
+
321
+ X must have been produced by this DictVectorizer's transform or
322
+ fit_transform method; it may only have passed through transformers
323
+ that preserve the number of features and their order.
324
+
325
+ In the case of one-hot/one-of-K coding, the constructed feature
326
+ names and values are returned rather than the original ones.
327
+
328
+ Parameters
329
+ ----------
330
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
331
+ Sample matrix.
332
+ dict_type : type, default=dict
333
+ Constructor for feature mappings. Must conform to the
334
+ collections.Mapping API.
335
+
336
+ Returns
337
+ -------
338
+ D : list of dict_type objects of shape (n_samples,)
339
+ Feature mappings for the samples in X.
340
+ """
341
+ check_is_fitted(self, "feature_names_")
342
+
343
+ # COO matrix is not subscriptable
344
+ X = check_array(X, accept_sparse=["csr", "csc"])
345
+ n_samples = X.shape[0]
346
+
347
+ names = self.feature_names_
348
+ dicts = [dict_type() for _ in range(n_samples)]
349
+
350
+ if sp.issparse(X):
351
+ for i, j in zip(*X.nonzero()):
352
+ dicts[i][names[j]] = X[i, j]
353
+ else:
354
+ for i, d in enumerate(dicts):
355
+ for j, v in enumerate(X[i, :]):
356
+ if v != 0:
357
+ d[names[j]] = X[i, j]
358
+
359
+ return dicts
360
+
361
+ def transform(self, X):
362
+ """Transform feature->value dicts to array or sparse matrix.
363
+
364
+ Named features not encountered during fit or fit_transform will be
365
+ silently ignored.
366
+
367
+ Parameters
368
+ ----------
369
+ X : Mapping or iterable over Mappings of shape (n_samples,)
370
+ Dict(s) or Mapping(s) from feature names (arbitrary Python
371
+ objects) to feature values (strings or convertible to dtype).
372
+
373
+ Returns
374
+ -------
375
+ Xa : {array, sparse matrix}
376
+ Feature vectors; always 2-d.
377
+ """
378
+ check_is_fitted(self, ["feature_names_", "vocabulary_"])
379
+ return self._transform(X, fitting=False)
380
+
381
+ def get_feature_names_out(self, input_features=None):
382
+ """Get output feature names for transformation.
383
+
384
+ Parameters
385
+ ----------
386
+ input_features : array-like of str or None, default=None
387
+ Not used, present here for API consistency by convention.
388
+
389
+ Returns
390
+ -------
391
+ feature_names_out : ndarray of str objects
392
+ Transformed feature names.
393
+ """
394
+ check_is_fitted(self, "feature_names_")
395
+ if any(not isinstance(name, str) for name in self.feature_names_):
396
+ feature_names = [str(name) for name in self.feature_names_]
397
+ else:
398
+ feature_names = self.feature_names_
399
+ return np.asarray(feature_names, dtype=object)
400
+
401
+ def restrict(self, support, indices=False):
402
+ """Restrict the features to those in support using feature selection.
403
+
404
+ This function modifies the estimator in-place.
405
+
406
+ Parameters
407
+ ----------
408
+ support : array-like
409
+ Boolean mask or list of indices (as returned by the get_support
410
+ member of feature selectors).
411
+ indices : bool, default=False
412
+ Whether support is a list of indices.
413
+
414
+ Returns
415
+ -------
416
+ self : object
417
+ DictVectorizer class instance.
418
+
419
+ Examples
420
+ --------
421
+ >>> from sklearn.feature_extraction import DictVectorizer
422
+ >>> from sklearn.feature_selection import SelectKBest, chi2
423
+ >>> v = DictVectorizer()
424
+ >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
425
+ >>> X = v.fit_transform(D)
426
+ >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
427
+ >>> v.get_feature_names_out()
428
+ array(['bar', 'baz', 'foo'], ...)
429
+ >>> v.restrict(support.get_support())
430
+ DictVectorizer()
431
+ >>> v.get_feature_names_out()
432
+ array(['bar', 'foo'], ...)
433
+ """
434
+ check_is_fitted(self, "feature_names_")
435
+
436
+ if not indices:
437
+ support = np.where(support)[0]
438
+
439
+ names = self.feature_names_
440
+ new_vocab = {}
441
+ for i in support:
442
+ new_vocab[names[i]] = len(new_vocab)
443
+
444
+ self.vocabulary_ = new_vocab
445
+ self.feature_names_ = [
446
+ f for f, i in sorted(new_vocab.items(), key=itemgetter(1))
447
+ ]
448
+
449
+ return self
450
+
451
+ def _more_tags(self):
452
+ return {"X_types": ["dict"]}
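
As the class docstring above notes, string values get a one-of-K expansion while numeric values pass through unchanged. A small sketch, with made-up feature names that are not part of the uploaded file:

from sklearn.feature_extraction import DictVectorizer

D = [{"city": "London", "temp": 12.0}, {"city": "Paris", "temp": 18.0}]
v = DictVectorizer(sparse=False)
X = v.fit_transform(D)
print(v.get_feature_names_out())  # ['city=London' 'city=Paris' 'temp']
print(X)                          # [[ 1.  0. 12.]
                                  #  [ 0.  1. 18.]]
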
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py ADDED
@@ -0,0 +1,197 @@
1
+ # Author: Lars Buitinck
2
+ # License: BSD 3 clause
3
+
4
+ from itertools import chain
5
+ from numbers import Integral
6
+
7
+ import numpy as np
8
+ import scipy.sparse as sp
9
+
10
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
11
+ from ..utils._param_validation import Interval, StrOptions
12
+ from ._hashing_fast import transform as _hashing_transform
13
+
14
+
15
+ def _iteritems(d):
16
+ """Like d.iteritems, but accepts any collections.Mapping."""
17
+ return d.iteritems() if hasattr(d, "iteritems") else d.items()
18
+
19
+
20
+ class FeatureHasher(TransformerMixin, BaseEstimator):
21
+ """Implements feature hashing, aka the hashing trick.
22
+
23
+ This class turns sequences of symbolic feature names (strings) into
24
+ scipy.sparse matrices, using a hash function to compute the matrix column
25
+ corresponding to a name. The hash function employed is the signed 32-bit
26
+ version of Murmurhash3.
27
+
28
+ Feature names of type byte string are used as-is. Unicode strings are
29
+ converted to UTF-8 first, but no Unicode normalization is done.
30
+ Feature values must be (finite) numbers.
31
+
32
+ This class is a low-memory alternative to DictVectorizer and
33
+ CountVectorizer, intended for large-scale (online) learning and situations
34
+ where memory is tight, e.g. when running prediction code on embedded
35
+ devices.
36
+
37
+ For an efficiency comparison of the different feature extractors, see
38
+ :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
39
+
40
+ Read more in the :ref:`User Guide <feature_hashing>`.
41
+
42
+ .. versionadded:: 0.13
43
+
44
+ Parameters
45
+ ----------
46
+ n_features : int, default=2**20
47
+ The number of features (columns) in the output matrices. Small numbers
48
+ of features are likely to cause hash collisions, but large numbers
49
+ will cause larger coefficient dimensions in linear learners.
50
+ input_type : str, default='dict'
51
+ Choose a string from {'dict', 'pair', 'string'}.
52
+ Either "dict" (the default) to accept dictionaries over
53
+ (feature_name, value); "pair" to accept pairs of (feature_name, value);
54
+ or "string" to accept single strings.
55
+ feature_name should be a string, while value should be a number.
56
+ In the case of "string", a value of 1 is implied.
57
+ The feature_name is hashed to find the appropriate column for the
58
+ feature. The value's sign might be flipped in the output (but see
59
+ non_negative, below).
60
+ dtype : numpy dtype, default=np.float64
61
+ The type of feature values. Passed to scipy.sparse matrix constructors
62
+ as the dtype argument. Do not set this to bool, np.boolean or any
63
+ unsigned integer type.
64
+ alternate_sign : bool, default=True
65
+ When True, an alternating sign is added to the features as to
66
+ approximately conserve the inner product in the hashed space even for
67
+ small n_features. This approach is similar to sparse random projection.
68
+
69
+ .. versionchanged:: 0.19
70
+ ``alternate_sign`` replaces the now deprecated ``non_negative``
71
+ parameter.
72
+
73
+ See Also
74
+ --------
75
+ DictVectorizer : Vectorizes string-valued features using a hash table.
76
+ sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features.
77
+
78
+ Notes
79
+ -----
80
+ This estimator is :term:`stateless` and does not need to be fitted.
81
+ However, we recommend to call :meth:`fit_transform` instead of
82
+ :meth:`transform`, as parameter validation is only performed in
83
+ :meth:`fit`.
84
+
85
+ Examples
86
+ --------
87
+ >>> from sklearn.feature_extraction import FeatureHasher
88
+ >>> h = FeatureHasher(n_features=10)
89
+ >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
90
+ >>> f = h.transform(D)
91
+ >>> f.toarray()
92
+ array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
93
+ [ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
94
+
95
+ With `input_type="string"`, the input must be an iterable over iterables of
96
+ strings:
97
+
98
+ >>> h = FeatureHasher(n_features=8, input_type="string")
99
+ >>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]]
100
+ >>> f = h.transform(raw_X)
101
+ >>> f.toarray()
102
+ array([[ 0., 0., 0., -1., 0., -1., 0., 1.],
103
+ [ 0., 0., 0., -1., 0., -1., 0., 0.],
104
+ [ 0., -1., 0., 0., 0., 0., 0., 1.]])
105
+ """
106
+
107
+ _parameter_constraints: dict = {
108
+ "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")],
109
+ "input_type": [StrOptions({"dict", "pair", "string"})],
110
+ "dtype": "no_validation", # delegate to numpy
111
+ "alternate_sign": ["boolean"],
112
+ }
113
+
114
+ def __init__(
115
+ self,
116
+ n_features=(2**20),
117
+ *,
118
+ input_type="dict",
119
+ dtype=np.float64,
120
+ alternate_sign=True,
121
+ ):
122
+ self.dtype = dtype
123
+ self.input_type = input_type
124
+ self.n_features = n_features
125
+ self.alternate_sign = alternate_sign
126
+
127
+ @_fit_context(prefer_skip_nested_validation=True)
128
+ def fit(self, X=None, y=None):
129
+ """Only validates estimator's parameters.
130
+
131
+ This method allows to: (i) validate the estimator's parameters and
132
+ (ii) be consistent with the scikit-learn transformer API.
133
+
134
+ Parameters
135
+ ----------
136
+ X : Ignored
137
+ Not used, present here for API consistency by convention.
138
+
139
+ y : Ignored
140
+ Not used, present here for API consistency by convention.
141
+
142
+ Returns
143
+ -------
144
+ self : object
145
+ FeatureHasher class instance.
146
+ """
147
+ return self
148
+
149
+ def transform(self, raw_X):
150
+ """Transform a sequence of instances to a scipy.sparse matrix.
151
+
152
+ Parameters
153
+ ----------
154
+ raw_X : iterable over iterable over raw features, length = n_samples
155
+ Samples. Each sample must be an iterable (e.g., a list or tuple)
156
+ containing/generating feature names (and optionally values, see
157
+ the input_type constructor argument) which will be hashed.
158
+ raw_X need not support the len function, so it can be the result
159
+ of a generator; n_samples is determined on the fly.
160
+
161
+ Returns
162
+ -------
163
+ X : sparse matrix of shape (n_samples, n_features)
164
+ Feature matrix, for use with estimators or further transformers.
165
+ """
166
+ raw_X = iter(raw_X)
167
+ if self.input_type == "dict":
168
+ raw_X = (_iteritems(d) for d in raw_X)
169
+ elif self.input_type == "string":
170
+ first_raw_X = next(raw_X)
171
+ if isinstance(first_raw_X, str):
172
+ raise ValueError(
173
+ "Samples can not be a single string. The input must be an iterable"
174
+ " over iterables of strings."
175
+ )
176
+ raw_X_ = chain([first_raw_X], raw_X)
177
+ raw_X = (((f, 1) for f in x) for x in raw_X_)
178
+
179
+ indices, indptr, values = _hashing_transform(
180
+ raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
181
+ )
182
+ n_samples = indptr.shape[0] - 1
183
+
184
+ if n_samples == 0:
185
+ raise ValueError("Cannot vectorize empty sequence.")
186
+
187
+ X = sp.csr_matrix(
188
+ (values, indices, indptr),
189
+ dtype=self.dtype,
190
+ shape=(n_samples, self.n_features),
191
+ )
192
+ X.sum_duplicates() # also sorts the indices
193
+
194
+ return X
195
+
196
+ def _more_tags(self):
197
+ return {"X_types": [self.input_type]}
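
Because the column index comes from a hash rather than a learned vocabulary, the output width is fixed and unseen feature names never require a refit. A short sketch using `input_type="pair"`, with arbitrary feature names:

from sklearn.feature_extraction import FeatureHasher

h = FeatureHasher(n_features=16, input_type="pair")
X1 = h.transform([[("dog", 1), ("cat", 2)]])
X2 = h.transform([[("elephant", 4), ("dog", 2)]])  # new name, same width
assert X1.shape == (1, 16) and X2.shape == (1, 16)
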
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (110 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py ADDED
@@ -0,0 +1,325 @@
1
+ # This list of English stop words is taken from the "Glasgow Information
2
+ # Retrieval Group". The original list can be found at
3
+ # http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
4
+ ENGLISH_STOP_WORDS = frozenset(
5
+ [
6
+ "a",
7
+ "about",
8
+ "above",
9
+ "across",
10
+ "after",
11
+ "afterwards",
12
+ "again",
13
+ "against",
14
+ "all",
15
+ "almost",
16
+ "alone",
17
+ "along",
18
+ "already",
19
+ "also",
20
+ "although",
21
+ "always",
22
+ "am",
23
+ "among",
24
+ "amongst",
25
+ "amoungst",
26
+ "amount",
27
+ "an",
28
+ "and",
29
+ "another",
30
+ "any",
31
+ "anyhow",
32
+ "anyone",
33
+ "anything",
34
+ "anyway",
35
+ "anywhere",
36
+ "are",
37
+ "around",
38
+ "as",
39
+ "at",
40
+ "back",
41
+ "be",
42
+ "became",
43
+ "because",
44
+ "become",
45
+ "becomes",
46
+ "becoming",
47
+ "been",
48
+ "before",
49
+ "beforehand",
50
+ "behind",
51
+ "being",
52
+ "below",
53
+ "beside",
54
+ "besides",
55
+ "between",
56
+ "beyond",
57
+ "bill",
58
+ "both",
59
+ "bottom",
60
+ "but",
61
+ "by",
62
+ "call",
63
+ "can",
64
+ "cannot",
65
+ "cant",
66
+ "co",
67
+ "con",
68
+ "could",
69
+ "couldnt",
70
+ "cry",
71
+ "de",
72
+ "describe",
73
+ "detail",
74
+ "do",
75
+ "done",
76
+ "down",
77
+ "due",
78
+ "during",
79
+ "each",
80
+ "eg",
81
+ "eight",
82
+ "either",
83
+ "eleven",
84
+ "else",
85
+ "elsewhere",
86
+ "empty",
87
+ "enough",
88
+ "etc",
89
+ "even",
90
+ "ever",
91
+ "every",
92
+ "everyone",
93
+ "everything",
94
+ "everywhere",
95
+ "except",
96
+ "few",
97
+ "fifteen",
98
+ "fifty",
99
+ "fill",
100
+ "find",
101
+ "fire",
102
+ "first",
103
+ "five",
104
+ "for",
105
+ "former",
106
+ "formerly",
107
+ "forty",
108
+ "found",
109
+ "four",
110
+ "from",
111
+ "front",
112
+ "full",
113
+ "further",
114
+ "get",
115
+ "give",
116
+ "go",
117
+ "had",
118
+ "has",
119
+ "hasnt",
120
+ "have",
121
+ "he",
122
+ "hence",
123
+ "her",
124
+ "here",
125
+ "hereafter",
126
+ "hereby",
127
+ "herein",
128
+ "hereupon",
129
+ "hers",
130
+ "herself",
131
+ "him",
132
+ "himself",
133
+ "his",
134
+ "how",
135
+ "however",
136
+ "hundred",
137
+ "i",
138
+ "ie",
139
+ "if",
140
+ "in",
141
+ "inc",
142
+ "indeed",
143
+ "interest",
144
+ "into",
145
+ "is",
146
+ "it",
147
+ "its",
148
+ "itself",
149
+ "keep",
150
+ "last",
151
+ "latter",
152
+ "latterly",
153
+ "least",
154
+ "less",
155
+ "ltd",
156
+ "made",
157
+ "many",
158
+ "may",
159
+ "me",
160
+ "meanwhile",
161
+ "might",
162
+ "mill",
163
+ "mine",
164
+ "more",
165
+ "moreover",
166
+ "most",
167
+ "mostly",
168
+ "move",
169
+ "much",
170
+ "must",
171
+ "my",
172
+ "myself",
173
+ "name",
174
+ "namely",
175
+ "neither",
176
+ "never",
177
+ "nevertheless",
178
+ "next",
179
+ "nine",
180
+ "no",
181
+ "nobody",
182
+ "none",
183
+ "noone",
184
+ "nor",
185
+ "not",
186
+ "nothing",
187
+ "now",
188
+ "nowhere",
189
+ "of",
190
+ "off",
191
+ "often",
192
+ "on",
193
+ "once",
194
+ "one",
195
+ "only",
196
+ "onto",
197
+ "or",
198
+ "other",
199
+ "others",
200
+ "otherwise",
201
+ "our",
202
+ "ours",
203
+ "ourselves",
204
+ "out",
205
+ "over",
206
+ "own",
207
+ "part",
208
+ "per",
209
+ "perhaps",
210
+ "please",
211
+ "put",
212
+ "rather",
213
+ "re",
214
+ "same",
215
+ "see",
216
+ "seem",
217
+ "seemed",
218
+ "seeming",
219
+ "seems",
220
+ "serious",
221
+ "several",
222
+ "she",
223
+ "should",
224
+ "show",
225
+ "side",
226
+ "since",
227
+ "sincere",
228
+ "six",
229
+ "sixty",
230
+ "so",
231
+ "some",
232
+ "somehow",
233
+ "someone",
234
+ "something",
235
+ "sometime",
236
+ "sometimes",
237
+ "somewhere",
238
+ "still",
239
+ "such",
240
+ "system",
241
+ "take",
242
+ "ten",
243
+ "than",
244
+ "that",
245
+ "the",
246
+ "their",
247
+ "them",
248
+ "themselves",
249
+ "then",
250
+ "thence",
251
+ "there",
252
+ "thereafter",
253
+ "thereby",
254
+ "therefore",
255
+ "therein",
256
+ "thereupon",
257
+ "these",
258
+ "they",
259
+ "thick",
260
+ "thin",
261
+ "third",
262
+ "this",
263
+ "those",
264
+ "though",
265
+ "three",
266
+ "through",
267
+ "throughout",
268
+ "thru",
269
+ "thus",
270
+ "to",
271
+ "together",
272
+ "too",
273
+ "top",
274
+ "toward",
275
+ "towards",
276
+ "twelve",
277
+ "twenty",
278
+ "two",
279
+ "un",
280
+ "under",
281
+ "until",
282
+ "up",
283
+ "upon",
284
+ "us",
285
+ "very",
286
+ "via",
287
+ "was",
288
+ "we",
289
+ "well",
290
+ "were",
291
+ "what",
292
+ "whatever",
293
+ "when",
294
+ "whence",
295
+ "whenever",
296
+ "where",
297
+ "whereafter",
298
+ "whereas",
299
+ "whereby",
300
+ "wherein",
301
+ "whereupon",
302
+ "wherever",
303
+ "whether",
304
+ "which",
305
+ "while",
306
+ "whither",
307
+ "who",
308
+ "whoever",
309
+ "whole",
310
+ "whom",
311
+ "whose",
312
+ "why",
313
+ "will",
314
+ "with",
315
+ "within",
316
+ "without",
317
+ "would",
318
+ "yet",
319
+ "you",
320
+ "your",
321
+ "yours",
322
+ "yourself",
323
+ "yourselves",
324
+ ]
325
+ )
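
This frozen set is what the text vectorizers in this package use when `stop_words="english"` is requested; it can also be extended by hand. A sketch with arbitrary extra tokens:

from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS, CountVectorizer

custom_stop_words = list(ENGLISH_STOP_WORDS.union({"http", "https"}))
vec = CountVectorizer(stop_words=custom_stop_words)
X = vec.fit_transform(["the cat sat on the mat", "see https example"])
print(vec.get_feature_names_out())  # stop words and the extra tokens are dropped
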
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/image.py ADDED
@@ -0,0 +1,671 @@
1
+ """
2
+ The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
3
+ extract features from images.
4
+ """
5
+
6
+ # Authors: Emmanuelle Gouillart <[email protected]>
7
+ # Gael Varoquaux <[email protected]>
8
+ # Olivier Grisel
9
+ # Vlad Niculae
10
+ # License: BSD 3 clause
11
+
12
+ from itertools import product
13
+ from numbers import Integral, Number, Real
14
+
15
+ import numpy as np
16
+ from numpy.lib.stride_tricks import as_strided
17
+ from scipy import sparse
18
+
19
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
20
+ from ..utils import check_array, check_random_state
21
+ from ..utils._param_validation import Hidden, Interval, RealNotInt, validate_params
22
+
23
+ __all__ = [
24
+ "PatchExtractor",
25
+ "extract_patches_2d",
26
+ "grid_to_graph",
27
+ "img_to_graph",
28
+ "reconstruct_from_patches_2d",
29
+ ]
30
+
31
+ ###############################################################################
32
+ # From an image to a graph
33
+
34
+
35
+ def _make_edges_3d(n_x, n_y, n_z=1):
36
+ """Returns a list of edges for a 3D image.
37
+
38
+ Parameters
39
+ ----------
40
+ n_x : int
41
+ The size of the grid in the x direction.
42
+ n_y : int
43
+ The size of the grid in the y direction.
44
+ n_z : integer, default=1
45
+ The size of the grid in the z direction, defaults to 1
46
+ """
47
+ vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
48
+ edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))
49
+ edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
50
+ edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
51
+ edges = np.hstack((edges_deep, edges_right, edges_down))
52
+ return edges
53
+
54
+
55
+ def _compute_gradient_3d(edges, img):
56
+ _, n_y, n_z = img.shape
57
+ gradient = np.abs(
58
+ img[
59
+ edges[0] // (n_y * n_z),
60
+ (edges[0] % (n_y * n_z)) // n_z,
61
+ (edges[0] % (n_y * n_z)) % n_z,
62
+ ]
63
+ - img[
64
+ edges[1] // (n_y * n_z),
65
+ (edges[1] % (n_y * n_z)) // n_z,
66
+ (edges[1] % (n_y * n_z)) % n_z,
67
+ ]
68
+ )
69
+ return gradient
70
+
71
+
72
+ # XXX: Why mask the image after computing the weights?
73
+
74
+
75
+ def _mask_edges_weights(mask, edges, weights=None):
76
+ """Apply a mask to edges (weighted or not)"""
77
+ inds = np.arange(mask.size)
78
+ inds = inds[mask.ravel()]
79
+ ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds))
80
+ edges = edges[:, ind_mask]
81
+ if weights is not None:
82
+ weights = weights[ind_mask]
83
+ if len(edges.ravel()):
84
+ maxval = edges.max()
85
+ else:
86
+ maxval = 0
87
+ order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))
88
+ edges = order[edges]
89
+ if weights is None:
90
+ return edges
91
+ else:
92
+ return edges, weights
93
+
94
+
95
+ def _to_graph(
96
+ n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None
97
+ ):
98
+ """Auxiliary function for img_to_graph and grid_to_graph"""
99
+ edges = _make_edges_3d(n_x, n_y, n_z)
100
+
101
+ if dtype is None: # To not overwrite input dtype
102
+ if img is None:
103
+ dtype = int
104
+ else:
105
+ dtype = img.dtype
106
+
107
+ if img is not None:
108
+ img = np.atleast_3d(img)
109
+ weights = _compute_gradient_3d(edges, img)
110
+ if mask is not None:
111
+ edges, weights = _mask_edges_weights(mask, edges, weights)
112
+ diag = img.squeeze()[mask]
113
+ else:
114
+ diag = img.ravel()
115
+ n_voxels = diag.size
116
+ else:
117
+ if mask is not None:
118
+ mask = mask.astype(dtype=bool, copy=False)
119
+ edges = _mask_edges_weights(mask, edges)
120
+ n_voxels = np.sum(mask)
121
+ else:
122
+ n_voxels = n_x * n_y * n_z
123
+ weights = np.ones(edges.shape[1], dtype=dtype)
124
+ diag = np.ones(n_voxels, dtype=dtype)
125
+
126
+ diag_idx = np.arange(n_voxels)
127
+ i_idx = np.hstack((edges[0], edges[1]))
128
+ j_idx = np.hstack((edges[1], edges[0]))
129
+ graph = sparse.coo_matrix(
130
+ (
131
+ np.hstack((weights, weights, diag)),
132
+ (np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))),
133
+ ),
134
+ (n_voxels, n_voxels),
135
+ dtype=dtype,
136
+ )
137
+ if return_as is np.ndarray:
138
+ return graph.toarray()
139
+ return return_as(graph)
140
+
141
+
142
+ @validate_params(
143
+ {
144
+ "img": ["array-like"],
145
+ "mask": [None, np.ndarray],
146
+ "return_as": [type],
147
+ "dtype": "no_validation", # validation delegated to numpy
148
+ },
149
+ prefer_skip_nested_validation=True,
150
+ )
151
+ def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
152
+ """Graph of the pixel-to-pixel gradient connections.
153
+
154
+ Edges are weighted with the gradient values.
155
+
156
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
157
+
158
+ Parameters
159
+ ----------
160
+ img : array-like of shape (height, width) or (height, width, channel)
161
+ 2D or 3D image.
162
+ mask : ndarray of shape (height, width) or \
163
+ (height, width, channel), dtype=bool, default=None
164
+ An optional mask of the image, to consider only part of the
165
+ pixels.
166
+ return_as : np.ndarray or a sparse matrix class, \
167
+ default=sparse.coo_matrix
168
+ The class to use to build the returned adjacency matrix.
169
+ dtype : dtype, default=None
170
+ The data of the returned sparse matrix. By default it is the
171
+ dtype of img.
172
+
173
+ Returns
174
+ -------
175
+ graph : ndarray or a sparse matrix class
176
+ The computed adjacency matrix.
177
+
178
+ Notes
179
+ -----
180
+ For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
181
+ handled by returning a dense np.matrix instance. Going forward, np.ndarray
182
+ returns an np.ndarray, as expected.
183
+
184
+ For compatibility, user code relying on this method should wrap its
185
+ calls in ``np.asarray`` to avoid type issues.
186
+ """
187
+ img = np.atleast_3d(img)
188
+ n_x, n_y, n_z = img.shape
189
+ return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
190
+
191
+
192
+ @validate_params(
193
+ {
194
+ "n_x": [Interval(Integral, left=1, right=None, closed="left")],
195
+ "n_y": [Interval(Integral, left=1, right=None, closed="left")],
196
+ "n_z": [Interval(Integral, left=1, right=None, closed="left")],
197
+ "mask": [None, np.ndarray],
198
+ "return_as": [type],
199
+ "dtype": "no_validation", # validation delegated to numpy
200
+ },
201
+ prefer_skip_nested_validation=True,
202
+ )
203
+ def grid_to_graph(
204
+ n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int
205
+ ):
206
+ """Graph of the pixel-to-pixel connections.
207
+
208
+ Edges exist if 2 voxels are connected.
209
+
210
+ Parameters
211
+ ----------
212
+ n_x : int
213
+ Dimension in x axis.
214
+ n_y : int
215
+ Dimension in y axis.
216
+ n_z : int, default=1
217
+ Dimension in z axis.
218
+ mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
219
+ An optional mask of the image, to consider only part of the
220
+ pixels.
221
+ return_as : np.ndarray or a sparse matrix class, \
222
+ default=sparse.coo_matrix
223
+ The class to use to build the returned adjacency matrix.
224
+ dtype : dtype, default=int
225
+ The data of the returned sparse matrix. By default it is int.
226
+
227
+ Returns
228
+ -------
229
+ graph : np.ndarray or a sparse matrix class
230
+ The computed adjacency matrix.
231
+
232
+ Notes
233
+ -----
234
+ For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
235
+ handled by returning a dense np.matrix instance. Going forward, np.ndarray
236
+ returns an np.ndarray, as expected.
237
+
238
+ For compatibility, user code relying on this method should wrap its
239
+ calls in ``np.asarray`` to avoid type issues.
240
+
241
+ Examples
242
+ --------
243
+ >>> import numpy as np
244
+ >>> from sklearn.feature_extraction.image import grid_to_graph
245
+ >>> shape_img = (4, 4, 1)
246
+ >>> mask = np.zeros(shape=shape_img, dtype=bool)
247
+ >>> mask[[1, 2], [1, 2], :] = True
248
+ >>> graph = grid_to_graph(*shape_img, mask=mask)
249
+ >>> print(graph)
250
+ (0, 0) 1
251
+ (1, 1) 1
252
+ """
253
+ return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
254
+
255
+
256
+ ###############################################################################
257
+ # From an image to a set of small image patches
258
+
259
+
260
+ def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
261
+ """Compute the number of patches that will be extracted in an image.
262
+
263
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
264
+
265
+ Parameters
266
+ ----------
267
+ i_h : int
268
+ The image height
269
+ i_w : int
270
+ The image width
271
+ p_h : int
272
+ The height of a patch
273
+ p_w : int
274
+ The width of a patch
275
+ max_patches : int or float, default=None
276
+ The maximum number of patches to extract. If `max_patches` is a float
277
+ between 0 and 1, it is taken to be a proportion of the total number
278
+ of patches. If `max_patches` is None, all possible patches are extracted.
279
+ """
280
+ n_h = i_h - p_h + 1
281
+ n_w = i_w - p_w + 1
282
+ all_patches = n_h * n_w
283
+
284
+ if max_patches:
285
+ if isinstance(max_patches, (Integral)) and max_patches < all_patches:
286
+ return max_patches
287
+ elif isinstance(max_patches, (Integral)) and max_patches >= all_patches:
288
+ return all_patches
289
+ elif isinstance(max_patches, (Real)) and 0 < max_patches < 1:
290
+ return int(max_patches * all_patches)
291
+ else:
292
+ raise ValueError("Invalid value for max_patches: %r" % max_patches)
293
+ else:
294
+ return all_patches
295
+
296
+
297
+ def _extract_patches(arr, patch_shape=8, extraction_step=1):
298
+ """Extracts patches of any n-dimensional array in place using strides.
299
+
300
+ Given an n-dimensional array it will return a 2n-dimensional array with
301
+ the first n dimensions indexing patch position and the last n indexing
302
+ the patch content. This operation is immediate (O(1)). A reshape
303
+ performed on the first n dimensions will cause numpy to copy data, leading
304
+ to a list of extracted patches.
305
+
306
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
307
+
308
+ Parameters
309
+ ----------
310
+ arr : ndarray
311
+ n-dimensional array of which patches are to be extracted
312
+
313
+ patch_shape : int or tuple of length arr.ndim, default=8
314
+ Indicates the shape of the patches to be extracted. If an
315
+ integer is given, the shape will be a hypercube of
316
+ sidelength given by its value.
317
+
318
+ extraction_step : int or tuple of length arr.ndim, default=1
319
+ Indicates step size at which extraction shall be performed.
320
+ If integer is given, then the step is uniform in all dimensions.
321
+
322
+
323
+ Returns
324
+ -------
325
+ patches : strided ndarray
326
+ 2n-dimensional array indexing patches on first n dimensions and
327
+ containing patches on the last n dimensions. These dimensions
328
+ are fake, but this way no data is copied. A simple reshape invokes
329
+ a copying operation to obtain a list of patches:
330
+ result.reshape([-1] + list(patch_shape))
331
+ """
332
+
333
+ arr_ndim = arr.ndim
334
+
335
+ if isinstance(patch_shape, Number):
336
+ patch_shape = tuple([patch_shape] * arr_ndim)
337
+ if isinstance(extraction_step, Number):
338
+ extraction_step = tuple([extraction_step] * arr_ndim)
339
+
340
+ patch_strides = arr.strides
341
+
342
+ slices = tuple(slice(None, None, st) for st in extraction_step)
343
+ indexing_strides = arr[slices].strides
344
+
345
+ patch_indices_shape = (
346
+ (np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
347
+ ) + 1
348
+
349
+ shape = tuple(list(patch_indices_shape) + list(patch_shape))
350
+ strides = tuple(list(indexing_strides) + list(patch_strides))
351
+
352
+ patches = as_strided(arr, shape=shape, strides=strides)
353
+ return patches
354
+
355
+
356
+ @validate_params(
357
+ {
358
+ "image": [np.ndarray],
359
+ "patch_size": [tuple, list],
360
+ "max_patches": [
361
+ Interval(RealNotInt, 0, 1, closed="neither"),
362
+ Interval(Integral, 1, None, closed="left"),
363
+ None,
364
+ ],
365
+ "random_state": ["random_state"],
366
+ },
367
+ prefer_skip_nested_validation=True,
368
+ )
369
+ def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
370
+ """Reshape a 2D image into a collection of patches.
371
+
372
+ The resulting patches are allocated in a dedicated array.
373
+
374
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
375
+
376
+ Parameters
377
+ ----------
378
+ image : ndarray of shape (image_height, image_width) or \
379
+ (image_height, image_width, n_channels)
380
+ The original image data. For color images, the last dimension specifies
381
+ the channel: a RGB image would have `n_channels=3`.
382
+
383
+ patch_size : tuple of int (patch_height, patch_width)
384
+ The dimensions of one patch.
385
+
386
+ max_patches : int or float, default=None
387
+ The maximum number of patches to extract. If `max_patches` is a float
388
+ between 0 and 1, it is taken to be a proportion of the total number
389
+ of patches. If `max_patches` is None it corresponds to the total number
390
+ of patches that can be extracted.
391
+
392
+ random_state : int, RandomState instance, default=None
393
+ Determines the random number generator used for random sampling when
394
+ `max_patches` is not None. Use an int to make the randomness
395
+ deterministic.
396
+ See :term:`Glossary <random_state>`.
397
+
398
+ Returns
399
+ -------
400
+ patches : array of shape (n_patches, patch_height, patch_width) or \
401
+ (n_patches, patch_height, patch_width, n_channels)
402
+ The collection of patches extracted from the image, where `n_patches`
403
+ is either `max_patches` or the total number of patches that can be
404
+ extracted.
405
+
406
+ Examples
407
+ --------
408
+ >>> from sklearn.datasets import load_sample_image
409
+ >>> from sklearn.feature_extraction import image
410
+ >>> # Use the array data from the first image in this dataset:
411
+ >>> one_image = load_sample_image("china.jpg")
412
+ >>> print('Image shape: {}'.format(one_image.shape))
413
+ Image shape: (427, 640, 3)
414
+ >>> patches = image.extract_patches_2d(one_image, (2, 2))
415
+ >>> print('Patches shape: {}'.format(patches.shape))
416
+ Patches shape: (272214, 2, 2, 3)
417
+ >>> # Here are just two of these patches:
418
+ >>> print(patches[1])
419
+ [[[174 201 231]
420
+ [174 201 231]]
421
+ [[173 200 230]
422
+ [173 200 230]]]
423
+ >>> print(patches[800])
424
+ [[[187 214 243]
425
+ [188 215 244]]
426
+ [[187 214 243]
427
+ [188 215 244]]]
428
+ """
429
+ i_h, i_w = image.shape[:2]
430
+ p_h, p_w = patch_size
431
+
432
+ if p_h > i_h:
433
+ raise ValueError(
434
+ "Height of the patch should be less than the height of the image."
435
+ )
436
+
437
+ if p_w > i_w:
438
+ raise ValueError(
439
+ "Width of the patch should be less than the width of the image."
440
+ )
441
+
442
+ image = check_array(image, allow_nd=True)
443
+ image = image.reshape((i_h, i_w, -1))
444
+ n_colors = image.shape[-1]
445
+
446
+ extracted_patches = _extract_patches(
447
+ image, patch_shape=(p_h, p_w, n_colors), extraction_step=1
448
+ )
449
+
450
+ n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
451
+ if max_patches:
452
+ rng = check_random_state(random_state)
453
+ i_s = rng.randint(i_h - p_h + 1, size=n_patches)
454
+ j_s = rng.randint(i_w - p_w + 1, size=n_patches)
455
+ patches = extracted_patches[i_s, j_s, 0]
456
+ else:
457
+ patches = extracted_patches
458
+
459
+ patches = patches.reshape(-1, p_h, p_w, n_colors)
460
+ # remove the color dimension if useless
461
+ if patches.shape[-1] == 1:
462
+ return patches.reshape((n_patches, p_h, p_w))
463
+ else:
464
+ return patches
465
+
466
+
467
+ @validate_params(
468
+ {"patches": [np.ndarray], "image_size": [tuple, Hidden(list)]},
469
+ prefer_skip_nested_validation=True,
470
+ )
471
+ def reconstruct_from_patches_2d(patches, image_size):
472
+ """Reconstruct the image from all of its patches.
473
+
474
+ Patches are assumed to overlap and the image is constructed by filling in
475
+ the patches from left to right, top to bottom, averaging the overlapping
476
+ regions.
477
+
478
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
479
+
480
+ Parameters
481
+ ----------
482
+ patches : ndarray of shape (n_patches, patch_height, patch_width) or \
483
+ (n_patches, patch_height, patch_width, n_channels)
484
+ The complete set of patches. If the patches contain colour information,
485
+ channels are indexed along the last dimension: RGB patches would
486
+ have `n_channels=3`.
487
+
488
+ image_size : tuple of int (image_height, image_width) or \
489
+ (image_height, image_width, n_channels)
490
+ The size of the image that will be reconstructed.
491
+
492
+ Returns
493
+ -------
494
+ image : ndarray of shape image_size
495
+ The reconstructed image.
496
+ """
497
+ i_h, i_w = image_size[:2]
498
+ p_h, p_w = patches.shape[1:3]
499
+ img = np.zeros(image_size)
500
+ # compute the dimensions of the patches array
501
+ n_h = i_h - p_h + 1
502
+ n_w = i_w - p_w + 1
503
+ for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
504
+ img[i : i + p_h, j : j + p_w] += p
505
+
506
+ for i in range(i_h):
507
+ for j in range(i_w):
508
+ # divide by the amount of overlap
509
+ # XXX: is this the most efficient way? memory-wise yes, cpu wise?
510
+ img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j))
511
+ return img
512
+
513
+
+ class PatchExtractor(TransformerMixin, BaseEstimator):
+     """Extracts patches from a collection of images.
+
+     Read more in the :ref:`User Guide <image_feature_extraction>`.
+
+     .. versionadded:: 0.9
+
+     Parameters
+     ----------
+     patch_size : tuple of int (patch_height, patch_width), default=None
+         The dimensions of one patch. If set to None, the patch size will be
+         automatically set to `(img_height // 10, img_width // 10)`, where
+         `img_height` and `img_width` are the dimensions of the input images.
+
+     max_patches : int or float, default=None
+         The maximum number of patches per image to extract. If `max_patches` is
+         a float in (0, 1), it is taken to mean a proportion of the total number
+         of patches. If set to None, extract all possible patches.
+
+     random_state : int, RandomState instance, default=None
+         Determines the random number generator used for random sampling when
+         `max_patches is not None`. Use an int to make the randomness
+         deterministic.
+         See :term:`Glossary <random_state>`.
+
+     See Also
+     --------
+     reconstruct_from_patches_2d : Reconstruct image from all of its patches.
+
+     Notes
+     -----
+     This estimator is stateless and does not need to be fitted. However, we
+     recommend calling :meth:`fit_transform` instead of :meth:`transform`, as
+     parameter validation is only performed in :meth:`fit`.
+
+     Examples
+     --------
+     >>> from sklearn.datasets import load_sample_images
+     >>> from sklearn.feature_extraction import image
+     >>> # Use the array data from the second image in this dataset:
+     >>> X = load_sample_images().images[1]
+     >>> X = X[None, ...]
+     >>> print(f"Image shape: {X.shape}")
+     Image shape: (1, 427, 640, 3)
+     >>> pe = image.PatchExtractor(patch_size=(10, 10))
+     >>> pe_trans = pe.transform(X)
+     >>> print(f"Patches shape: {pe_trans.shape}")
+     Patches shape: (263758, 10, 10, 3)
+     >>> X_reconstructed = image.reconstruct_from_patches_2d(pe_trans, X.shape[1:])
+     >>> print(f"Reconstructed shape: {X_reconstructed.shape}")
+     Reconstructed shape: (427, 640, 3)
+     """
+
+     _parameter_constraints: dict = {
+         "patch_size": [tuple, None],
+         "max_patches": [
+             None,
+             Interval(RealNotInt, 0, 1, closed="neither"),
+             Interval(Integral, 1, None, closed="left"),
+         ],
+         "random_state": ["random_state"],
+     }
+
+     def __init__(self, *, patch_size=None, max_patches=None, random_state=None):
+         self.patch_size = patch_size
+         self.max_patches = max_patches
+         self.random_state = random_state
+
+     @_fit_context(prefer_skip_nested_validation=True)
+     def fit(self, X, y=None):
+         """Only validate the parameters of the estimator.
+
+         This method (i) validates the parameters of the estimator and
+         (ii) keeps the class consistent with the scikit-learn transformer API.
+
+         Parameters
+         ----------
+         X : ndarray of shape (n_samples, image_height, image_width) or \
+             (n_samples, image_height, image_width, n_channels)
+             Array of images from which to extract patches. For color images,
+             the last dimension specifies the channel: an RGB image would have
+             `n_channels=3`.
+
+         y : Ignored
+             Not used, present for API consistency by convention.
+
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         return self
+
+     def transform(self, X):
+         """Transform the image samples in `X` into a matrix of patch data.
+
+         Parameters
+         ----------
+         X : ndarray of shape (n_samples, image_height, image_width) or \
+             (n_samples, image_height, image_width, n_channels)
+             Array of images from which to extract patches. For color images,
+             the last dimension specifies the channel: an RGB image would have
+             `n_channels=3`.
+
+         Returns
+         -------
+         patches : array of shape (n_patches, patch_height, patch_width) or \
+             (n_patches, patch_height, patch_width, n_channels)
+             The collection of patches extracted from the images, where
+             `n_patches` is either `n_samples * max_patches` or the total
+             number of patches that can be extracted.
+         """
+         X = self._validate_data(
+             X=X,
+             ensure_2d=False,
+             allow_nd=True,
+             ensure_min_samples=1,
+             ensure_min_features=1,
+             reset=False,
+         )
+         random_state = check_random_state(self.random_state)
+         n_imgs, img_height, img_width = X.shape[:3]
+         if self.patch_size is None:
+             patch_size = img_height // 10, img_width // 10
+         else:
+             if len(self.patch_size) != 2:
+                 raise ValueError(
+                     "patch_size must be a tuple of two integers. Got"
+                     f" {self.patch_size} instead."
+                 )
+             patch_size = self.patch_size
+
+         n_imgs, img_height, img_width = X.shape[:3]
+         X = np.reshape(X, (n_imgs, img_height, img_width, -1))
+         n_channels = X.shape[-1]
+
+         # compute the dimensions of the patches array
+         patch_height, patch_width = patch_size
+         n_patches = _compute_n_patches(
+             img_height, img_width, patch_height, patch_width, self.max_patches
+         )
+         patches_shape = (n_imgs * n_patches,) + patch_size
+         if n_channels > 1:
+             patches_shape += (n_channels,)
+
+         # extract the patches
+         patches = np.empty(patches_shape)
+         for ii, image in enumerate(X):
+             patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d(
+                 image,
+                 patch_size,
+                 max_patches=self.max_patches,
+                 random_state=random_state,
+             )
+         return patches
+
+     def _more_tags(self):
+         return {"X_types": ["3darray"], "stateless": True}
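A short usage sketch for the class above, complementing the docstring example: `max_patches` is given as a proportion together with a fixed `random_state`, and `fit_transform` is used as the Notes section recommends. The image sizes and patch counts are arbitrary toy values:

import numpy as np
from sklearn.feature_extraction.image import PatchExtractor

# Two toy 32x32 RGB "images"; any (n_samples, height, width[, channels]) array works.
images = np.random.RandomState(0).rand(2, 32, 32, 3)

# Keep roughly 1% of the (32 - 8 + 1) ** 2 = 625 possible patches per image,
# sampled reproducibly thanks to random_state.
pe = PatchExtractor(patch_size=(8, 8), max_patches=0.01, random_state=0)
patches = pe.fit_transform(images)
print(patches.shape)  # (n_images * patches_per_image, 8, 8, 3)
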
env-llmeval/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py ADDED
File without changes