applied-ai-018 committed on
Commit 696cd06 · verified · 1 Parent(s): 596e894

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__init__.py +8 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_base.py +560 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py +888 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py +912 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py +0 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py +466 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py +1422 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py +30 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py +63 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so +0 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_data.py +0 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py +472 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py +1678 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py +431 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_label.py +951 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py +1172 -0
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.26 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc ADDED
Binary file (35.7 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc ADDED
Binary file (37.9 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (384 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc ADDED
Binary file (11 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc ADDED
Binary file (65.6 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc ADDED
Binary file (20.1 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc ADDED
Binary file (4.77 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (219 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc ADDED
Binary file (13 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc ADDED
Binary file (1.88 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc ADDED
Binary file (4.63 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc ADDED
Binary file (38.2 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc ADDED
Binary file (13.8 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc ADDED
Binary file (4.79 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc ADDED
Binary file (8.8 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc ADDED
Binary file (4.52 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc ADDED
Binary file (15.6 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc ADDED
Binary file (4.7 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc ADDED
Binary file (20.9 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (5.14 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc ADDED
Binary file (44.8 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc ADDED
Binary file (38.6 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc ADDED
Binary file (10.1 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc ADDED
Binary file (18.7 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc ADDED
Binary file (18.3 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ The :mod:`sklearn.mixture` module implements mixture modeling algorithms.
+ """
+
+ from ._bayesian_mixture import BayesianGaussianMixture
+ from ._gaussian_mixture import GaussianMixture
+
+ __all__ = ["GaussianMixture", "BayesianGaussianMixture"]
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (431 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc ADDED
Binary file (16.5 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc ADDED
Binary file (27.3 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc ADDED
Binary file (27.4 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_base.py ADDED
@@ -0,0 +1,560 @@
1
+ """Base class for mixture models."""
2
+
3
+ # Author: Wei Xue <[email protected]>
4
+ # Modified by Thierry Guillemot <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+ import warnings
8
+ from abc import ABCMeta, abstractmethod
9
+ from numbers import Integral, Real
10
+ from time import time
11
+
12
+ import numpy as np
13
+ from scipy.special import logsumexp
14
+
15
+ from .. import cluster
16
+ from ..base import BaseEstimator, DensityMixin, _fit_context
17
+ from ..cluster import kmeans_plusplus
18
+ from ..exceptions import ConvergenceWarning
19
+ from ..utils import check_random_state
20
+ from ..utils._param_validation import Interval, StrOptions
21
+ from ..utils.validation import check_is_fitted
22
+
23
+
24
+ def _check_shape(param, param_shape, name):
25
+ """Validate the shape of the input parameter 'param'.
26
+
27
+ Parameters
28
+ ----------
29
+ param : array
30
+
31
+ param_shape : tuple
32
+
33
+ name : str
34
+ """
35
+ param = np.array(param)
36
+ if param.shape != param_shape:
37
+ raise ValueError(
38
+ "The parameter '%s' should have the shape of %s, but got %s"
39
+ % (name, param_shape, param.shape)
40
+ )
41
+
42
+
43
+ class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):
44
+ """Base class for mixture models.
45
+
46
+ This abstract class specifies an interface for all mixture classes and
47
+ provides basic common methods for mixture models.
48
+ """
49
+
50
+ _parameter_constraints: dict = {
51
+ "n_components": [Interval(Integral, 1, None, closed="left")],
52
+ "tol": [Interval(Real, 0.0, None, closed="left")],
53
+ "reg_covar": [Interval(Real, 0.0, None, closed="left")],
54
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
55
+ "n_init": [Interval(Integral, 1, None, closed="left")],
56
+ "init_params": [
57
+ StrOptions({"kmeans", "random", "random_from_data", "k-means++"})
58
+ ],
59
+ "random_state": ["random_state"],
60
+ "warm_start": ["boolean"],
61
+ "verbose": ["verbose"],
62
+ "verbose_interval": [Interval(Integral, 1, None, closed="left")],
63
+ }
64
+
65
+ def __init__(
66
+ self,
67
+ n_components,
68
+ tol,
69
+ reg_covar,
70
+ max_iter,
71
+ n_init,
72
+ init_params,
73
+ random_state,
74
+ warm_start,
75
+ verbose,
76
+ verbose_interval,
77
+ ):
78
+ self.n_components = n_components
79
+ self.tol = tol
80
+ self.reg_covar = reg_covar
81
+ self.max_iter = max_iter
82
+ self.n_init = n_init
83
+ self.init_params = init_params
84
+ self.random_state = random_state
85
+ self.warm_start = warm_start
86
+ self.verbose = verbose
87
+ self.verbose_interval = verbose_interval
88
+
89
+ @abstractmethod
90
+ def _check_parameters(self, X):
91
+ """Check initial parameters of the derived class.
92
+
93
+ Parameters
94
+ ----------
95
+ X : array-like of shape (n_samples, n_features)
96
+ """
97
+ pass
98
+
99
+ def _initialize_parameters(self, X, random_state):
100
+ """Initialize the model parameters.
101
+
102
+ Parameters
103
+ ----------
104
+ X : array-like of shape (n_samples, n_features)
105
+
106
+ random_state : RandomState
107
+ A random number generator instance that controls the random seed
108
+ used for the method chosen to initialize the parameters.
109
+ """
110
+ n_samples, _ = X.shape
111
+
112
+ if self.init_params == "kmeans":
113
+ resp = np.zeros((n_samples, self.n_components))
114
+ label = (
115
+ cluster.KMeans(
116
+ n_clusters=self.n_components, n_init=1, random_state=random_state
117
+ )
118
+ .fit(X)
119
+ .labels_
120
+ )
121
+ resp[np.arange(n_samples), label] = 1
122
+ elif self.init_params == "random":
123
+ resp = random_state.uniform(size=(n_samples, self.n_components))
124
+ resp /= resp.sum(axis=1)[:, np.newaxis]
125
+ elif self.init_params == "random_from_data":
126
+ resp = np.zeros((n_samples, self.n_components))
127
+ indices = random_state.choice(
128
+ n_samples, size=self.n_components, replace=False
129
+ )
130
+ resp[indices, np.arange(self.n_components)] = 1
131
+ elif self.init_params == "k-means++":
132
+ resp = np.zeros((n_samples, self.n_components))
133
+ _, indices = kmeans_plusplus(
134
+ X,
135
+ self.n_components,
136
+ random_state=random_state,
137
+ )
138
+ resp[indices, np.arange(self.n_components)] = 1
139
+
140
+ self._initialize(X, resp)
141
+
142
+ @abstractmethod
143
+ def _initialize(self, X, resp):
144
+ """Initialize the model parameters of the derived class.
145
+
146
+ Parameters
147
+ ----------
148
+ X : array-like of shape (n_samples, n_features)
149
+
150
+ resp : array-like of shape (n_samples, n_components)
151
+ """
152
+ pass
153
+
154
+ def fit(self, X, y=None):
155
+ """Estimate model parameters with the EM algorithm.
156
+
157
+ The method fits the model ``n_init`` times and sets the parameters with
158
+ which the model has the largest likelihood or lower bound. Within each
159
+ trial, the method iterates between E-step and M-step for ``max_iter``
160
+ times until the change of likelihood or lower bound is less than
161
+ ``tol``, otherwise, a ``ConvergenceWarning`` is raised.
162
+ If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
163
+ initialization is performed upon the first call. Upon consecutive
164
+ calls, training starts where it left off.
165
+
166
+ Parameters
167
+ ----------
168
+ X : array-like of shape (n_samples, n_features)
169
+ List of n_features-dimensional data points. Each row
170
+ corresponds to a single data point.
171
+
172
+ y : Ignored
173
+ Not used, present for API consistency by convention.
174
+
175
+ Returns
176
+ -------
177
+ self : object
178
+ The fitted mixture.
179
+ """
180
+ # parameters are validated in fit_predict
181
+ self.fit_predict(X, y)
182
+ return self
183
+
184
+ @_fit_context(prefer_skip_nested_validation=True)
185
+ def fit_predict(self, X, y=None):
186
+ """Estimate model parameters using X and predict the labels for X.
187
+
188
+ The method fits the model n_init times and sets the parameters with
189
+ which the model has the largest likelihood or lower bound. Within each
190
+ trial, the method iterates between E-step and M-step for `max_iter`
191
+ times until the change of likelihood or lower bound is less than
192
+ `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is
193
+ raised. After fitting, it predicts the most probable label for the
194
+ input data points.
195
+
196
+ .. versionadded:: 0.20
197
+
198
+ Parameters
199
+ ----------
200
+ X : array-like of shape (n_samples, n_features)
201
+ List of n_features-dimensional data points. Each row
202
+ corresponds to a single data point.
203
+
204
+ y : Ignored
205
+ Not used, present for API consistency by convention.
206
+
207
+ Returns
208
+ -------
209
+ labels : array, shape (n_samples,)
210
+ Component labels.
211
+ """
212
+ X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_min_samples=2)
213
+ if X.shape[0] < self.n_components:
214
+ raise ValueError(
215
+ "Expected n_samples >= n_components "
216
+ f"but got n_components = {self.n_components}, "
217
+ f"n_samples = {X.shape[0]}"
218
+ )
219
+ self._check_parameters(X)
220
+
221
+ # if we enable warm_start, we will have a unique initialisation
222
+ do_init = not (self.warm_start and hasattr(self, "converged_"))
223
+ n_init = self.n_init if do_init else 1
224
+
225
+ max_lower_bound = -np.inf
226
+ self.converged_ = False
227
+
228
+ random_state = check_random_state(self.random_state)
229
+
230
+ n_samples, _ = X.shape
231
+ for init in range(n_init):
232
+ self._print_verbose_msg_init_beg(init)
233
+
234
+ if do_init:
235
+ self._initialize_parameters(X, random_state)
236
+
237
+ lower_bound = -np.inf if do_init else self.lower_bound_
238
+
239
+ if self.max_iter == 0:
240
+ best_params = self._get_parameters()
241
+ best_n_iter = 0
242
+ else:
243
+ for n_iter in range(1, self.max_iter + 1):
244
+ prev_lower_bound = lower_bound
245
+
246
+ log_prob_norm, log_resp = self._e_step(X)
247
+ self._m_step(X, log_resp)
248
+ lower_bound = self._compute_lower_bound(log_resp, log_prob_norm)
249
+
250
+ change = lower_bound - prev_lower_bound
251
+ self._print_verbose_msg_iter_end(n_iter, change)
252
+
253
+ if abs(change) < self.tol:
254
+ self.converged_ = True
255
+ break
256
+
257
+ self._print_verbose_msg_init_end(lower_bound)
258
+
259
+ if lower_bound > max_lower_bound or max_lower_bound == -np.inf:
260
+ max_lower_bound = lower_bound
261
+ best_params = self._get_parameters()
262
+ best_n_iter = n_iter
263
+
264
+ # Should only warn about convergence if max_iter > 0, otherwise
265
+ # the user is assumed to have used 0-iters initialization
266
+ # to get the initial means.
267
+ if not self.converged_ and self.max_iter > 0:
268
+ warnings.warn(
269
+ "Initialization %d did not converge. "
270
+ "Try different init parameters, "
271
+ "or increase max_iter, tol "
272
+ "or check for degenerate data." % (init + 1),
273
+ ConvergenceWarning,
274
+ )
275
+
276
+ self._set_parameters(best_params)
277
+ self.n_iter_ = best_n_iter
278
+ self.lower_bound_ = max_lower_bound
279
+
280
+ # Always do a final e-step to guarantee that the labels returned by
281
+ # fit_predict(X) are always consistent with fit(X).predict(X)
282
+ # for any value of max_iter and tol (and any random_state).
283
+ _, log_resp = self._e_step(X)
284
+
285
+ return log_resp.argmax(axis=1)
286
+
287
+ def _e_step(self, X):
288
+ """E step.
289
+
290
+ Parameters
291
+ ----------
292
+ X : array-like of shape (n_samples, n_features)
293
+
294
+ Returns
295
+ -------
296
+ log_prob_norm : float
297
+ Mean of the logarithms of the probabilities of each sample in X
298
+
299
+ log_responsibility : array, shape (n_samples, n_components)
300
+ Logarithm of the posterior probabilities (or responsibilities) of
301
+ the point of each sample in X.
302
+ """
303
+ log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
304
+ return np.mean(log_prob_norm), log_resp
305
+
306
+ @abstractmethod
307
+ def _m_step(self, X, log_resp):
308
+ """M step.
309
+
310
+ Parameters
311
+ ----------
312
+ X : array-like of shape (n_samples, n_features)
313
+
314
+ log_resp : array-like of shape (n_samples, n_components)
315
+ Logarithm of the posterior probabilities (or responsibilities) of
316
+ the point of each sample in X.
317
+ """
318
+ pass
319
+
320
+ @abstractmethod
321
+ def _get_parameters(self):
322
+ pass
323
+
324
+ @abstractmethod
325
+ def _set_parameters(self, params):
326
+ pass
327
+
328
+ def score_samples(self, X):
329
+ """Compute the log-likelihood of each sample.
330
+
331
+ Parameters
332
+ ----------
333
+ X : array-like of shape (n_samples, n_features)
334
+ List of n_features-dimensional data points. Each row
335
+ corresponds to a single data point.
336
+
337
+ Returns
338
+ -------
339
+ log_prob : array, shape (n_samples,)
340
+ Log-likelihood of each sample in `X` under the current model.
341
+ """
342
+ check_is_fitted(self)
343
+ X = self._validate_data(X, reset=False)
344
+
345
+ return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
346
+
347
+ def score(self, X, y=None):
348
+ """Compute the per-sample average log-likelihood of the given data X.
349
+
350
+ Parameters
351
+ ----------
352
+ X : array-like of shape (n_samples, n_dimensions)
353
+ List of n_features-dimensional data points. Each row
354
+ corresponds to a single data point.
355
+
356
+ y : Ignored
357
+ Not used, present for API consistency by convention.
358
+
359
+ Returns
360
+ -------
361
+ log_likelihood : float
362
+ Log-likelihood of `X` under the Gaussian mixture model.
363
+ """
364
+ return self.score_samples(X).mean()
365
+
366
+ def predict(self, X):
367
+ """Predict the labels for the data samples in X using trained model.
368
+
369
+ Parameters
370
+ ----------
371
+ X : array-like of shape (n_samples, n_features)
372
+ List of n_features-dimensional data points. Each row
373
+ corresponds to a single data point.
374
+
375
+ Returns
376
+ -------
377
+ labels : array, shape (n_samples,)
378
+ Component labels.
379
+ """
380
+ check_is_fitted(self)
381
+ X = self._validate_data(X, reset=False)
382
+ return self._estimate_weighted_log_prob(X).argmax(axis=1)
383
+
384
+ def predict_proba(self, X):
385
+ """Evaluate the components' density for each sample.
386
+
387
+ Parameters
388
+ ----------
389
+ X : array-like of shape (n_samples, n_features)
390
+ List of n_features-dimensional data points. Each row
391
+ corresponds to a single data point.
392
+
393
+ Returns
394
+ -------
395
+ resp : array, shape (n_samples, n_components)
396
+ Density of each Gaussian component for each sample in X.
397
+ """
398
+ check_is_fitted(self)
399
+ X = self._validate_data(X, reset=False)
400
+ _, log_resp = self._estimate_log_prob_resp(X)
401
+ return np.exp(log_resp)
402
+
403
+ def sample(self, n_samples=1):
404
+ """Generate random samples from the fitted Gaussian distribution.
405
+
406
+ Parameters
407
+ ----------
408
+ n_samples : int, default=1
409
+ Number of samples to generate.
410
+
411
+ Returns
412
+ -------
413
+ X : array, shape (n_samples, n_features)
414
+ Randomly generated sample.
415
+
416
+ y : array, shape (nsamples,)
417
+ Component labels.
418
+ """
419
+ check_is_fitted(self)
420
+
421
+ if n_samples < 1:
422
+ raise ValueError(
423
+ "Invalid value for 'n_samples': %d . The sampling requires at "
424
+ "least one sample." % (self.n_components)
425
+ )
426
+
427
+ _, n_features = self.means_.shape
428
+ rng = check_random_state(self.random_state)
429
+ n_samples_comp = rng.multinomial(n_samples, self.weights_)
430
+
431
+ if self.covariance_type == "full":
432
+ X = np.vstack(
433
+ [
434
+ rng.multivariate_normal(mean, covariance, int(sample))
435
+ for (mean, covariance, sample) in zip(
436
+ self.means_, self.covariances_, n_samples_comp
437
+ )
438
+ ]
439
+ )
440
+ elif self.covariance_type == "tied":
441
+ X = np.vstack(
442
+ [
443
+ rng.multivariate_normal(mean, self.covariances_, int(sample))
444
+ for (mean, sample) in zip(self.means_, n_samples_comp)
445
+ ]
446
+ )
447
+ else:
448
+ X = np.vstack(
449
+ [
450
+ mean
451
+ + rng.standard_normal(size=(sample, n_features))
452
+ * np.sqrt(covariance)
453
+ for (mean, covariance, sample) in zip(
454
+ self.means_, self.covariances_, n_samples_comp
455
+ )
456
+ ]
457
+ )
458
+
459
+ y = np.concatenate(
460
+ [np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)]
461
+ )
462
+
463
+ return (X, y)
464
+
465
+ def _estimate_weighted_log_prob(self, X):
466
+ """Estimate the weighted log-probabilities, log P(X | Z) + log weights.
467
+
468
+ Parameters
469
+ ----------
470
+ X : array-like of shape (n_samples, n_features)
471
+
472
+ Returns
473
+ -------
474
+ weighted_log_prob : array, shape (n_samples, n_component)
475
+ """
476
+ return self._estimate_log_prob(X) + self._estimate_log_weights()
477
+
478
+ @abstractmethod
479
+ def _estimate_log_weights(self):
480
+ """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
481
+
482
+ Returns
483
+ -------
484
+ log_weight : array, shape (n_components, )
485
+ """
486
+ pass
487
+
488
+ @abstractmethod
489
+ def _estimate_log_prob(self, X):
490
+ """Estimate the log-probabilities log P(X | Z).
491
+
492
+ Compute the log-probabilities per each component for each sample.
493
+
494
+ Parameters
495
+ ----------
496
+ X : array-like of shape (n_samples, n_features)
497
+
498
+ Returns
499
+ -------
500
+ log_prob : array, shape (n_samples, n_component)
501
+ """
502
+ pass
503
+
504
+ def _estimate_log_prob_resp(self, X):
505
+ """Estimate log probabilities and responsibilities for each sample.
506
+
507
+ Compute the log probabilities, weighted log probabilities per
508
+ component and responsibilities for each sample in X with respect to
509
+ the current state of the model.
510
+
511
+ Parameters
512
+ ----------
513
+ X : array-like of shape (n_samples, n_features)
514
+
515
+ Returns
516
+ -------
517
+ log_prob_norm : array, shape (n_samples,)
518
+ log p(X)
519
+
520
+ log_responsibilities : array, shape (n_samples, n_components)
521
+ logarithm of the responsibilities
522
+ """
523
+ weighted_log_prob = self._estimate_weighted_log_prob(X)
524
+ log_prob_norm = logsumexp(weighted_log_prob, axis=1)
525
+ with np.errstate(under="ignore"):
526
+ # ignore underflow
527
+ log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
528
+ return log_prob_norm, log_resp
529
+
530
+ def _print_verbose_msg_init_beg(self, n_init):
531
+ """Print verbose message on initialization."""
532
+ if self.verbose == 1:
533
+ print("Initialization %d" % n_init)
534
+ elif self.verbose >= 2:
535
+ print("Initialization %d" % n_init)
536
+ self._init_prev_time = time()
537
+ self._iter_prev_time = self._init_prev_time
538
+
539
+ def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
540
+ """Print verbose message on initialization."""
541
+ if n_iter % self.verbose_interval == 0:
542
+ if self.verbose == 1:
543
+ print(" Iteration %d" % n_iter)
544
+ elif self.verbose >= 2:
545
+ cur_time = time()
546
+ print(
547
+ " Iteration %d\t time lapse %.5fs\t ll change %.5f"
548
+ % (n_iter, cur_time - self._iter_prev_time, diff_ll)
549
+ )
550
+ self._iter_prev_time = cur_time
551
+
552
+ def _print_verbose_msg_init_end(self, ll):
553
+ """Print verbose message on the end of iteration."""
554
+ if self.verbose == 1:
555
+ print("Initialization converged: %s" % self.converged_)
556
+ elif self.verbose >= 2:
557
+ print(
558
+ "Initialization converged: %s\t time lapse %.5fs\t ll %.5f"
559
+ % (self.converged_, time() - self._init_prev_time, ll)
560
+ )
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py ADDED
@@ -0,0 +1,888 @@
1
+ """Bayesian Gaussian Mixture Model."""
2
+ # Author: Wei Xue <[email protected]>
3
+ # Thierry Guillemot <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import math
7
+ from numbers import Real
8
+
9
+ import numpy as np
10
+ from scipy.special import betaln, digamma, gammaln
11
+
12
+ from ..utils import check_array
13
+ from ..utils._param_validation import Interval, StrOptions
14
+ from ._base import BaseMixture, _check_shape
15
+ from ._gaussian_mixture import (
16
+ _check_precision_matrix,
17
+ _check_precision_positivity,
18
+ _compute_log_det_cholesky,
19
+ _compute_precision_cholesky,
20
+ _estimate_gaussian_parameters,
21
+ _estimate_log_gaussian_prob,
22
+ )
23
+
24
+
25
+ def _log_dirichlet_norm(dirichlet_concentration):
26
+ """Compute the log of the Dirichlet distribution normalization term.
27
+
28
+ Parameters
29
+ ----------
30
+ dirichlet_concentration : array-like of shape (n_samples,)
31
+ The parameters values of the Dirichlet distribution.
32
+
33
+ Returns
34
+ -------
35
+ log_dirichlet_norm : float
36
+ The log normalization of the Dirichlet distribution.
37
+ """
38
+ return gammaln(np.sum(dirichlet_concentration)) - np.sum(
39
+ gammaln(dirichlet_concentration)
40
+ )
41
+
42
+
43
+ def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):
44
+ """Compute the log of the Wishart distribution normalization term.
45
+
46
+ Parameters
47
+ ----------
48
+ degrees_of_freedom : array-like of shape (n_components,)
49
+ The number of degrees of freedom on the covariance Wishart
50
+ distributions.
51
+
52
+ log_det_precision_chol : array-like of shape (n_components,)
53
+ The determinant of the precision matrix for each component.
54
+
55
+ n_features : int
56
+ The number of features.
57
+
58
+ Return
59
+ ------
60
+ log_wishart_norm : array-like of shape (n_components,)
61
+ The log normalization of the Wishart distribution.
62
+ """
63
+ # To simplify the computation we have removed the np.log(np.pi) term
64
+ return -(
65
+ degrees_of_freedom * log_det_precisions_chol
66
+ + degrees_of_freedom * n_features * 0.5 * math.log(2.0)
67
+ + np.sum(
68
+ gammaln(0.5 * (degrees_of_freedom - np.arange(n_features)[:, np.newaxis])),
69
+ 0,
70
+ )
71
+ )
72
+
73
+
74
+ class BayesianGaussianMixture(BaseMixture):
75
+ """Variational Bayesian estimation of a Gaussian mixture.
76
+
77
+ This class allows to infer an approximate posterior distribution over the
78
+ parameters of a Gaussian mixture distribution. The effective number of
79
+ components can be inferred from the data.
80
+
81
+ This class implements two types of prior for the weights distribution: a
82
+ finite mixture model with Dirichlet distribution and an infinite mixture
83
+ model with the Dirichlet Process. In practice Dirichlet Process inference
84
+ algorithm is approximated and uses a truncated distribution with a fixed
85
+ maximum number of components (called the Stick-breaking representation).
86
+ The number of components actually used almost always depends on the data.
87
+
88
+ .. versionadded:: 0.18
89
+
90
+ Read more in the :ref:`User Guide <bgmm>`.
91
+
92
+ Parameters
93
+ ----------
94
+ n_components : int, default=1
95
+ The number of mixture components. Depending on the data and the value
96
+ of the `weight_concentration_prior` the model can decide to not use
97
+ all the components by setting some component `weights_` to values very
98
+ close to zero. The number of effective components is therefore smaller
99
+ than n_components.
100
+
101
+ covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
102
+ String describing the type of covariance parameters to use.
103
+ Must be one of::
104
+
105
+ 'full' (each component has its own general covariance matrix),
106
+ 'tied' (all components share the same general covariance matrix),
107
+ 'diag' (each component has its own diagonal covariance matrix),
108
+ 'spherical' (each component has its own single variance).
109
+
110
+ tol : float, default=1e-3
111
+ The convergence threshold. EM iterations will stop when the
112
+ lower bound average gain on the likelihood (of the training data with
113
+ respect to the model) is below this threshold.
114
+
115
+ reg_covar : float, default=1e-6
116
+ Non-negative regularization added to the diagonal of covariance.
117
+ Allows to assure that the covariance matrices are all positive.
118
+
119
+ max_iter : int, default=100
120
+ The number of EM iterations to perform.
121
+
122
+ n_init : int, default=1
123
+ The number of initializations to perform. The result with the highest
124
+ lower bound value on the likelihood is kept.
125
+
126
+ init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \
127
+ default='kmeans'
128
+ The method used to initialize the weights, the means and the
129
+ covariances.
130
+ String must be one of:
131
+
132
+ 'kmeans' : responsibilities are initialized using kmeans.
133
+ 'k-means++' : use the k-means++ method to initialize.
134
+ 'random' : responsibilities are initialized randomly.
135
+ 'random_from_data' : initial means are randomly selected data points.
136
+
137
+ .. versionchanged:: v1.1
138
+ `init_params` now accepts 'random_from_data' and 'k-means++' as
139
+ initialization methods.
140
+
141
+ weight_concentration_prior_type : {'dirichlet_process', 'dirichlet_distribution'}, \
142
+ default='dirichlet_process'
143
+ String describing the type of the weight concentration prior.
144
+
145
+ weight_concentration_prior : float or None, default=None
146
+ The dirichlet concentration of each component on the weight
147
+ distribution (Dirichlet). This is commonly called gamma in the
148
+ literature. The higher concentration puts more mass in
149
+ the center and will lead to more components being active, while a lower
150
+ concentration parameter will lead to more mass at the edge of the
151
+ mixture weights simplex. The value of the parameter must be greater
152
+ than 0. If it is None, it's set to ``1. / n_components``.
153
+
154
+ mean_precision_prior : float or None, default=None
155
+ The precision prior on the mean distribution (Gaussian).
156
+ Controls the extent of where means can be placed. Larger
157
+ values concentrate the cluster means around `mean_prior`.
158
+ The value of the parameter must be greater than 0.
159
+ If it is None, it is set to 1.
160
+
161
+ mean_prior : array-like, shape (n_features,), default=None
162
+ The prior on the mean distribution (Gaussian).
163
+ If it is None, it is set to the mean of X.
164
+
165
+ degrees_of_freedom_prior : float or None, default=None
166
+ The prior of the number of degrees of freedom on the covariance
167
+ distributions (Wishart). If it is None, it's set to `n_features`.
168
+
169
+ covariance_prior : float or array-like, default=None
170
+ The prior on the covariance distribution (Wishart).
171
+ If it is None, the emiprical covariance prior is initialized using the
172
+ covariance of X. The shape depends on `covariance_type`::
173
+
174
+ (n_features, n_features) if 'full',
175
+ (n_features, n_features) if 'tied',
176
+ (n_features) if 'diag',
177
+ float if 'spherical'
178
+
179
+ random_state : int, RandomState instance or None, default=None
180
+ Controls the random seed given to the method chosen to initialize the
181
+ parameters (see `init_params`).
182
+ In addition, it controls the generation of random samples from the
183
+ fitted distribution (see the method `sample`).
184
+ Pass an int for reproducible output across multiple function calls.
185
+ See :term:`Glossary <random_state>`.
186
+
187
+ warm_start : bool, default=False
188
+ If 'warm_start' is True, the solution of the last fitting is used as
189
+ initialization for the next call of fit(). This can speed up
190
+ convergence when fit is called several times on similar problems.
191
+ See :term:`the Glossary <warm_start>`.
192
+
193
+ verbose : int, default=0
194
+ Enable verbose output. If 1 then it prints the current
195
+ initialization and each iteration step. If greater than 1 then
196
+ it prints also the log probability and the time needed
197
+ for each step.
198
+
199
+ verbose_interval : int, default=10
200
+ Number of iteration done before the next print.
201
+
202
+ Attributes
203
+ ----------
204
+ weights_ : array-like of shape (n_components,)
205
+ The weights of each mixture components.
206
+
207
+ means_ : array-like of shape (n_components, n_features)
208
+ The mean of each mixture component.
209
+
210
+ covariances_ : array-like
211
+ The covariance of each mixture component.
212
+ The shape depends on `covariance_type`::
213
+
214
+ (n_components,) if 'spherical',
215
+ (n_features, n_features) if 'tied',
216
+ (n_components, n_features) if 'diag',
217
+ (n_components, n_features, n_features) if 'full'
218
+
219
+ precisions_ : array-like
220
+ The precision matrices for each component in the mixture. A precision
221
+ matrix is the inverse of a covariance matrix. A covariance matrix is
222
+ symmetric positive definite so the mixture of Gaussian can be
223
+ equivalently parameterized by the precision matrices. Storing the
224
+ precision matrices instead of the covariance matrices makes it more
225
+ efficient to compute the log-likelihood of new samples at test time.
226
+ The shape depends on ``covariance_type``::
227
+
228
+ (n_components,) if 'spherical',
229
+ (n_features, n_features) if 'tied',
230
+ (n_components, n_features) if 'diag',
231
+ (n_components, n_features, n_features) if 'full'
232
+
233
+ precisions_cholesky_ : array-like
234
+ The cholesky decomposition of the precision matrices of each mixture
235
+ component. A precision matrix is the inverse of a covariance matrix.
236
+ A covariance matrix is symmetric positive definite so the mixture of
237
+ Gaussian can be equivalently parameterized by the precision matrices.
238
+ Storing the precision matrices instead of the covariance matrices makes
239
+ it more efficient to compute the log-likelihood of new samples at test
240
+ time. The shape depends on ``covariance_type``::
241
+
242
+ (n_components,) if 'spherical',
243
+ (n_features, n_features) if 'tied',
244
+ (n_components, n_features) if 'diag',
245
+ (n_components, n_features, n_features) if 'full'
246
+
247
+ converged_ : bool
248
+ True when convergence was reached in fit(), False otherwise.
249
+
250
+ n_iter_ : int
251
+ Number of step used by the best fit of inference to reach the
252
+ convergence.
253
+
254
+ lower_bound_ : float
255
+ Lower bound value on the model evidence (of the training data) of the
256
+ best fit of inference.
257
+
258
+ weight_concentration_prior_ : tuple or float
259
+ The dirichlet concentration of each component on the weight
260
+ distribution (Dirichlet). The type depends on
261
+ ``weight_concentration_prior_type``::
262
+
263
+ (float, float) if 'dirichlet_process' (Beta parameters),
264
+ float if 'dirichlet_distribution' (Dirichlet parameters).
265
+
266
+ The higher concentration puts more mass in
267
+ the center and will lead to more components being active, while a lower
268
+ concentration parameter will lead to more mass at the edge of the
269
+ simplex.
270
+
271
+ weight_concentration_ : array-like of shape (n_components,)
272
+ The dirichlet concentration of each component on the weight
273
+ distribution (Dirichlet).
274
+
275
+ mean_precision_prior_ : float
276
+ The precision prior on the mean distribution (Gaussian).
277
+ Controls the extent of where means can be placed.
278
+ Larger values concentrate the cluster means around `mean_prior`.
279
+ If mean_precision_prior is set to None, `mean_precision_prior_` is set
280
+ to 1.
281
+
282
+ mean_precision_ : array-like of shape (n_components,)
283
+ The precision of each components on the mean distribution (Gaussian).
284
+
285
+ mean_prior_ : array-like of shape (n_features,)
286
+ The prior on the mean distribution (Gaussian).
287
+
288
+ degrees_of_freedom_prior_ : float
289
+ The prior of the number of degrees of freedom on the covariance
290
+ distributions (Wishart).
291
+
292
+ degrees_of_freedom_ : array-like of shape (n_components,)
293
+ The number of degrees of freedom of each components in the model.
294
+
295
+ covariance_prior_ : float or array-like
296
+ The prior on the covariance distribution (Wishart).
297
+ The shape depends on `covariance_type`::
298
+
299
+ (n_features, n_features) if 'full',
300
+ (n_features, n_features) if 'tied',
301
+ (n_features) if 'diag',
302
+ float if 'spherical'
303
+
304
+ n_features_in_ : int
305
+ Number of features seen during :term:`fit`.
306
+
307
+ .. versionadded:: 0.24
308
+
309
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
310
+ Names of features seen during :term:`fit`. Defined only when `X`
311
+ has feature names that are all strings.
312
+
313
+ .. versionadded:: 1.0
314
+
315
+ See Also
316
+ --------
317
+ GaussianMixture : Finite Gaussian mixture fit with EM.
318
+
319
+ References
320
+ ----------
321
+
322
+ .. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine
323
+ learning". Vol. 4 No. 4. New York: Springer.
324
+ <https://www.springer.com/kr/book/9780387310732>`_
325
+
326
+ .. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for
327
+ Graphical Models". In Advances in Neural Information Processing
328
+ Systems 12.
329
+ <https://citeseerx.ist.psu.edu/doc_view/pid/ee844fd96db7041a9681b5a18bff008912052c7e>`_
330
+
331
+ .. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational
332
+ inference for Dirichlet process mixtures". Bayesian analysis 1.1
333
+ <https://www.cs.princeton.edu/courses/archive/fall11/cos597C/reading/BleiJordan2005.pdf>`_
334
+
335
+ Examples
336
+ --------
337
+ >>> import numpy as np
338
+ >>> from sklearn.mixture import BayesianGaussianMixture
339
+ >>> X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [12, 4], [10, 7]])
340
+ >>> bgm = BayesianGaussianMixture(n_components=2, random_state=42).fit(X)
341
+ >>> bgm.means_
342
+ array([[2.49... , 2.29...],
343
+ [8.45..., 4.52... ]])
344
+ >>> bgm.predict([[0, 0], [9, 3]])
345
+ array([0, 1])
346
+ """
347
+
348
+ _parameter_constraints: dict = {
349
+ **BaseMixture._parameter_constraints,
350
+ "covariance_type": [StrOptions({"spherical", "tied", "diag", "full"})],
351
+ "weight_concentration_prior_type": [
352
+ StrOptions({"dirichlet_process", "dirichlet_distribution"})
353
+ ],
354
+ "weight_concentration_prior": [
355
+ None,
356
+ Interval(Real, 0.0, None, closed="neither"),
357
+ ],
358
+ "mean_precision_prior": [None, Interval(Real, 0.0, None, closed="neither")],
359
+ "mean_prior": [None, "array-like"],
360
+ "degrees_of_freedom_prior": [None, Interval(Real, 0.0, None, closed="neither")],
361
+ "covariance_prior": [
362
+ None,
363
+ "array-like",
364
+ Interval(Real, 0.0, None, closed="neither"),
365
+ ],
366
+ }
367
+
368
+ def __init__(
369
+ self,
370
+ *,
371
+ n_components=1,
372
+ covariance_type="full",
373
+ tol=1e-3,
374
+ reg_covar=1e-6,
375
+ max_iter=100,
376
+ n_init=1,
377
+ init_params="kmeans",
378
+ weight_concentration_prior_type="dirichlet_process",
379
+ weight_concentration_prior=None,
380
+ mean_precision_prior=None,
381
+ mean_prior=None,
382
+ degrees_of_freedom_prior=None,
383
+ covariance_prior=None,
384
+ random_state=None,
385
+ warm_start=False,
386
+ verbose=0,
387
+ verbose_interval=10,
388
+ ):
389
+ super().__init__(
390
+ n_components=n_components,
391
+ tol=tol,
392
+ reg_covar=reg_covar,
393
+ max_iter=max_iter,
394
+ n_init=n_init,
395
+ init_params=init_params,
396
+ random_state=random_state,
397
+ warm_start=warm_start,
398
+ verbose=verbose,
399
+ verbose_interval=verbose_interval,
400
+ )
401
+
402
+ self.covariance_type = covariance_type
403
+ self.weight_concentration_prior_type = weight_concentration_prior_type
404
+ self.weight_concentration_prior = weight_concentration_prior
405
+ self.mean_precision_prior = mean_precision_prior
406
+ self.mean_prior = mean_prior
407
+ self.degrees_of_freedom_prior = degrees_of_freedom_prior
408
+ self.covariance_prior = covariance_prior
409
+
410
+ def _check_parameters(self, X):
411
+ """Check that the parameters are well defined.
412
+
413
+ Parameters
414
+ ----------
415
+ X : array-like of shape (n_samples, n_features)
416
+ """
417
+ self._check_weights_parameters()
418
+ self._check_means_parameters(X)
419
+ self._check_precision_parameters(X)
420
+ self._checkcovariance_prior_parameter(X)
421
+
422
+ def _check_weights_parameters(self):
423
+ """Check the parameter of the Dirichlet distribution."""
424
+ if self.weight_concentration_prior is None:
425
+ self.weight_concentration_prior_ = 1.0 / self.n_components
426
+ else:
427
+ self.weight_concentration_prior_ = self.weight_concentration_prior
428
+
429
+ def _check_means_parameters(self, X):
430
+ """Check the parameters of the Gaussian distribution.
431
+
432
+ Parameters
433
+ ----------
434
+ X : array-like of shape (n_samples, n_features)
435
+ """
436
+ _, n_features = X.shape
437
+
438
+ if self.mean_precision_prior is None:
439
+ self.mean_precision_prior_ = 1.0
440
+ else:
441
+ self.mean_precision_prior_ = self.mean_precision_prior
442
+
443
+ if self.mean_prior is None:
444
+ self.mean_prior_ = X.mean(axis=0)
445
+ else:
446
+ self.mean_prior_ = check_array(
447
+ self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False
448
+ )
449
+ _check_shape(self.mean_prior_, (n_features,), "means")
450
+
451
+ def _check_precision_parameters(self, X):
452
+ """Check the prior parameters of the precision distribution.
453
+
454
+ Parameters
455
+ ----------
456
+ X : array-like of shape (n_samples, n_features)
457
+ """
458
+ _, n_features = X.shape
459
+
460
+ if self.degrees_of_freedom_prior is None:
461
+ self.degrees_of_freedom_prior_ = n_features
462
+ elif self.degrees_of_freedom_prior > n_features - 1.0:
463
+ self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior
464
+ else:
465
+ raise ValueError(
466
+ "The parameter 'degrees_of_freedom_prior' "
467
+ "should be greater than %d, but got %.3f."
468
+ % (n_features - 1, self.degrees_of_freedom_prior)
469
+ )
470
+
471
+ def _checkcovariance_prior_parameter(self, X):
472
+ """Check the `covariance_prior_`.
473
+
474
+ Parameters
475
+ ----------
476
+ X : array-like of shape (n_samples, n_features)
477
+ """
478
+ _, n_features = X.shape
479
+
480
+ if self.covariance_prior is None:
481
+ self.covariance_prior_ = {
482
+ "full": np.atleast_2d(np.cov(X.T)),
483
+ "tied": np.atleast_2d(np.cov(X.T)),
484
+ "diag": np.var(X, axis=0, ddof=1),
485
+ "spherical": np.var(X, axis=0, ddof=1).mean(),
486
+ }[self.covariance_type]
487
+
488
+ elif self.covariance_type in ["full", "tied"]:
489
+ self.covariance_prior_ = check_array(
490
+ self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False
491
+ )
492
+ _check_shape(
493
+ self.covariance_prior_,
494
+ (n_features, n_features),
495
+ "%s covariance_prior" % self.covariance_type,
496
+ )
497
+ _check_precision_matrix(self.covariance_prior_, self.covariance_type)
498
+ elif self.covariance_type == "diag":
499
+ self.covariance_prior_ = check_array(
500
+ self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False
501
+ )
502
+ _check_shape(
503
+ self.covariance_prior_,
504
+ (n_features,),
505
+ "%s covariance_prior" % self.covariance_type,
506
+ )
507
+ _check_precision_positivity(self.covariance_prior_, self.covariance_type)
508
+ # spherical case
509
+ else:
510
+ self.covariance_prior_ = self.covariance_prior
511
+
512
+ def _initialize(self, X, resp):
513
+ """Initialization of the mixture parameters.
514
+
515
+ Parameters
516
+ ----------
517
+ X : array-like of shape (n_samples, n_features)
518
+
519
+ resp : array-like of shape (n_samples, n_components)
520
+ """
521
+ nk, xk, sk = _estimate_gaussian_parameters(
522
+ X, resp, self.reg_covar, self.covariance_type
523
+ )
524
+
525
+ self._estimate_weights(nk)
526
+ self._estimate_means(nk, xk)
527
+ self._estimate_precisions(nk, xk, sk)
528
+
529
+ def _estimate_weights(self, nk):
530
+ """Estimate the parameters of the Dirichlet distribution.
531
+
532
+ Parameters
533
+ ----------
534
+ nk : array-like of shape (n_components,)
535
+ """
536
+ if self.weight_concentration_prior_type == "dirichlet_process":
537
+ # For dirichlet process weight_concentration will be a tuple
538
+ # containing the two parameters of the beta distribution
539
+ self.weight_concentration_ = (
540
+ 1.0 + nk,
541
+ (
542
+ self.weight_concentration_prior_
543
+ + np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))
544
+ ),
545
+ )
546
+ else:
547
+ # case Variational Gaussian mixture with dirichlet distribution
548
+ self.weight_concentration_ = self.weight_concentration_prior_ + nk
549
+
550
+ def _estimate_means(self, nk, xk):
551
+ """Estimate the parameters of the Gaussian distribution.
552
+
553
+ Parameters
554
+ ----------
555
+ nk : array-like of shape (n_components,)
556
+
557
+ xk : array-like of shape (n_components, n_features)
558
+ """
559
+ self.mean_precision_ = self.mean_precision_prior_ + nk
560
+ self.means_ = (
561
+ self.mean_precision_prior_ * self.mean_prior_ + nk[:, np.newaxis] * xk
562
+ ) / self.mean_precision_[:, np.newaxis]
563
+
564
+ def _estimate_precisions(self, nk, xk, sk):
565
+ """Estimate the precisions parameters of the precision distribution.
566
+
567
+ Parameters
568
+ ----------
569
+ nk : array-like of shape (n_components,)
570
+
571
+ xk : array-like of shape (n_components, n_features)
572
+
573
+ sk : array-like
574
+ The shape depends of `covariance_type`:
575
+ 'full' : (n_components, n_features, n_features)
576
+ 'tied' : (n_features, n_features)
577
+ 'diag' : (n_components, n_features)
578
+ 'spherical' : (n_components,)
579
+ """
580
+ {
581
+ "full": self._estimate_wishart_full,
582
+ "tied": self._estimate_wishart_tied,
583
+ "diag": self._estimate_wishart_diag,
584
+ "spherical": self._estimate_wishart_spherical,
585
+ }[self.covariance_type](nk, xk, sk)
586
+
587
+ self.precisions_cholesky_ = _compute_precision_cholesky(
588
+ self.covariances_, self.covariance_type
589
+ )
590
+
591
+ def _estimate_wishart_full(self, nk, xk, sk):
592
+ """Estimate the full Wishart distribution parameters.
593
+
594
+ Parameters
595
+ ----------
596
+ X : array-like of shape (n_samples, n_features)
597
+
598
+ nk : array-like of shape (n_components,)
599
+
600
+ xk : array-like of shape (n_components, n_features)
601
+
602
+ sk : array-like of shape (n_components, n_features, n_features)
603
+ """
604
+ _, n_features = xk.shape
605
+
606
+ # Warning : in some Bishop book, there is a typo on the formula 10.63
607
+ # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is
608
+ # the correct formula
609
+ self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
610
+
611
+ self.covariances_ = np.empty((self.n_components, n_features, n_features))
612
+
613
+ for k in range(self.n_components):
614
+ diff = xk[k] - self.mean_prior_
615
+ self.covariances_[k] = (
616
+ self.covariance_prior_
617
+ + nk[k] * sk[k]
618
+ + nk[k]
619
+ * self.mean_precision_prior_
620
+ / self.mean_precision_[k]
621
+ * np.outer(diff, diff)
622
+ )
623
+
624
+ # Contrary to the original bishop book, we normalize the covariances
625
+ self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis]
626
+
627
+ def _estimate_wishart_tied(self, nk, xk, sk):
628
+ """Estimate the tied Wishart distribution parameters.
629
+
630
+ Parameters
631
+ ----------
632
+ X : array-like of shape (n_samples, n_features)
633
+
634
+ nk : array-like of shape (n_components,)
635
+
636
+ xk : array-like of shape (n_components, n_features)
637
+
638
+ sk : array-like of shape (n_features, n_features)
639
+ """
640
+ _, n_features = xk.shape
641
+
642
+ # Warning : in some Bishop book, there is a typo on the formula 10.63
643
+ # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
644
+ # is the correct formula
645
+ self.degrees_of_freedom_ = (
646
+ self.degrees_of_freedom_prior_ + nk.sum() / self.n_components
647
+ )
648
+
649
+ diff = xk - self.mean_prior_
650
+ self.covariances_ = (
651
+ self.covariance_prior_
652
+ + sk * nk.sum() / self.n_components
653
+ + self.mean_precision_prior_
654
+ / self.n_components
655
+ * np.dot((nk / self.mean_precision_) * diff.T, diff)
656
+ )
657
+
658
+ # Contrary to the original bishop book, we normalize the covariances
659
+ self.covariances_ /= self.degrees_of_freedom_
660
+
661
+ def _estimate_wishart_diag(self, nk, xk, sk):
662
+ """Estimate the diag Wishart distribution parameters.
663
+
664
+ Parameters
665
+ ----------
666
+ X : array-like of shape (n_samples, n_features)
667
+
668
+ nk : array-like of shape (n_components,)
669
+
670
+ xk : array-like of shape (n_components, n_features)
671
+
672
+ sk : array-like of shape (n_components, n_features)
673
+ """
674
+ _, n_features = xk.shape
675
+
676
+ # Warning : in some Bishop book, there is a typo on the formula 10.63
677
+ # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
678
+ # is the correct formula
679
+ self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
680
+
681
+ diff = xk - self.mean_prior_
682
+ self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * (
683
+ sk
684
+ + (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis]
685
+ * np.square(diff)
686
+ )
687
+
688
+ # Contrary to the original bishop book, we normalize the covariances
689
+ self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]
690
+
691
+ def _estimate_wishart_spherical(self, nk, xk, sk):
692
+ """Estimate the spherical Wishart distribution parameters.
693
+
694
+ Parameters
695
+ ----------
696
+ X : array-like of shape (n_samples, n_features)
697
+
698
+ nk : array-like of shape (n_components,)
699
+
700
+ xk : array-like of shape (n_components, n_features)
701
+
702
+ sk : array-like of shape (n_components,)
703
+ """
704
+ _, n_features = xk.shape
705
+
706
+ # Warning: in some editions of Bishop's book, there is a typo in formula 10.63;
707
+ # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
708
+ # is the correct formula
709
+ self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
710
+
711
+ diff = xk - self.mean_prior_
712
+ self.covariances_ = self.covariance_prior_ + nk * (
713
+ sk
714
+ + self.mean_precision_prior_
715
+ / self.mean_precision_
716
+ * np.mean(np.square(diff), 1)
717
+ )
718
+
719
+ # Contrary to the original bishop book, we normalize the covariances
720
+ self.covariances_ /= self.degrees_of_freedom_
721
+
722
+ def _m_step(self, X, log_resp):
723
+ """M step.
724
+
725
+ Parameters
726
+ ----------
727
+ X : array-like of shape (n_samples, n_features)
728
+
729
+ log_resp : array-like of shape (n_samples, n_components)
730
+ Logarithm of the posterior probabilities (or responsibilities) of
731
+ the point of each sample in X.
732
+ """
733
+ n_samples, _ = X.shape
734
+
735
+ nk, xk, sk = _estimate_gaussian_parameters(
736
+ X, np.exp(log_resp), self.reg_covar, self.covariance_type
737
+ )
738
+ self._estimate_weights(nk)
739
+ self._estimate_means(nk, xk)
740
+ self._estimate_precisions(nk, xk, sk)
741
+
742
+ def _estimate_log_weights(self):
743
+ if self.weight_concentration_prior_type == "dirichlet_process":
744
+ digamma_sum = digamma(
745
+ self.weight_concentration_[0] + self.weight_concentration_[1]
746
+ )
747
+ digamma_a = digamma(self.weight_concentration_[0])
748
+ digamma_b = digamma(self.weight_concentration_[1])
749
+ return (
750
+ digamma_a
751
+ - digamma_sum
752
+ + np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1]))
753
+ )
754
+ else:
755
+ # case Variational Gaussian mixture with dirichlet distribution
756
+ return digamma(self.weight_concentration_) - digamma(
757
+ np.sum(self.weight_concentration_)
758
+ )
759
+
760
+ def _estimate_log_prob(self, X):
761
+ _, n_features = X.shape
762
+ # We remove `n_features * np.log(self.degrees_of_freedom_)` because
763
+ # the precision matrix is normalized
764
+ log_gauss = _estimate_log_gaussian_prob(
765
+ X, self.means_, self.precisions_cholesky_, self.covariance_type
766
+ ) - 0.5 * n_features * np.log(self.degrees_of_freedom_)
767
+
768
+ log_lambda = n_features * np.log(2.0) + np.sum(
769
+ digamma(
770
+ 0.5
771
+ * (self.degrees_of_freedom_ - np.arange(0, n_features)[:, np.newaxis])
772
+ ),
773
+ 0,
774
+ )
775
+
776
+ return log_gauss + 0.5 * (log_lambda - n_features / self.mean_precision_)
777
+
778
+ def _compute_lower_bound(self, log_resp, log_prob_norm):
779
+ """Estimate the lower bound of the model.
780
+
781
+ The lower bound on the likelihood (of the training data with respect to
782
+ the model) is used to detect the convergence and has to increase at
783
+ each iteration.
784
+
785
+ Parameters
786
+ ----------
787
+ X : array-like of shape (n_samples, n_features)
788
+
789
+ log_resp : array, shape (n_samples, n_components)
790
+ Logarithm of the posterior probabilities (or responsibilities) of
791
+ the point of each sample in X.
792
+
793
+ log_prob_norm : float
794
+ Logarithm of the probability of each sample in X.
795
+
796
+ Returns
797
+ -------
798
+ lower_bound : float
799
+ """
800
+ # Contrary to the original formula, we have done some simplification
801
+ # and removed all the constant terms.
802
+ (n_features,) = self.mean_prior_.shape
803
+
804
+ # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
805
+ # because the precision matrix is normalized.
806
+ log_det_precisions_chol = _compute_log_det_cholesky(
807
+ self.precisions_cholesky_, self.covariance_type, n_features
808
+ ) - 0.5 * n_features * np.log(self.degrees_of_freedom_)
809
+
810
+ if self.covariance_type == "tied":
811
+ log_wishart = self.n_components * np.float64(
812
+ _log_wishart_norm(
813
+ self.degrees_of_freedom_, log_det_precisions_chol, n_features
814
+ )
815
+ )
816
+ else:
817
+ log_wishart = np.sum(
818
+ _log_wishart_norm(
819
+ self.degrees_of_freedom_, log_det_precisions_chol, n_features
820
+ )
821
+ )
822
+
823
+ if self.weight_concentration_prior_type == "dirichlet_process":
824
+ log_norm_weight = -np.sum(
825
+ betaln(self.weight_concentration_[0], self.weight_concentration_[1])
826
+ )
827
+ else:
828
+ log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)
829
+
830
+ return (
831
+ -np.sum(np.exp(log_resp) * log_resp)
832
+ - log_wishart
833
+ - log_norm_weight
834
+ - 0.5 * n_features * np.sum(np.log(self.mean_precision_))
835
+ )
836
+
837
+ def _get_parameters(self):
838
+ return (
839
+ self.weight_concentration_,
840
+ self.mean_precision_,
841
+ self.means_,
842
+ self.degrees_of_freedom_,
843
+ self.covariances_,
844
+ self.precisions_cholesky_,
845
+ )
846
+
847
+ def _set_parameters(self, params):
848
+ (
849
+ self.weight_concentration_,
850
+ self.mean_precision_,
851
+ self.means_,
852
+ self.degrees_of_freedom_,
853
+ self.covariances_,
854
+ self.precisions_cholesky_,
855
+ ) = params
856
+
857
+ # Weights computation
858
+ if self.weight_concentration_prior_type == "dirichlet_process":
859
+ weight_dirichlet_sum = (
860
+ self.weight_concentration_[0] + self.weight_concentration_[1]
861
+ )
862
+ tmp = self.weight_concentration_[1] / weight_dirichlet_sum
863
+ self.weights_ = (
864
+ self.weight_concentration_[0]
865
+ / weight_dirichlet_sum
866
+ * np.hstack((1, np.cumprod(tmp[:-1])))
867
+ )
868
+ self.weights_ /= np.sum(self.weights_)
869
+ else:
870
+ self.weights_ = self.weight_concentration_ / np.sum(
871
+ self.weight_concentration_
872
+ )
873
+
874
+ # Precisions matrices computation
875
+ if self.covariance_type == "full":
876
+ self.precisions_ = np.array(
877
+ [
878
+ np.dot(prec_chol, prec_chol.T)
879
+ for prec_chol in self.precisions_cholesky_
880
+ ]
881
+ )
882
+
883
+ elif self.covariance_type == "tied":
884
+ self.precisions_ = np.dot(
885
+ self.precisions_cholesky_, self.precisions_cholesky_.T
886
+ )
887
+ else:
888
+ self.precisions_ = self.precisions_cholesky_**2
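Aside (not part of the committed file): the `_set_parameters` method above turns the two beta parameters of the Dirichlet-process posterior into mixture weights via a stick-breaking construction. A minimal standalone sketch of that computation, using hypothetical beta parameters `a` and `b`, is:

import numpy as np

# Hypothetical posterior beta parameters (a_k, b_k) for each stick-breaking variable v_k.
a = np.array([5.0, 3.0, 1.0])
b = np.array([4.0, 6.0, 8.0])

# E[v_k] = a_k / (a_k + b_k); component k gets E[v_k] times the expected length
# of the stick remaining after the first k - 1 breaks.
expected_v = a / (a + b)
remaining_stick = np.hstack((1.0, np.cumprod(1.0 - expected_v[:-1])))
weights = expected_v * remaining_stick
weights /= weights.sum()  # renormalize, exactly as _set_parameters does
print(weights)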
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py ADDED
@@ -0,0 +1,912 @@
1
+ """Gaussian Mixture Model."""
2
+
3
+ # Author: Wei Xue <[email protected]>
4
+ # Modified by Thierry Guillemot <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+ import numpy as np
8
+ from scipy import linalg
9
+
10
+ from ..utils import check_array
11
+ from ..utils._param_validation import StrOptions
12
+ from ..utils.extmath import row_norms
13
+ from ._base import BaseMixture, _check_shape
14
+
15
+ ###############################################################################
16
+ # Gaussian mixture shape checkers used by the GaussianMixture class
17
+
18
+
19
+ def _check_weights(weights, n_components):
20
+ """Check the user provided 'weights'.
21
+
22
+ Parameters
23
+ ----------
24
+ weights : array-like of shape (n_components,)
25
+ The proportions of components of each mixture.
26
+
27
+ n_components : int
28
+ Number of components.
29
+
30
+ Returns
31
+ -------
32
+ weights : array, shape (n_components,)
33
+ """
34
+ weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False)
35
+ _check_shape(weights, (n_components,), "weights")
36
+
37
+ # check range
38
+ if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)):
39
+ raise ValueError(
40
+ "The parameter 'weights' should be in the range "
41
+ "[0, 1], but got max value %.5f, min value %.5f"
42
+ % (np.min(weights), np.max(weights))
43
+ )
44
+
45
+ # check normalization
46
+ if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0):
47
+ raise ValueError(
48
+ "The parameter 'weights' should be normalized, but got sum(weights) = %.5f"
49
+ % np.sum(weights)
50
+ )
51
+ return weights
52
+
53
+
54
+ def _check_means(means, n_components, n_features):
55
+ """Validate the provided 'means'.
56
+
57
+ Parameters
58
+ ----------
59
+ means : array-like of shape (n_components, n_features)
60
+ The centers of the current components.
61
+
62
+ n_components : int
63
+ Number of components.
64
+
65
+ n_features : int
66
+ Number of features.
67
+
68
+ Returns
69
+ -------
70
+ means : array, (n_components, n_features)
71
+ """
72
+ means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
73
+ _check_shape(means, (n_components, n_features), "means")
74
+ return means
75
+
76
+
77
+ def _check_precision_positivity(precision, covariance_type):
78
+ """Check a precision vector is positive-definite."""
79
+ if np.any(np.less_equal(precision, 0.0)):
80
+ raise ValueError("'%s precision' should be positive" % covariance_type)
81
+
82
+
83
+ def _check_precision_matrix(precision, covariance_type):
84
+ """Check a precision matrix is symmetric and positive-definite."""
85
+ if not (
86
+ np.allclose(precision, precision.T) and np.all(linalg.eigvalsh(precision) > 0.0)
87
+ ):
88
+ raise ValueError(
89
+ "'%s precision' should be symmetric, positive-definite" % covariance_type
90
+ )
91
+
92
+
93
+ def _check_precisions_full(precisions, covariance_type):
94
+ """Check the precision matrices are symmetric and positive-definite."""
95
+ for prec in precisions:
96
+ _check_precision_matrix(prec, covariance_type)
97
+
98
+
99
+ def _check_precisions(precisions, covariance_type, n_components, n_features):
100
+ """Validate user provided precisions.
101
+
102
+ Parameters
103
+ ----------
104
+ precisions : array-like
105
+ 'full' : shape of (n_components, n_features, n_features)
106
+ 'tied' : shape of (n_features, n_features)
107
+ 'diag' : shape of (n_components, n_features)
108
+ 'spherical' : shape of (n_components,)
109
+
110
+ covariance_type : str
111
+
112
+ n_components : int
113
+ Number of components.
114
+
115
+ n_features : int
116
+ Number of features.
117
+
118
+ Returns
119
+ -------
120
+ precisions : array
121
+ """
122
+ precisions = check_array(
123
+ precisions,
124
+ dtype=[np.float64, np.float32],
125
+ ensure_2d=False,
126
+ allow_nd=covariance_type == "full",
127
+ )
128
+
129
+ precisions_shape = {
130
+ "full": (n_components, n_features, n_features),
131
+ "tied": (n_features, n_features),
132
+ "diag": (n_components, n_features),
133
+ "spherical": (n_components,),
134
+ }
135
+ _check_shape(
136
+ precisions, precisions_shape[covariance_type], "%s precision" % covariance_type
137
+ )
138
+
139
+ _check_precisions = {
140
+ "full": _check_precisions_full,
141
+ "tied": _check_precision_matrix,
142
+ "diag": _check_precision_positivity,
143
+ "spherical": _check_precision_positivity,
144
+ }
145
+ _check_precisions[covariance_type](precisions, covariance_type)
146
+ return precisions
147
+
148
+
149
+ ###############################################################################
150
+ # Gaussian mixture parameters estimators (used by the M-Step)
151
+
152
+
153
+ def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
154
+ """Estimate the full covariance matrices.
155
+
156
+ Parameters
157
+ ----------
158
+ resp : array-like of shape (n_samples, n_components)
159
+
160
+ X : array-like of shape (n_samples, n_features)
161
+
162
+ nk : array-like of shape (n_components,)
163
+
164
+ means : array-like of shape (n_components, n_features)
165
+
166
+ reg_covar : float
167
+
168
+ Returns
169
+ -------
170
+ covariances : array, shape (n_components, n_features, n_features)
171
+ The covariance matrix of the current components.
172
+ """
173
+ n_components, n_features = means.shape
174
+ covariances = np.empty((n_components, n_features, n_features))
175
+ for k in range(n_components):
176
+ diff = X - means[k]
177
+ covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
178
+ covariances[k].flat[:: n_features + 1] += reg_covar
179
+ return covariances
180
+
181
+
182
+ def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
183
+ """Estimate the tied covariance matrix.
184
+
185
+ Parameters
186
+ ----------
187
+ resp : array-like of shape (n_samples, n_components)
188
+
189
+ X : array-like of shape (n_samples, n_features)
190
+
191
+ nk : array-like of shape (n_components,)
192
+
193
+ means : array-like of shape (n_components, n_features)
194
+
195
+ reg_covar : float
196
+
197
+ Returns
198
+ -------
199
+ covariance : array, shape (n_features, n_features)
200
+ The tied covariance matrix of the components.
201
+ """
202
+ avg_X2 = np.dot(X.T, X)
203
+ avg_means2 = np.dot(nk * means.T, means)
204
+ covariance = avg_X2 - avg_means2
205
+ covariance /= nk.sum()
206
+ covariance.flat[:: len(covariance) + 1] += reg_covar
207
+ return covariance
208
+
209
+
210
+ def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
211
+ """Estimate the diagonal covariance vectors.
212
+
213
+ Parameters
214
+ ----------
215
+ responsibilities : array-like of shape (n_samples, n_components)
216
+
217
+ X : array-like of shape (n_samples, n_features)
218
+
219
+ nk : array-like of shape (n_components,)
220
+
221
+ means : array-like of shape (n_components, n_features)
222
+
223
+ reg_covar : float
224
+
225
+ Returns
226
+ -------
227
+ covariances : array, shape (n_components, n_features)
228
+ The covariance vector of the current components.
229
+ """
230
+ avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
231
+ avg_means2 = means**2
232
+ avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
233
+ return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
234
+
235
+
236
+ def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
237
+ """Estimate the spherical variance values.
238
+
239
+ Parameters
240
+ ----------
241
+ responsibilities : array-like of shape (n_samples, n_components)
242
+
243
+ X : array-like of shape (n_samples, n_features)
244
+
245
+ nk : array-like of shape (n_components,)
246
+
247
+ means : array-like of shape (n_components, n_features)
248
+
249
+ reg_covar : float
250
+
251
+ Returns
252
+ -------
253
+ variances : array, shape (n_components,)
254
+ The variance values of each component.
255
+ """
256
+ return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1)
257
+
258
+
259
+ def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
260
+ """Estimate the Gaussian distribution parameters.
261
+
262
+ Parameters
263
+ ----------
264
+ X : array-like of shape (n_samples, n_features)
265
+ The input data array.
266
+
267
+ resp : array-like of shape (n_samples, n_components)
268
+ The responsibilities for each data sample in X.
269
+
270
+ reg_covar : float
271
+ The regularization added to the diagonal of the covariance matrices.
272
+
273
+ covariance_type : {'full', 'tied', 'diag', 'spherical'}
274
+ The type of precision matrices.
275
+
276
+ Returns
277
+ -------
278
+ nk : array-like of shape (n_components,)
279
+ The numbers of data samples in the current components.
280
+
281
+ means : array-like of shape (n_components, n_features)
282
+ The centers of the current components.
283
+
284
+ covariances : array-like
285
+ The covariance matrix of the current components.
286
+ The shape depends on the covariance_type.
287
+ """
288
+ nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
289
+ means = np.dot(resp.T, X) / nk[:, np.newaxis]
290
+ covariances = {
291
+ "full": _estimate_gaussian_covariances_full,
292
+ "tied": _estimate_gaussian_covariances_tied,
293
+ "diag": _estimate_gaussian_covariances_diag,
294
+ "spherical": _estimate_gaussian_covariances_spherical,
295
+ }[covariance_type](resp, X, nk, means, reg_covar)
296
+ return nk, means, covariances
297
+
298
+
299
+ def _compute_precision_cholesky(covariances, covariance_type):
300
+ """Compute the Cholesky decomposition of the precisions.
301
+
302
+ Parameters
303
+ ----------
304
+ covariances : array-like
305
+ The covariance matrix of the current components.
306
+ The shape depends on the covariance_type.
307
+
308
+ covariance_type : {'full', 'tied', 'diag', 'spherical'}
309
+ The type of precision matrices.
310
+
311
+ Returns
312
+ -------
313
+ precisions_cholesky : array-like
314
+ The cholesky decomposition of sample precisions of the current
315
+ components. The shape depends on the covariance_type.
316
+ """
317
+ estimate_precision_error_message = (
318
+ "Fitting the mixture model failed because some components have "
319
+ "ill-defined empirical covariance (for instance caused by singleton "
320
+ "or collapsed samples). Try to decrease the number of components, "
321
+ "or increase reg_covar."
322
+ )
323
+
324
+ if covariance_type == "full":
325
+ n_components, n_features, _ = covariances.shape
326
+ precisions_chol = np.empty((n_components, n_features, n_features))
327
+ for k, covariance in enumerate(covariances):
328
+ try:
329
+ cov_chol = linalg.cholesky(covariance, lower=True)
330
+ except linalg.LinAlgError:
331
+ raise ValueError(estimate_precision_error_message)
332
+ precisions_chol[k] = linalg.solve_triangular(
333
+ cov_chol, np.eye(n_features), lower=True
334
+ ).T
335
+ elif covariance_type == "tied":
336
+ _, n_features = covariances.shape
337
+ try:
338
+ cov_chol = linalg.cholesky(covariances, lower=True)
339
+ except linalg.LinAlgError:
340
+ raise ValueError(estimate_precision_error_message)
341
+ precisions_chol = linalg.solve_triangular(
342
+ cov_chol, np.eye(n_features), lower=True
343
+ ).T
344
+ else:
345
+ if np.any(np.less_equal(covariances, 0.0)):
346
+ raise ValueError(estimate_precision_error_message)
347
+ precisions_chol = 1.0 / np.sqrt(covariances)
348
+ return precisions_chol
349
+
350
+
351
+ def _flipudlr(array):
352
+ """Reverse the rows and columns of an array."""
353
+ return np.flipud(np.fliplr(array))
354
+
355
+
356
+ def _compute_precision_cholesky_from_precisions(precisions, covariance_type):
357
+ r"""Compute the Cholesky decomposition of precisions using precisions themselves.
358
+
359
+ As implemented in :func:`_compute_precision_cholesky`, the `precisions_cholesky_` is
360
+ an upper-triangular matrix for each Gaussian component, which can be expressed as
361
+ the $UU^T$ factorization of the precision matrix for each Gaussian component, where
362
+ $U$ is an upper-triangular matrix.
363
+
364
+ In order to use the Cholesky decomposition to get $UU^T$, the precision matrix
365
+ $\Lambda$ needs to be permutated such that its rows and columns are reversed, which
366
+ can be done by applying a similarity transformation with an exchange matrix $J$,
367
+ where the 1 elements reside on the anti-diagonal and all other elements are 0. In
368
+ particular, the Cholesky decomposition of the transformed precision matrix is
369
+ $J\Lambda J=LL^T$, where $L$ is a lower-triangular matrix. Because $\Lambda=UU^T$
370
+ and $J=J^{-1}=J^T$, the `precisions_cholesky_` for each Gaussian component can be
371
+ expressed as $JLJ$.
372
+
373
+ Refer to #26415 for details.
374
+
375
+ Parameters
376
+ ----------
377
+ precisions : array-like
378
+ The precision matrix of the current components.
379
+ The shape depends on the covariance_type.
380
+
381
+ covariance_type : {'full', 'tied', 'diag', 'spherical'}
382
+ The type of precision matrices.
383
+
384
+ Returns
385
+ -------
386
+ precisions_cholesky : array-like
387
+ The cholesky decomposition of sample precisions of the current
388
+ components. The shape depends on the covariance_type.
389
+ """
390
+ if covariance_type == "full":
391
+ precisions_cholesky = np.array(
392
+ [
393
+ _flipudlr(linalg.cholesky(_flipudlr(precision), lower=True))
394
+ for precision in precisions
395
+ ]
396
+ )
397
+ elif covariance_type == "tied":
398
+ precisions_cholesky = _flipudlr(
399
+ linalg.cholesky(_flipudlr(precisions), lower=True)
400
+ )
401
+ else:
402
+ precisions_cholesky = np.sqrt(precisions)
403
+ return precisions_cholesky
404
+
405
+
406
+ ###############################################################################
407
+ # Gaussian mixture probability estimators
408
+ def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
409
+ """Compute the log-det of the cholesky decomposition of matrices.
410
+
411
+ Parameters
412
+ ----------
413
+ matrix_chol : array-like
414
+ Cholesky decompositions of the matrices.
415
+ 'full' : shape of (n_components, n_features, n_features)
416
+ 'tied' : shape of (n_features, n_features)
417
+ 'diag' : shape of (n_components, n_features)
418
+ 'spherical' : shape of (n_components,)
419
+
420
+ covariance_type : {'full', 'tied', 'diag', 'spherical'}
421
+
422
+ n_features : int
423
+ Number of features.
424
+
425
+ Returns
426
+ -------
427
+ log_det_precision_chol : array-like of shape (n_components,)
428
+ The determinant of the precision matrix for each component.
429
+ """
430
+ if covariance_type == "full":
431
+ n_components, _, _ = matrix_chol.shape
432
+ log_det_chol = np.sum(
433
+ np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1
434
+ )
435
+
436
+ elif covariance_type == "tied":
437
+ log_det_chol = np.sum(np.log(np.diag(matrix_chol)))
438
+
439
+ elif covariance_type == "diag":
440
+ log_det_chol = np.sum(np.log(matrix_chol), axis=1)
441
+
442
+ else:
443
+ log_det_chol = n_features * (np.log(matrix_chol))
444
+
445
+ return log_det_chol
446
+
447
+
448
+ def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
449
+ """Estimate the log Gaussian probability.
450
+
451
+ Parameters
452
+ ----------
453
+ X : array-like of shape (n_samples, n_features)
454
+
455
+ means : array-like of shape (n_components, n_features)
456
+
457
+ precisions_chol : array-like
458
+ Cholesky decompositions of the precision matrices.
459
+ 'full' : shape of (n_components, n_features, n_features)
460
+ 'tied' : shape of (n_features, n_features)
461
+ 'diag' : shape of (n_components, n_features)
462
+ 'spherical' : shape of (n_components,)
463
+
464
+ covariance_type : {'full', 'tied', 'diag', 'spherical'}
465
+
466
+ Returns
467
+ -------
468
+ log_prob : array, shape (n_samples, n_components)
469
+ """
470
+ n_samples, n_features = X.shape
471
+ n_components, _ = means.shape
472
+ # The log-determinant of the Cholesky factor of the precision matrix
473
+ # corresponds to half of the log-determinant of the full precision
474
+ # matrix.
475
+ # In short: log_det(precision_chol) = 0.5 * log_det(precision)
476
+ log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features)
477
+
478
+ if covariance_type == "full":
479
+ log_prob = np.empty((n_samples, n_components))
480
+ for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
481
+ y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
482
+ log_prob[:, k] = np.sum(np.square(y), axis=1)
483
+
484
+ elif covariance_type == "tied":
485
+ log_prob = np.empty((n_samples, n_components))
486
+ for k, mu in enumerate(means):
487
+ y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
488
+ log_prob[:, k] = np.sum(np.square(y), axis=1)
489
+
490
+ elif covariance_type == "diag":
491
+ precisions = precisions_chol**2
492
+ log_prob = (
493
+ np.sum((means**2 * precisions), 1)
494
+ - 2.0 * np.dot(X, (means * precisions).T)
495
+ + np.dot(X**2, precisions.T)
496
+ )
497
+
498
+ elif covariance_type == "spherical":
499
+ precisions = precisions_chol**2
500
+ log_prob = (
501
+ np.sum(means**2, 1) * precisions
502
+ - 2 * np.dot(X, means.T * precisions)
503
+ + np.outer(row_norms(X, squared=True), precisions)
504
+ )
505
+ # Since we are using the Cholesky decomposition of the precision matrix,
506
+ # `- 0.5 * log_det_covariance` becomes `+ log_det_precision_chol`
507
+ return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
508
+
509
+
510
+ class GaussianMixture(BaseMixture):
511
+ """Gaussian Mixture.
512
+
513
+ Representation of a Gaussian mixture model probability distribution.
514
+ This class allows estimation of the parameters of a Gaussian mixture
515
+ distribution.
516
+
517
+ Read more in the :ref:`User Guide <gmm>`.
518
+
519
+ .. versionadded:: 0.18
520
+
521
+ Parameters
522
+ ----------
523
+ n_components : int, default=1
524
+ The number of mixture components.
525
+
526
+ covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
527
+ String describing the type of covariance parameters to use.
528
+ Must be one of:
529
+
530
+ - 'full': each component has its own general covariance matrix.
531
+ - 'tied': all components share the same general covariance matrix.
532
+ - 'diag': each component has its own diagonal covariance matrix.
533
+ - 'spherical': each component has its own single variance.
534
+
535
+ tol : float, default=1e-3
536
+ The convergence threshold. EM iterations will stop when the
537
+ lower bound average gain is below this threshold.
538
+
539
+ reg_covar : float, default=1e-6
540
+ Non-negative regularization added to the diagonal of covariance.
541
+ It helps ensure that the covariance matrices are all positive.
542
+
543
+ max_iter : int, default=100
544
+ The number of EM iterations to perform.
545
+
546
+ n_init : int, default=1
547
+ The number of initializations to perform. The best results are kept.
548
+
549
+ init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \
550
+ default='kmeans'
551
+ The method used to initialize the weights, the means and the
552
+ precisions.
553
+ String must be one of:
554
+
555
+ - 'kmeans' : responsibilities are initialized using kmeans.
556
+ - 'k-means++' : use the k-means++ method to initialize.
557
+ - 'random' : responsibilities are initialized randomly.
558
+ - 'random_from_data' : initial means are randomly selected data points.
559
+
560
+ .. versionchanged:: v1.1
561
+ `init_params` now accepts 'random_from_data' and 'k-means++' as
562
+ initialization methods.
563
+
564
+ weights_init : array-like of shape (n_components, ), default=None
565
+ The user-provided initial weights.
566
+ If it is None, weights are initialized using the `init_params` method.
567
+
568
+ means_init : array-like of shape (n_components, n_features), default=None
569
+ The user-provided initial means.
570
+ If it is None, means are initialized using the `init_params` method.
571
+
572
+ precisions_init : array-like, default=None
573
+ The user-provided initial precisions (inverse of the covariance
574
+ matrices).
575
+ If it is None, precisions are initialized using the 'init_params'
576
+ method.
577
+ The shape depends on 'covariance_type'::
578
+
579
+ (n_components,) if 'spherical',
580
+ (n_features, n_features) if 'tied',
581
+ (n_components, n_features) if 'diag',
582
+ (n_components, n_features, n_features) if 'full'
583
+
584
+ random_state : int, RandomState instance or None, default=None
585
+ Controls the random seed given to the method chosen to initialize the
586
+ parameters (see `init_params`).
587
+ In addition, it controls the generation of random samples from the
588
+ fitted distribution (see the method `sample`).
589
+ Pass an int for reproducible output across multiple function calls.
590
+ See :term:`Glossary <random_state>`.
591
+
592
+ warm_start : bool, default=False
593
+ If 'warm_start' is True, the solution of the last fitting is used as
594
+ initialization for the next call of fit(). This can speed up
595
+ convergence when fit is called several times on similar problems.
596
+ In that case, 'n_init' is ignored and only a single initialization
597
+ occurs upon the first call.
598
+ See :term:`the Glossary <warm_start>`.
599
+
600
+ verbose : int, default=0
601
+ Enable verbose output. If 1 then it prints the current
602
+ initialization and each iteration step. If greater than 1 then
603
+ it prints also the log probability and the time needed
604
+ for each step.
605
+
606
+ verbose_interval : int, default=10
607
+ Number of iterations done before the next print.
608
+
609
+ Attributes
610
+ ----------
611
+ weights_ : array-like of shape (n_components,)
612
+ The weights of each mixture component.
613
+
614
+ means_ : array-like of shape (n_components, n_features)
615
+ The mean of each mixture component.
616
+
617
+ covariances_ : array-like
618
+ The covariance of each mixture component.
619
+ The shape depends on `covariance_type`::
620
+
621
+ (n_components,) if 'spherical',
622
+ (n_features, n_features) if 'tied',
623
+ (n_components, n_features) if 'diag',
624
+ (n_components, n_features, n_features) if 'full'
625
+
626
+ precisions_ : array-like
627
+ The precision matrices for each component in the mixture. A precision
628
+ matrix is the inverse of a covariance matrix. A covariance matrix is
629
+ symmetric positive definite so the mixture of Gaussian can be
630
+ equivalently parameterized by the precision matrices. Storing the
631
+ precision matrices instead of the covariance matrices makes it more
632
+ efficient to compute the log-likelihood of new samples at test time.
633
+ The shape depends on `covariance_type`::
634
+
635
+ (n_components,) if 'spherical',
636
+ (n_features, n_features) if 'tied',
637
+ (n_components, n_features) if 'diag',
638
+ (n_components, n_features, n_features) if 'full'
639
+
640
+ precisions_cholesky_ : array-like
641
+ The cholesky decomposition of the precision matrices of each mixture
642
+ component. A precision matrix is the inverse of a covariance matrix.
643
+ A covariance matrix is symmetric positive definite so the mixture of
644
+ Gaussian can be equivalently parameterized by the precision matrices.
645
+ Storing the precision matrices instead of the covariance matrices makes
646
+ it more efficient to compute the log-likelihood of new samples at test
647
+ time. The shape depends on `covariance_type`::
648
+
649
+ (n_components,) if 'spherical',
650
+ (n_features, n_features) if 'tied',
651
+ (n_components, n_features) if 'diag',
652
+ (n_components, n_features, n_features) if 'full'
653
+
654
+ converged_ : bool
655
+ True when convergence was reached in fit(), False otherwise.
656
+
657
+ n_iter_ : int
658
+ Number of steps used by the best fit of EM to reach convergence.
659
+
660
+ lower_bound_ : float
661
+ Lower bound value on the log-likelihood (of the training data with
662
+ respect to the model) of the best fit of EM.
663
+
664
+ n_features_in_ : int
665
+ Number of features seen during :term:`fit`.
666
+
667
+ .. versionadded:: 0.24
668
+
669
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
670
+ Names of features seen during :term:`fit`. Defined only when `X`
671
+ has feature names that are all strings.
672
+
673
+ .. versionadded:: 1.0
674
+
675
+ See Also
676
+ --------
677
+ BayesianGaussianMixture : Gaussian mixture model fit with a variational
678
+ inference.
679
+
680
+ Examples
681
+ --------
682
+ >>> import numpy as np
683
+ >>> from sklearn.mixture import GaussianMixture
684
+ >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
685
+ >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X)
686
+ >>> gm.means_
687
+ array([[10., 2.],
688
+ [ 1., 2.]])
689
+ >>> gm.predict([[0, 0], [12, 3]])
690
+ array([1, 0])
691
+ """
692
+
693
+ _parameter_constraints: dict = {
694
+ **BaseMixture._parameter_constraints,
695
+ "covariance_type": [StrOptions({"full", "tied", "diag", "spherical"})],
696
+ "weights_init": ["array-like", None],
697
+ "means_init": ["array-like", None],
698
+ "precisions_init": ["array-like", None],
699
+ }
700
+
701
+ def __init__(
702
+ self,
703
+ n_components=1,
704
+ *,
705
+ covariance_type="full",
706
+ tol=1e-3,
707
+ reg_covar=1e-6,
708
+ max_iter=100,
709
+ n_init=1,
710
+ init_params="kmeans",
711
+ weights_init=None,
712
+ means_init=None,
713
+ precisions_init=None,
714
+ random_state=None,
715
+ warm_start=False,
716
+ verbose=0,
717
+ verbose_interval=10,
718
+ ):
719
+ super().__init__(
720
+ n_components=n_components,
721
+ tol=tol,
722
+ reg_covar=reg_covar,
723
+ max_iter=max_iter,
724
+ n_init=n_init,
725
+ init_params=init_params,
726
+ random_state=random_state,
727
+ warm_start=warm_start,
728
+ verbose=verbose,
729
+ verbose_interval=verbose_interval,
730
+ )
731
+
732
+ self.covariance_type = covariance_type
733
+ self.weights_init = weights_init
734
+ self.means_init = means_init
735
+ self.precisions_init = precisions_init
736
+
737
+ def _check_parameters(self, X):
738
+ """Check the Gaussian mixture parameters are well defined."""
739
+ _, n_features = X.shape
740
+
741
+ if self.weights_init is not None:
742
+ self.weights_init = _check_weights(self.weights_init, self.n_components)
743
+
744
+ if self.means_init is not None:
745
+ self.means_init = _check_means(
746
+ self.means_init, self.n_components, n_features
747
+ )
748
+
749
+ if self.precisions_init is not None:
750
+ self.precisions_init = _check_precisions(
751
+ self.precisions_init,
752
+ self.covariance_type,
753
+ self.n_components,
754
+ n_features,
755
+ )
756
+
757
+ def _initialize_parameters(self, X, random_state):
758
+ # If all the initial parameters are provided, then there is no need to run
759
+ # the initialization.
760
+ compute_resp = (
761
+ self.weights_init is None
762
+ or self.means_init is None
763
+ or self.precisions_init is None
764
+ )
765
+ if compute_resp:
766
+ super()._initialize_parameters(X, random_state)
767
+ else:
768
+ self._initialize(X, None)
769
+
770
+ def _initialize(self, X, resp):
771
+ """Initialization of the Gaussian mixture parameters.
772
+
773
+ Parameters
774
+ ----------
775
+ X : array-like of shape (n_samples, n_features)
776
+
777
+ resp : array-like of shape (n_samples, n_components)
778
+ """
779
+ n_samples, _ = X.shape
780
+ weights, means, covariances = None, None, None
781
+ if resp is not None:
782
+ weights, means, covariances = _estimate_gaussian_parameters(
783
+ X, resp, self.reg_covar, self.covariance_type
784
+ )
785
+ if self.weights_init is None:
786
+ weights /= n_samples
787
+
788
+ self.weights_ = weights if self.weights_init is None else self.weights_init
789
+ self.means_ = means if self.means_init is None else self.means_init
790
+
791
+ if self.precisions_init is None:
792
+ self.covariances_ = covariances
793
+ self.precisions_cholesky_ = _compute_precision_cholesky(
794
+ covariances, self.covariance_type
795
+ )
796
+ else:
797
+ self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions(
798
+ self.precisions_init, self.covariance_type
799
+ )
800
+
801
+ def _m_step(self, X, log_resp):
802
+ """M step.
803
+
804
+ Parameters
805
+ ----------
806
+ X : array-like of shape (n_samples, n_features)
807
+
808
+ log_resp : array-like of shape (n_samples, n_components)
809
+ Logarithm of the posterior probabilities (or responsibilities) of
810
+ the point of each sample in X.
811
+ """
812
+ self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters(
813
+ X, np.exp(log_resp), self.reg_covar, self.covariance_type
814
+ )
815
+ self.weights_ /= self.weights_.sum()
816
+ self.precisions_cholesky_ = _compute_precision_cholesky(
817
+ self.covariances_, self.covariance_type
818
+ )
819
+
820
+ def _estimate_log_prob(self, X):
821
+ return _estimate_log_gaussian_prob(
822
+ X, self.means_, self.precisions_cholesky_, self.covariance_type
823
+ )
824
+
825
+ def _estimate_log_weights(self):
826
+ return np.log(self.weights_)
827
+
828
+ def _compute_lower_bound(self, _, log_prob_norm):
829
+ return log_prob_norm
830
+
831
+ def _get_parameters(self):
832
+ return (
833
+ self.weights_,
834
+ self.means_,
835
+ self.covariances_,
836
+ self.precisions_cholesky_,
837
+ )
838
+
839
+ def _set_parameters(self, params):
840
+ (
841
+ self.weights_,
842
+ self.means_,
843
+ self.covariances_,
844
+ self.precisions_cholesky_,
845
+ ) = params
846
+
847
+ # Attributes computation
848
+ _, n_features = self.means_.shape
849
+
850
+ if self.covariance_type == "full":
851
+ self.precisions_ = np.empty(self.precisions_cholesky_.shape)
852
+ for k, prec_chol in enumerate(self.precisions_cholesky_):
853
+ self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
854
+
855
+ elif self.covariance_type == "tied":
856
+ self.precisions_ = np.dot(
857
+ self.precisions_cholesky_, self.precisions_cholesky_.T
858
+ )
859
+ else:
860
+ self.precisions_ = self.precisions_cholesky_**2
861
+
862
+ def _n_parameters(self):
863
+ """Return the number of free parameters in the model."""
864
+ _, n_features = self.means_.shape
865
+ if self.covariance_type == "full":
866
+ cov_params = self.n_components * n_features * (n_features + 1) / 2.0
867
+ elif self.covariance_type == "diag":
868
+ cov_params = self.n_components * n_features
869
+ elif self.covariance_type == "tied":
870
+ cov_params = n_features * (n_features + 1) / 2.0
871
+ elif self.covariance_type == "spherical":
872
+ cov_params = self.n_components
873
+ mean_params = n_features * self.n_components
874
+ return int(cov_params + mean_params + self.n_components - 1)
875
+
876
+ def bic(self, X):
877
+ """Bayesian information criterion for the current model on the input X.
878
+
879
+ You can refer to this :ref:`mathematical section <aic_bic>` for more
880
+ details regarding the formulation of the BIC used.
881
+
882
+ Parameters
883
+ ----------
884
+ X : array of shape (n_samples, n_dimensions)
885
+ The input samples.
886
+
887
+ Returns
888
+ -------
889
+ bic : float
890
+ The lower the better.
891
+ """
892
+ return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log(
893
+ X.shape[0]
894
+ )
895
+
896
+ def aic(self, X):
897
+ """Akaike information criterion for the current model on the input X.
898
+
899
+ You can refer to this :ref:`mathematical section <aic_bic>` for more
900
+ details regarding the formulation of the AIC used.
901
+
902
+ Parameters
903
+ ----------
904
+ X : array of shape (n_samples, n_dimensions)
905
+ The input samples.
906
+
907
+ Returns
908
+ -------
909
+ aic : float
910
+ The lower the better.
911
+ """
912
+ return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
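Aside (not part of the committed file): the `bic` and `aic` methods above are typically used to choose `n_components`. A minimal sketch with synthetic data (the data and the candidate range are illustrative, not prescribed by the file):

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
# Two well-separated synthetic blobs in 2D.
X = np.vstack([rng.randn(200, 2), rng.randn(200, 2) + 5.0])

# Fit a range of component counts and keep the model with the lowest BIC.
models = [GaussianMixture(n_components=k, random_state=0).fit(X) for k in range(1, 6)]
best = min(models, key=lambda gm: gm.bic(X))
print(best.n_components, best.bic(X), best.aic(X))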
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc ADDED
Binary file (9.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc ADDED
Binary file (31.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc ADDED
Binary file (1.16 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py ADDED
@@ -0,0 +1,466 @@
1
+ # Author: Wei Xue <[email protected]>
2
+ # Thierry Guillemot <[email protected]>
3
+ # License: BSD 3 clause
4
+ import copy
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from scipy.special import gammaln
9
+
10
+ from sklearn.exceptions import ConvergenceWarning, NotFittedError
11
+ from sklearn.metrics.cluster import adjusted_rand_score
12
+ from sklearn.mixture import BayesianGaussianMixture
13
+ from sklearn.mixture._bayesian_mixture import _log_dirichlet_norm, _log_wishart_norm
14
+ from sklearn.mixture.tests.test_gaussian_mixture import RandomData
15
+ from sklearn.utils._testing import (
16
+ assert_almost_equal,
17
+ assert_array_equal,
18
+ ignore_warnings,
19
+ )
20
+
21
+ COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"]
22
+ PRIOR_TYPE = ["dirichlet_process", "dirichlet_distribution"]
23
+
24
+
25
+ def test_log_dirichlet_norm():
26
+ rng = np.random.RandomState(0)
27
+
28
+ weight_concentration = rng.rand(2)
29
+ expected_norm = gammaln(np.sum(weight_concentration)) - np.sum(
30
+ gammaln(weight_concentration)
31
+ )
32
+ predicted_norm = _log_dirichlet_norm(weight_concentration)
33
+
34
+ assert_almost_equal(expected_norm, predicted_norm)
35
+
36
+
37
+ def test_log_wishart_norm():
38
+ rng = np.random.RandomState(0)
39
+
40
+ n_components, n_features = 5, 2
41
+ degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.0
42
+ log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
43
+
44
+ expected_norm = np.empty(5)
45
+ for k, (degrees_of_freedom_k, log_det_k) in enumerate(
46
+ zip(degrees_of_freedom, log_det_precisions_chol)
47
+ ):
48
+ expected_norm[k] = -(
49
+ degrees_of_freedom_k * (log_det_k + 0.5 * n_features * np.log(2.0))
50
+ + np.sum(
51
+ gammaln(
52
+ 0.5
53
+ * (degrees_of_freedom_k - np.arange(0, n_features)[:, np.newaxis])
54
+ ),
55
+ 0,
56
+ )
57
+ ).item()
58
+ predicted_norm = _log_wishart_norm(
59
+ degrees_of_freedom, log_det_precisions_chol, n_features
60
+ )
61
+
62
+ assert_almost_equal(expected_norm, predicted_norm)
63
+
64
+
65
+ def test_bayesian_mixture_weights_prior_initialisation():
66
+ rng = np.random.RandomState(0)
67
+ n_samples, n_components, n_features = 10, 5, 2
68
+ X = rng.rand(n_samples, n_features)
69
+
70
+ # Check correct init for a given value of weight_concentration_prior
71
+ weight_concentration_prior = rng.rand()
72
+ bgmm = BayesianGaussianMixture(
73
+ weight_concentration_prior=weight_concentration_prior, random_state=rng
74
+ ).fit(X)
75
+ assert_almost_equal(weight_concentration_prior, bgmm.weight_concentration_prior_)
76
+
77
+ # Check correct init for the default value of weight_concentration_prior
78
+ bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X)
79
+ assert_almost_equal(1.0 / n_components, bgmm.weight_concentration_prior_)
80
+
81
+
82
+ def test_bayesian_mixture_mean_prior_initialisation():
83
+ rng = np.random.RandomState(0)
84
+ n_samples, n_components, n_features = 10, 3, 2
85
+ X = rng.rand(n_samples, n_features)
86
+
87
+ # Check correct init for a given value of mean_precision_prior
88
+ mean_precision_prior = rng.rand()
89
+ bgmm = BayesianGaussianMixture(
90
+ mean_precision_prior=mean_precision_prior, random_state=rng
91
+ ).fit(X)
92
+ assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
93
+
94
+ # Check correct init for the default value of mean_precision_prior
95
+ bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
96
+ assert_almost_equal(1.0, bgmm.mean_precision_prior_)
97
+
98
+ # Check correct init for a given value of mean_prior
99
+ mean_prior = rng.rand(n_features)
100
+ bgmm = BayesianGaussianMixture(
101
+ n_components=n_components, mean_prior=mean_prior, random_state=rng
102
+ ).fit(X)
103
+ assert_almost_equal(mean_prior, bgmm.mean_prior_)
104
+
105
+ # Check correct init for the default value of mean_prior
106
+ bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X)
107
+ assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
108
+
109
+
110
+ def test_bayesian_mixture_precisions_prior_initialisation():
111
+ rng = np.random.RandomState(0)
112
+ n_samples, n_features = 10, 2
113
+ X = rng.rand(n_samples, n_features)
114
+
115
+ # Check raise message for a bad value of degrees_of_freedom_prior
116
+ bad_degrees_of_freedom_prior_ = n_features - 1.0
117
+ bgmm = BayesianGaussianMixture(
118
+ degrees_of_freedom_prior=bad_degrees_of_freedom_prior_, random_state=rng
119
+ )
120
+ msg = (
121
+ "The parameter 'degrees_of_freedom_prior' should be greater than"
122
+ f" {n_features -1}, but got {bad_degrees_of_freedom_prior_:.3f}."
123
+ )
124
+ with pytest.raises(ValueError, match=msg):
125
+ bgmm.fit(X)
126
+
127
+ # Check correct init for a given value of degrees_of_freedom_prior
128
+ degrees_of_freedom_prior = rng.rand() + n_features - 1.0
129
+ bgmm = BayesianGaussianMixture(
130
+ degrees_of_freedom_prior=degrees_of_freedom_prior, random_state=rng
131
+ ).fit(X)
132
+ assert_almost_equal(degrees_of_freedom_prior, bgmm.degrees_of_freedom_prior_)
133
+
134
+ # Check correct init for the default value of degrees_of_freedom_prior
135
+ degrees_of_freedom_prior_default = n_features
136
+ bgmm = BayesianGaussianMixture(
137
+ degrees_of_freedom_prior=degrees_of_freedom_prior_default, random_state=rng
138
+ ).fit(X)
139
+ assert_almost_equal(
140
+ degrees_of_freedom_prior_default, bgmm.degrees_of_freedom_prior_
141
+ )
142
+
143
+ # Check correct init for a given value of covariance_prior
144
+ covariance_prior = {
145
+ "full": np.cov(X.T, bias=1) + 10,
146
+ "tied": np.cov(X.T, bias=1) + 5,
147
+ "diag": np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
148
+ "spherical": rng.rand(),
149
+ }
150
+
151
+ bgmm = BayesianGaussianMixture(random_state=rng)
152
+ for cov_type in ["full", "tied", "diag", "spherical"]:
153
+ bgmm.covariance_type = cov_type
154
+ bgmm.covariance_prior = covariance_prior[cov_type]
155
+ bgmm.fit(X)
156
+ assert_almost_equal(covariance_prior[cov_type], bgmm.covariance_prior_)
157
+
158
+ # Check correct init for the default value of covariance_prior
159
+ covariance_prior_default = {
160
+ "full": np.atleast_2d(np.cov(X.T)),
161
+ "tied": np.atleast_2d(np.cov(X.T)),
162
+ "diag": np.var(X, axis=0, ddof=1),
163
+ "spherical": np.var(X, axis=0, ddof=1).mean(),
164
+ }
165
+
166
+ bgmm = BayesianGaussianMixture(random_state=0)
167
+ for cov_type in ["full", "tied", "diag", "spherical"]:
168
+ bgmm.covariance_type = cov_type
169
+ bgmm.fit(X)
170
+ assert_almost_equal(covariance_prior_default[cov_type], bgmm.covariance_prior_)
171
+
172
+
173
+ def test_bayesian_mixture_check_is_fitted():
174
+ rng = np.random.RandomState(0)
175
+ n_samples, n_features = 10, 2
176
+
177
+ # Check raise message
178
+ bgmm = BayesianGaussianMixture(random_state=rng)
179
+ X = rng.rand(n_samples, n_features)
180
+
181
+ msg = "This BayesianGaussianMixture instance is not fitted yet."
182
+ with pytest.raises(ValueError, match=msg):
183
+ bgmm.score(X)
184
+
185
+
186
+ def test_bayesian_mixture_weights():
187
+ rng = np.random.RandomState(0)
188
+ n_samples, n_features = 10, 2
189
+
190
+ X = rng.rand(n_samples, n_features)
191
+
192
+ # Case Dirichlet distribution for the weight concentration prior type
193
+ bgmm = BayesianGaussianMixture(
194
+ weight_concentration_prior_type="dirichlet_distribution",
195
+ n_components=3,
196
+ random_state=rng,
197
+ ).fit(X)
198
+
199
+ expected_weights = bgmm.weight_concentration_ / np.sum(bgmm.weight_concentration_)
200
+ assert_almost_equal(expected_weights, bgmm.weights_)
201
+ assert_almost_equal(np.sum(bgmm.weights_), 1.0)
202
+
203
+ # Case Dirichlet process for the weight concentration prior type
204
+ dpgmm = BayesianGaussianMixture(
205
+ weight_concentration_prior_type="dirichlet_process",
206
+ n_components=3,
207
+ random_state=rng,
208
+ ).fit(X)
209
+ weight_dirichlet_sum = (
210
+ dpgmm.weight_concentration_[0] + dpgmm.weight_concentration_[1]
211
+ )
212
+ tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
213
+ expected_weights = (
214
+ dpgmm.weight_concentration_[0]
215
+ / weight_dirichlet_sum
216
+ * np.hstack((1, np.cumprod(tmp[:-1])))
217
+ )
218
+ expected_weights /= np.sum(expected_weights)
219
+ assert_almost_equal(expected_weights, dpgmm.weights_)
220
+ assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
221
+
222
+
223
+ @ignore_warnings(category=ConvergenceWarning)
224
+ def test_monotonic_likelihood():
225
+ # We check that each step of variational inference without regularization
226
+ # monotonically improves the lower bound on the training set.
227
+ rng = np.random.RandomState(0)
228
+ rand_data = RandomData(rng, scale=20)
229
+ n_components = rand_data.n_components
230
+
231
+ for prior_type in PRIOR_TYPE:
232
+ for covar_type in COVARIANCE_TYPE:
233
+ X = rand_data.X[covar_type]
234
+ bgmm = BayesianGaussianMixture(
235
+ weight_concentration_prior_type=prior_type,
236
+ n_components=2 * n_components,
237
+ covariance_type=covar_type,
238
+ warm_start=True,
239
+ max_iter=1,
240
+ random_state=rng,
241
+ tol=1e-3,
242
+ )
243
+ current_lower_bound = -np.inf
244
+ # Do one training iteration at a time so we can make sure that the
245
+ # training log likelihood increases after each iteration.
246
+ for _ in range(600):
247
+ prev_lower_bound = current_lower_bound
248
+ current_lower_bound = bgmm.fit(X).lower_bound_
249
+ assert current_lower_bound >= prev_lower_bound
250
+
251
+ if bgmm.converged_:
252
+ break
253
+ assert bgmm.converged_
254
+
255
+
256
+ def test_compare_covar_type():
257
+ # We can compare the 'full' precision with the other cov_type if we apply
258
+ # 1 iter of the M-step (done during _initialize_parameters).
259
+ rng = np.random.RandomState(0)
260
+ rand_data = RandomData(rng, scale=7)
261
+ X = rand_data.X["full"]
262
+ n_components = rand_data.n_components
263
+
264
+ for prior_type in PRIOR_TYPE:
265
+ # Computation of the full_covariance
266
+ bgmm = BayesianGaussianMixture(
267
+ weight_concentration_prior_type=prior_type,
268
+ n_components=2 * n_components,
269
+ covariance_type="full",
270
+ max_iter=1,
271
+ random_state=0,
272
+ tol=1e-7,
273
+ )
274
+ bgmm._check_parameters(X)
275
+ bgmm._initialize_parameters(X, np.random.RandomState(0))
276
+ full_covariances = (
277
+ bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis]
278
+ )
279
+
280
+ # Check tied_covariance = mean(full_covariances, 0)
281
+ bgmm = BayesianGaussianMixture(
282
+ weight_concentration_prior_type=prior_type,
283
+ n_components=2 * n_components,
284
+ covariance_type="tied",
285
+ max_iter=1,
286
+ random_state=0,
287
+ tol=1e-7,
288
+ )
289
+ bgmm._check_parameters(X)
290
+ bgmm._initialize_parameters(X, np.random.RandomState(0))
291
+
292
+ tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
293
+ assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
294
+
295
+ # Check diag_covariance = diag(full_covariances)
296
+ bgmm = BayesianGaussianMixture(
297
+ weight_concentration_prior_type=prior_type,
298
+ n_components=2 * n_components,
299
+ covariance_type="diag",
300
+ max_iter=1,
301
+ random_state=0,
302
+ tol=1e-7,
303
+ )
304
+ bgmm._check_parameters(X)
305
+ bgmm._initialize_parameters(X, np.random.RandomState(0))
306
+
307
+ diag_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis]
308
+ assert_almost_equal(
309
+ diag_covariances, np.array([np.diag(cov) for cov in full_covariances])
310
+ )
311
+
312
+ # Check spherical_covariance = np.mean(diag_covariances, 0)
313
+ bgmm = BayesianGaussianMixture(
314
+ weight_concentration_prior_type=prior_type,
315
+ n_components=2 * n_components,
316
+ covariance_type="spherical",
317
+ max_iter=1,
318
+ random_state=0,
319
+ tol=1e-7,
320
+ )
321
+ bgmm._check_parameters(X)
322
+ bgmm._initialize_parameters(X, np.random.RandomState(0))
323
+
324
+ spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
325
+ assert_almost_equal(spherical_covariances, np.mean(diag_covariances, 1))
326
+
327
+
328
+ @ignore_warnings(category=ConvergenceWarning)
329
+ def test_check_covariance_precision():
330
+ # We check that the dot product of the covariance and the precision
331
+ # matrices is identity.
332
+ rng = np.random.RandomState(0)
333
+ rand_data = RandomData(rng, scale=7)
334
+ n_components, n_features = 2 * rand_data.n_components, 2
335
+
336
+ # Computation of the full_covariance
337
+ bgmm = BayesianGaussianMixture(
338
+ n_components=n_components, max_iter=100, random_state=rng, tol=1e-3, reg_covar=0
339
+ )
340
+ for covar_type in COVARIANCE_TYPE:
341
+ bgmm.covariance_type = covar_type
342
+ bgmm.fit(rand_data.X[covar_type])
343
+
344
+ if covar_type == "full":
345
+ for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
346
+ assert_almost_equal(np.dot(covar, precision), np.eye(n_features))
347
+ elif covar_type == "tied":
348
+ assert_almost_equal(
349
+ np.dot(bgmm.covariances_, bgmm.precisions_), np.eye(n_features)
350
+ )
351
+
352
+ elif covar_type == "diag":
353
+ assert_almost_equal(
354
+ bgmm.covariances_ * bgmm.precisions_,
355
+ np.ones((n_components, n_features)),
356
+ )
357
+
358
+ else:
359
+ assert_almost_equal(
360
+ bgmm.covariances_ * bgmm.precisions_, np.ones(n_components)
361
+ )
362
+
363
+
364
+ @ignore_warnings(category=ConvergenceWarning)
365
+ def test_invariant_translation():
366
+ # We check here that adding a constant to the data correctly changes the
367
+ # parameters of the mixture
368
+ rng = np.random.RandomState(0)
369
+ rand_data = RandomData(rng, scale=100)
370
+ n_components = 2 * rand_data.n_components
371
+
372
+ for prior_type in PRIOR_TYPE:
373
+ for covar_type in COVARIANCE_TYPE:
374
+ X = rand_data.X[covar_type]
375
+ bgmm1 = BayesianGaussianMixture(
376
+ weight_concentration_prior_type=prior_type,
377
+ n_components=n_components,
378
+ max_iter=100,
379
+ random_state=0,
380
+ tol=1e-3,
381
+ reg_covar=0,
382
+ ).fit(X)
383
+ bgmm2 = BayesianGaussianMixture(
384
+ weight_concentration_prior_type=prior_type,
385
+ n_components=n_components,
386
+ max_iter=100,
387
+ random_state=0,
388
+ tol=1e-3,
389
+ reg_covar=0,
390
+ ).fit(X + 100)
391
+
392
+ assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)
393
+ assert_almost_equal(bgmm1.weights_, bgmm2.weights_)
394
+ assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)
395
+
396
+
397
+ @pytest.mark.filterwarnings("ignore:.*did not converge.*")
398
+ @pytest.mark.parametrize(
399
+ "seed, max_iter, tol",
400
+ [
401
+ (0, 2, 1e-7), # strict non-convergence
402
+ (1, 2, 1e-1), # loose non-convergence
403
+ (3, 300, 1e-7), # strict convergence
404
+ (4, 300, 1e-1), # loose convergence
405
+ ],
406
+ )
407
+ def test_bayesian_mixture_fit_predict(seed, max_iter, tol):
408
+ rng = np.random.RandomState(seed)
409
+ rand_data = RandomData(rng, n_samples=50, scale=7)
410
+ n_components = 2 * rand_data.n_components
411
+
412
+ for covar_type in COVARIANCE_TYPE:
413
+ bgmm1 = BayesianGaussianMixture(
414
+ n_components=n_components,
415
+ max_iter=max_iter,
416
+ random_state=rng,
417
+ tol=tol,
418
+ reg_covar=0,
419
+ )
420
+ bgmm1.covariance_type = covar_type
421
+ bgmm2 = copy.deepcopy(bgmm1)
422
+ X = rand_data.X[covar_type]
423
+
424
+ Y_pred1 = bgmm1.fit(X).predict(X)
425
+ Y_pred2 = bgmm2.fit_predict(X)
426
+ assert_array_equal(Y_pred1, Y_pred2)
427
+
428
+
429
+ def test_bayesian_mixture_fit_predict_n_init():
430
+ # Check that fit_predict is equivalent to fit.predict, when n_init > 1
431
+ X = np.random.RandomState(0).randn(50, 5)
432
+ gm = BayesianGaussianMixture(n_components=5, n_init=10, random_state=0)
433
+ y_pred1 = gm.fit_predict(X)
434
+ y_pred2 = gm.predict(X)
435
+ assert_array_equal(y_pred1, y_pred2)
436
+
437
+
438
+ def test_bayesian_mixture_predict_predict_proba():
439
+ # this is the same test as test_gaussian_mixture_predict_predict_proba()
440
+ rng = np.random.RandomState(0)
441
+ rand_data = RandomData(rng)
442
+ for prior_type in PRIOR_TYPE:
443
+ for covar_type in COVARIANCE_TYPE:
444
+ X = rand_data.X[covar_type]
445
+ Y = rand_data.Y
446
+ bgmm = BayesianGaussianMixture(
447
+ n_components=rand_data.n_components,
448
+ random_state=rng,
449
+ weight_concentration_prior_type=prior_type,
450
+ covariance_type=covar_type,
451
+ )
452
+
453
+ # Check that an error is raised if fit has not been called
454
+ msg = (
455
+ "This BayesianGaussianMixture instance is not fitted yet. "
456
+ "Call 'fit' with appropriate arguments before using this "
457
+ "estimator."
458
+ )
459
+ with pytest.raises(NotFittedError, match=msg):
460
+ bgmm.predict(X)
461
+
462
+ bgmm.fit(X)
463
+ Y_pred = bgmm.predict(X)
464
+ Y_pred_proba = bgmm.predict_proba(X).argmax(axis=1)
465
+ assert_array_equal(Y_pred, Y_pred_proba)
466
+ assert adjusted_rand_score(Y, Y_pred) >= 0.95
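The checks above reduce to a small amount of public API. Below is a minimal sketch of the same behaviour; the toy data, component count and prior type are illustrative assumptions, not taken from the committed test file.

import numpy as np
from sklearn.mixture import BayesianGaussianMixture

# Two well-separated blobs (illustrative toy data only).
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 10])

bgmm = BayesianGaussianMixture(
    n_components=2,
    weight_concentration_prior_type="dirichlet_process",
    max_iter=200,
    random_state=0,
)
labels = bgmm.fit_predict(X)

# fit_predict matches fit(...).predict(...), and predict agrees with the
# argmax of predict_proba, which is what the tests above assert.
assert np.array_equal(labels, bgmm.predict(X))
assert np.array_equal(labels, bgmm.predict_proba(X).argmax(axis=1))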
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py ADDED
@@ -0,0 +1,1422 @@
1
+ # Author: Wei Xue <[email protected]>
2
+ # Thierry Guillemot <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import copy
6
+ import itertools
7
+ import re
8
+ import sys
9
+ import warnings
10
+ from io import StringIO
11
+ from unittest.mock import Mock
12
+
13
+ import numpy as np
14
+ import pytest
15
+ from scipy import linalg, stats
16
+
17
+ import sklearn
18
+ from sklearn.cluster import KMeans
19
+ from sklearn.covariance import EmpiricalCovariance
20
+ from sklearn.datasets import make_spd_matrix
21
+ from sklearn.exceptions import ConvergenceWarning, NotFittedError
22
+ from sklearn.metrics.cluster import adjusted_rand_score
23
+ from sklearn.mixture import GaussianMixture
24
+ from sklearn.mixture._gaussian_mixture import (
25
+ _compute_log_det_cholesky,
26
+ _compute_precision_cholesky,
27
+ _estimate_gaussian_covariances_diag,
28
+ _estimate_gaussian_covariances_full,
29
+ _estimate_gaussian_covariances_spherical,
30
+ _estimate_gaussian_covariances_tied,
31
+ _estimate_gaussian_parameters,
32
+ )
33
+ from sklearn.utils._testing import (
34
+ assert_allclose,
35
+ assert_almost_equal,
36
+ assert_array_almost_equal,
37
+ assert_array_equal,
38
+ ignore_warnings,
39
+ )
40
+ from sklearn.utils.extmath import fast_logdet
41
+
42
+ COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"]
43
+
44
+
45
+ def generate_data(n_samples, n_features, weights, means, precisions, covariance_type):
46
+ rng = np.random.RandomState(0)
47
+
48
+ X = []
49
+ if covariance_type == "spherical":
50
+ for _, (w, m, c) in enumerate(zip(weights, means, precisions["spherical"])):
51
+ X.append(
52
+ rng.multivariate_normal(
53
+ m, c * np.eye(n_features), int(np.round(w * n_samples))
54
+ )
55
+ )
56
+ if covariance_type == "diag":
57
+ for _, (w, m, c) in enumerate(zip(weights, means, precisions["diag"])):
58
+ X.append(
59
+ rng.multivariate_normal(m, np.diag(c), int(np.round(w * n_samples)))
60
+ )
61
+ if covariance_type == "tied":
62
+ for _, (w, m) in enumerate(zip(weights, means)):
63
+ X.append(
64
+ rng.multivariate_normal(
65
+ m, precisions["tied"], int(np.round(w * n_samples))
66
+ )
67
+ )
68
+ if covariance_type == "full":
69
+ for _, (w, m, c) in enumerate(zip(weights, means, precisions["full"])):
70
+ X.append(rng.multivariate_normal(m, c, int(np.round(w * n_samples))))
71
+
72
+ X = np.vstack(X)
73
+ return X
74
+
75
+
76
+ class RandomData:
77
+ def __init__(self, rng, n_samples=200, n_components=2, n_features=2, scale=50):
78
+ self.n_samples = n_samples
79
+ self.n_components = n_components
80
+ self.n_features = n_features
81
+
82
+ self.weights = rng.rand(n_components)
83
+ self.weights = self.weights / self.weights.sum()
84
+ self.means = rng.rand(n_components, n_features) * scale
85
+ self.covariances = {
86
+ "spherical": 0.5 + rng.rand(n_components),
87
+ "diag": (0.5 + rng.rand(n_components, n_features)) ** 2,
88
+ "tied": make_spd_matrix(n_features, random_state=rng),
89
+ "full": np.array(
90
+ [
91
+ make_spd_matrix(n_features, random_state=rng) * 0.5
92
+ for _ in range(n_components)
93
+ ]
94
+ ),
95
+ }
96
+ self.precisions = {
97
+ "spherical": 1.0 / self.covariances["spherical"],
98
+ "diag": 1.0 / self.covariances["diag"],
99
+ "tied": linalg.inv(self.covariances["tied"]),
100
+ "full": np.array(
101
+ [linalg.inv(covariance) for covariance in self.covariances["full"]]
102
+ ),
103
+ }
104
+
105
+ self.X = dict(
106
+ zip(
107
+ COVARIANCE_TYPE,
108
+ [
109
+ generate_data(
110
+ n_samples,
111
+ n_features,
112
+ self.weights,
113
+ self.means,
114
+ self.covariances,
115
+ covar_type,
116
+ )
117
+ for covar_type in COVARIANCE_TYPE
118
+ ],
119
+ )
120
+ )
121
+ self.Y = np.hstack(
122
+ [
123
+ np.full(int(np.round(w * n_samples)), k, dtype=int)
124
+ for k, w in enumerate(self.weights)
125
+ ]
126
+ )
127
+
128
+
129
+ def test_gaussian_mixture_attributes():
130
+ # test bad parameters
131
+ rng = np.random.RandomState(0)
132
+ X = rng.rand(10, 2)
133
+
134
+ # test good parameters
135
+ n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
136
+ covariance_type, init_params = "full", "random"
137
+ gmm = GaussianMixture(
138
+ n_components=n_components,
139
+ tol=tol,
140
+ n_init=n_init,
141
+ max_iter=max_iter,
142
+ reg_covar=reg_covar,
143
+ covariance_type=covariance_type,
144
+ init_params=init_params,
145
+ ).fit(X)
146
+
147
+ assert gmm.n_components == n_components
148
+ assert gmm.covariance_type == covariance_type
149
+ assert gmm.tol == tol
150
+ assert gmm.reg_covar == reg_covar
151
+ assert gmm.max_iter == max_iter
152
+ assert gmm.n_init == n_init
153
+ assert gmm.init_params == init_params
154
+
155
+
156
+ def test_check_weights():
157
+ rng = np.random.RandomState(0)
158
+ rand_data = RandomData(rng)
159
+
160
+ n_components = rand_data.n_components
161
+ X = rand_data.X["full"]
162
+
163
+ g = GaussianMixture(n_components=n_components)
164
+
165
+ # Check bad shape
166
+ weights_bad_shape = rng.rand(n_components, 1)
167
+ g.weights_init = weights_bad_shape
168
+ msg = re.escape(
169
+ "The parameter 'weights' should have the shape of "
170
+ f"({n_components},), but got {str(weights_bad_shape.shape)}"
171
+ )
172
+ with pytest.raises(ValueError, match=msg):
173
+ g.fit(X)
174
+
175
+ # Check bad range
176
+ weights_bad_range = rng.rand(n_components) + 1
177
+ g.weights_init = weights_bad_range
178
+ msg = re.escape(
179
+ "The parameter 'weights' should be in the range [0, 1], but got"
180
+ f" max value {np.min(weights_bad_range):.5f}, "
181
+ f"min value {np.max(weights_bad_range):.5f}"
182
+ )
183
+ with pytest.raises(ValueError, match=msg):
184
+ g.fit(X)
185
+
186
+ # Check bad normalization
187
+ weights_bad_norm = rng.rand(n_components)
188
+ weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
189
+ g.weights_init = weights_bad_norm
190
+ msg = re.escape(
191
+ "The parameter 'weights' should be normalized, "
192
+ f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}"
193
+ )
194
+ with pytest.raises(ValueError, match=msg):
195
+ g.fit(X)
196
+
197
+ # Check good weights matrix
198
+ weights = rand_data.weights
199
+ g = GaussianMixture(weights_init=weights, n_components=n_components)
200
+ g.fit(X)
201
+ assert_array_equal(weights, g.weights_init)
202
+
203
+
204
+ def test_check_means():
205
+ rng = np.random.RandomState(0)
206
+ rand_data = RandomData(rng)
207
+
208
+ n_components, n_features = rand_data.n_components, rand_data.n_features
209
+ X = rand_data.X["full"]
210
+
211
+ g = GaussianMixture(n_components=n_components)
212
+
213
+ # Check means bad shape
214
+ means_bad_shape = rng.rand(n_components + 1, n_features)
215
+ g.means_init = means_bad_shape
216
+ msg = "The parameter 'means' should have the shape of "
217
+ with pytest.raises(ValueError, match=msg):
218
+ g.fit(X)
219
+
220
+ # Check good means matrix
221
+ means = rand_data.means
222
+ g.means_init = means
223
+ g.fit(X)
224
+ assert_array_equal(means, g.means_init)
225
+
226
+
227
+ def test_check_precisions():
228
+ rng = np.random.RandomState(0)
229
+ rand_data = RandomData(rng)
230
+
231
+ n_components, n_features = rand_data.n_components, rand_data.n_features
232
+
233
+ # Define the bad precisions for each covariance_type
234
+ precisions_bad_shape = {
235
+ "full": np.ones((n_components + 1, n_features, n_features)),
236
+ "tied": np.ones((n_features + 1, n_features + 1)),
237
+ "diag": np.ones((n_components + 1, n_features)),
238
+ "spherical": np.ones((n_components + 1)),
239
+ }
240
+
241
+ # Define not positive-definite precisions
242
+ precisions_not_pos = np.ones((n_components, n_features, n_features))
243
+ precisions_not_pos[0] = np.eye(n_features)
244
+ precisions_not_pos[0, 0, 0] = -1.0
245
+
246
+ precisions_not_positive = {
247
+ "full": precisions_not_pos,
248
+ "tied": precisions_not_pos[0],
249
+ "diag": np.full((n_components, n_features), -1.0),
250
+ "spherical": np.full(n_components, -1.0),
251
+ }
252
+
253
+ not_positive_errors = {
254
+ "full": "symmetric, positive-definite",
255
+ "tied": "symmetric, positive-definite",
256
+ "diag": "positive",
257
+ "spherical": "positive",
258
+ }
259
+
260
+ for covar_type in COVARIANCE_TYPE:
261
+ X = RandomData(rng).X[covar_type]
262
+ g = GaussianMixture(
263
+ n_components=n_components, covariance_type=covar_type, random_state=rng
264
+ )
265
+
266
+ # Check precisions with bad shapes
267
+ g.precisions_init = precisions_bad_shape[covar_type]
268
+ msg = f"The parameter '{covar_type} precision' should have the shape of"
269
+ with pytest.raises(ValueError, match=msg):
270
+ g.fit(X)
271
+
272
+ # Check not positive precisions
273
+ g.precisions_init = precisions_not_positive[covar_type]
274
+ msg = f"'{covar_type} precision' should be {not_positive_errors[covar_type]}"
275
+ with pytest.raises(ValueError, match=msg):
276
+ g.fit(X)
277
+
278
+ # Check the correct init of precisions_init
279
+ g.precisions_init = rand_data.precisions[covar_type]
280
+ g.fit(X)
281
+ assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
282
+
283
+
284
+ def test_suffstat_sk_full():
285
+ # compare the precision matrix computed from the
286
+ # EmpiricalCovariance.covariance fitted on X*sqrt(resp)
287
+ # with _sufficient_sk_full, n_components=1
288
+ rng = np.random.RandomState(0)
289
+ n_samples, n_features = 500, 2
290
+
291
+ # special case 1, assuming data is "centered"
292
+ X = rng.rand(n_samples, n_features)
293
+ resp = rng.rand(n_samples, 1)
294
+ X_resp = np.sqrt(resp) * X
295
+ nk = np.array([n_samples])
296
+ xk = np.zeros((1, n_features))
297
+ covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
298
+ ecov = EmpiricalCovariance(assume_centered=True)
299
+ ecov.fit(X_resp)
300
+ assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0)
301
+ assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0)
302
+
303
+ # check the precision computation
304
+ precs_chol_pred = _compute_precision_cholesky(covars_pred, "full")
305
+ precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
306
+ precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
307
+ assert_array_almost_equal(precs_est, precs_pred)
308
+
309
+ # special case 2, assuming resp are all ones
310
+ resp = np.ones((n_samples, 1))
311
+ nk = np.array([n_samples])
312
+ xk = X.mean(axis=0).reshape((1, -1))
313
+ covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
314
+ ecov = EmpiricalCovariance(assume_centered=False)
315
+ ecov.fit(X)
316
+ assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0)
317
+ assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0)
318
+
319
+ # check the precision computation
320
+ precs_chol_pred = _compute_precision_cholesky(covars_pred, "full")
321
+ precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
322
+ precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
323
+ assert_array_almost_equal(precs_est, precs_pred)
324
+
325
+
326
+ def test_suffstat_sk_tied():
327
+ # use equation Nk * Sk / N = S_tied
328
+ rng = np.random.RandomState(0)
329
+ n_samples, n_features, n_components = 500, 2, 2
330
+
331
+ resp = rng.rand(n_samples, n_components)
332
+ resp = resp / resp.sum(axis=1)[:, np.newaxis]
333
+ X = rng.rand(n_samples, n_features)
334
+ nk = resp.sum(axis=0)
335
+ xk = np.dot(resp.T, X) / nk[:, np.newaxis]
336
+
337
+ covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
338
+ covars_pred_full = (
339
+ np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full, 0) / n_samples
340
+ )
341
+
342
+ covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)
343
+
344
+ ecov = EmpiricalCovariance()
345
+ ecov.covariance_ = covars_pred_full
346
+ assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="frobenius"), 0)
347
+ assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="spectral"), 0)
348
+
349
+ # check the precision computation
350
+ precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, "tied")
351
+ precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
352
+ precs_est = linalg.inv(covars_pred_tied)
353
+ assert_array_almost_equal(precs_est, precs_pred)
354
+
355
+
356
+ def test_suffstat_sk_diag():
357
+ # test against 'full' case
358
+ rng = np.random.RandomState(0)
359
+ n_samples, n_features, n_components = 500, 2, 2
360
+
361
+ resp = rng.rand(n_samples, n_components)
362
+ resp = resp / resp.sum(axis=1)[:, np.newaxis]
363
+ X = rng.rand(n_samples, n_features)
364
+ nk = resp.sum(axis=0)
365
+ xk = np.dot(resp.T, X) / nk[:, np.newaxis]
366
+ covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
367
+ covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
368
+
369
+ ecov = EmpiricalCovariance()
370
+ for cov_full, cov_diag in zip(covars_pred_full, covars_pred_diag):
371
+ ecov.covariance_ = np.diag(np.diag(cov_full))
372
+ cov_diag = np.diag(cov_diag)
373
+ assert_almost_equal(ecov.error_norm(cov_diag, norm="frobenius"), 0)
374
+ assert_almost_equal(ecov.error_norm(cov_diag, norm="spectral"), 0)
375
+
376
+ # check the precision computation
377
+ precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, "diag")
378
+ assert_almost_equal(covars_pred_diag, 1.0 / precs_chol_pred**2)
379
+
380
+
381
+ def test_gaussian_suffstat_sk_spherical():
382
+ # computing the spherical covariance equals the variance of the
383
+ # one-dimensional data after flattening, n_components=1
384
+ rng = np.random.RandomState(0)
385
+ n_samples, n_features = 500, 2
386
+
387
+ X = rng.rand(n_samples, n_features)
388
+ X = X - X.mean()
389
+ resp = np.ones((n_samples, 1))
390
+ nk = np.array([n_samples])
391
+ xk = X.mean()
392
+ covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0)
393
+ covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / (
394
+ n_features * n_samples
395
+ )
396
+ assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)
397
+
398
+ # check the precision computation
399
+ precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, "spherical")
400
+ assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2)
401
+
402
+
403
+ def test_compute_log_det_cholesky():
404
+ n_features = 2
405
+ rand_data = RandomData(np.random.RandomState(0))
406
+
407
+ for covar_type in COVARIANCE_TYPE:
408
+ covariance = rand_data.covariances[covar_type]
409
+
410
+ if covar_type == "full":
411
+ predected_det = np.array([linalg.det(cov) for cov in covariance])
412
+ elif covar_type == "tied":
413
+ predected_det = linalg.det(covariance)
414
+ elif covar_type == "diag":
415
+ predected_det = np.array([np.prod(cov) for cov in covariance])
416
+ elif covar_type == "spherical":
417
+ predected_det = covariance**n_features
418
+
419
+ # We compute the cholesky decomposition of the covariance matrix
420
+ expected_det = _compute_log_det_cholesky(
421
+ _compute_precision_cholesky(covariance, covar_type),
422
+ covar_type,
423
+ n_features=n_features,
424
+ )
425
+ assert_array_almost_equal(expected_det, -0.5 * np.log(predected_det))
426
+
427
+
428
+ def _naive_lmvnpdf_diag(X, means, covars):
429
+ resp = np.empty((len(X), len(means)))
430
+ stds = np.sqrt(covars)
431
+ for i, (mean, std) in enumerate(zip(means, stds)):
432
+ resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
433
+ return resp
434
+
435
+
436
+ def test_gaussian_mixture_log_probabilities():
437
+ from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob
438
+
439
+ # test against _naive_lmvnpdf_diag
440
+ rng = np.random.RandomState(0)
441
+ rand_data = RandomData(rng)
442
+ n_samples = 500
443
+ n_features = rand_data.n_features
444
+ n_components = rand_data.n_components
445
+
446
+ means = rand_data.means
447
+ covars_diag = rng.rand(n_components, n_features)
448
+ X = rng.rand(n_samples, n_features)
449
+ log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
450
+
451
+ # full covariances
452
+ precs_full = np.array([np.diag(1.0 / np.sqrt(x)) for x in covars_diag])
453
+
454
+ log_prob = _estimate_log_gaussian_prob(X, means, precs_full, "full")
455
+ assert_array_almost_equal(log_prob, log_prob_naive)
456
+
457
+ # diag covariances
458
+ precs_chol_diag = 1.0 / np.sqrt(covars_diag)
459
+ log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, "diag")
460
+ assert_array_almost_equal(log_prob, log_prob_naive)
461
+
462
+ # tied
463
+ covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
464
+ precs_tied = np.diag(np.sqrt(1.0 / covars_tied))
465
+
466
+ log_prob_naive = _naive_lmvnpdf_diag(X, means, [covars_tied] * n_components)
467
+ log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, "tied")
468
+
469
+ assert_array_almost_equal(log_prob, log_prob_naive)
470
+
471
+ # spherical
472
+ covars_spherical = covars_diag.mean(axis=1)
473
+ precs_spherical = 1.0 / np.sqrt(covars_diag.mean(axis=1))
474
+ log_prob_naive = _naive_lmvnpdf_diag(
475
+ X, means, [[k] * n_features for k in covars_spherical]
476
+ )
477
+ log_prob = _estimate_log_gaussian_prob(X, means, precs_spherical, "spherical")
478
+ assert_array_almost_equal(log_prob, log_prob_naive)
479
+
480
+
481
+ # skip tests on weighted_log_probabilities, log_weights
482
+
483
+
484
+ def test_gaussian_mixture_estimate_log_prob_resp():
485
+ # test whether responsibilities are normalized
486
+ rng = np.random.RandomState(0)
487
+ rand_data = RandomData(rng, scale=5)
488
+ n_samples = rand_data.n_samples
489
+ n_features = rand_data.n_features
490
+ n_components = rand_data.n_components
491
+
492
+ X = rng.rand(n_samples, n_features)
493
+ for covar_type in COVARIANCE_TYPE:
494
+ weights = rand_data.weights
495
+ means = rand_data.means
496
+ precisions = rand_data.precisions[covar_type]
497
+ g = GaussianMixture(
498
+ n_components=n_components,
499
+ random_state=rng,
500
+ weights_init=weights,
501
+ means_init=means,
502
+ precisions_init=precisions,
503
+ covariance_type=covar_type,
504
+ )
505
+ g.fit(X)
506
+ resp = g.predict_proba(X)
507
+ assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
508
+ assert_array_equal(g.weights_init, weights)
509
+ assert_array_equal(g.means_init, means)
510
+ assert_array_equal(g.precisions_init, precisions)
511
+
512
+
513
+ def test_gaussian_mixture_predict_predict_proba():
514
+ rng = np.random.RandomState(0)
515
+ rand_data = RandomData(rng)
516
+ for covar_type in COVARIANCE_TYPE:
517
+ X = rand_data.X[covar_type]
518
+ Y = rand_data.Y
519
+ g = GaussianMixture(
520
+ n_components=rand_data.n_components,
521
+ random_state=rng,
522
+ weights_init=rand_data.weights,
523
+ means_init=rand_data.means,
524
+ precisions_init=rand_data.precisions[covar_type],
525
+ covariance_type=covar_type,
526
+ )
527
+
528
+ # Check a warning message arrive if we don't do fit
529
+ msg = (
530
+ "This GaussianMixture instance is not fitted yet. Call 'fit' "
531
+ "with appropriate arguments before using this estimator."
532
+ )
533
+ with pytest.raises(NotFittedError, match=msg):
534
+ g.predict(X)
535
+
536
+ g.fit(X)
537
+ Y_pred = g.predict(X)
538
+ Y_pred_proba = g.predict_proba(X).argmax(axis=1)
539
+ assert_array_equal(Y_pred, Y_pred_proba)
540
+ assert adjusted_rand_score(Y, Y_pred) > 0.95
541
+
542
+
543
+ @pytest.mark.filterwarnings("ignore:.*did not converge.*")
544
+ @pytest.mark.parametrize(
545
+ "seed, max_iter, tol",
546
+ [
547
+ (0, 2, 1e-7), # strict non-convergence
548
+ (1, 2, 1e-1), # loose non-convergence
549
+ (3, 300, 1e-7), # strict convergence
550
+ (4, 300, 1e-1), # loose convergence
551
+ ],
552
+ )
553
+ def test_gaussian_mixture_fit_predict(seed, max_iter, tol):
554
+ rng = np.random.RandomState(seed)
555
+ rand_data = RandomData(rng)
556
+ for covar_type in COVARIANCE_TYPE:
557
+ X = rand_data.X[covar_type]
558
+ Y = rand_data.Y
559
+ g = GaussianMixture(
560
+ n_components=rand_data.n_components,
561
+ random_state=rng,
562
+ weights_init=rand_data.weights,
563
+ means_init=rand_data.means,
564
+ precisions_init=rand_data.precisions[covar_type],
565
+ covariance_type=covar_type,
566
+ max_iter=max_iter,
567
+ tol=tol,
568
+ )
569
+
570
+ # check if fit_predict(X) is equivalent to fit(X).predict(X)
571
+ f = copy.deepcopy(g)
572
+ Y_pred1 = f.fit(X).predict(X)
573
+ Y_pred2 = g.fit_predict(X)
574
+ assert_array_equal(Y_pred1, Y_pred2)
575
+ assert adjusted_rand_score(Y, Y_pred2) > 0.95
576
+
577
+
578
+ def test_gaussian_mixture_fit_predict_n_init():
579
+ # Check that fit_predict is equivalent to fit.predict, when n_init > 1
580
+ X = np.random.RandomState(0).randn(1000, 5)
581
+ gm = GaussianMixture(n_components=5, n_init=5, random_state=0)
582
+ y_pred1 = gm.fit_predict(X)
583
+ y_pred2 = gm.predict(X)
584
+ assert_array_equal(y_pred1, y_pred2)
585
+
586
+
587
+ def test_gaussian_mixture_fit():
588
+ # recover the ground truth
589
+ rng = np.random.RandomState(0)
590
+ rand_data = RandomData(rng)
591
+ n_features = rand_data.n_features
592
+ n_components = rand_data.n_components
593
+
594
+ for covar_type in COVARIANCE_TYPE:
595
+ X = rand_data.X[covar_type]
596
+ g = GaussianMixture(
597
+ n_components=n_components,
598
+ n_init=20,
599
+ reg_covar=0,
600
+ random_state=rng,
601
+ covariance_type=covar_type,
602
+ )
603
+ g.fit(X)
604
+
605
+ # needs more data to pass the test with rtol=1e-7
606
+ assert_allclose(
607
+ np.sort(g.weights_), np.sort(rand_data.weights), rtol=0.1, atol=1e-2
608
+ )
609
+
610
+ arg_idx1 = g.means_[:, 0].argsort()
611
+ arg_idx2 = rand_data.means[:, 0].argsort()
612
+ assert_allclose(
613
+ g.means_[arg_idx1], rand_data.means[arg_idx2], rtol=0.1, atol=1e-2
614
+ )
615
+
616
+ if covar_type == "full":
617
+ prec_pred = g.precisions_
618
+ prec_test = rand_data.precisions["full"]
619
+ elif covar_type == "tied":
620
+ prec_pred = np.array([g.precisions_] * n_components)
621
+ prec_test = np.array([rand_data.precisions["tied"]] * n_components)
622
+ elif covar_type == "spherical":
623
+ prec_pred = np.array([np.eye(n_features) * c for c in g.precisions_])
624
+ prec_test = np.array(
625
+ [np.eye(n_features) * c for c in rand_data.precisions["spherical"]]
626
+ )
627
+ elif covar_type == "diag":
628
+ prec_pred = np.array([np.diag(d) for d in g.precisions_])
629
+ prec_test = np.array([np.diag(d) for d in rand_data.precisions["diag"]])
630
+
631
+ arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
632
+ arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
633
+ for k, h in zip(arg_idx1, arg_idx2):
634
+ ecov = EmpiricalCovariance()
635
+ ecov.covariance_ = prec_test[h]
636
+ # the accuracy depends on the amount of data and the randomness of rng
637
+ assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15)
638
+
639
+
640
+ def test_gaussian_mixture_fit_best_params():
641
+ rng = np.random.RandomState(0)
642
+ rand_data = RandomData(rng)
643
+ n_components = rand_data.n_components
644
+ n_init = 10
645
+ for covar_type in COVARIANCE_TYPE:
646
+ X = rand_data.X[covar_type]
647
+ g = GaussianMixture(
648
+ n_components=n_components,
649
+ n_init=1,
650
+ reg_covar=0,
651
+ random_state=rng,
652
+ covariance_type=covar_type,
653
+ )
654
+ ll = []
655
+ for _ in range(n_init):
656
+ g.fit(X)
657
+ ll.append(g.score(X))
658
+ ll = np.array(ll)
659
+ g_best = GaussianMixture(
660
+ n_components=n_components,
661
+ n_init=n_init,
662
+ reg_covar=0,
663
+ random_state=rng,
664
+ covariance_type=covar_type,
665
+ )
666
+ g_best.fit(X)
667
+ assert_almost_equal(ll.min(), g_best.score(X))
668
+
669
+
670
+ def test_gaussian_mixture_fit_convergence_warning():
671
+ rng = np.random.RandomState(0)
672
+ rand_data = RandomData(rng, scale=1)
673
+ n_components = rand_data.n_components
674
+ max_iter = 1
675
+ for covar_type in COVARIANCE_TYPE:
676
+ X = rand_data.X[covar_type]
677
+ g = GaussianMixture(
678
+ n_components=n_components,
679
+ n_init=1,
680
+ max_iter=max_iter,
681
+ reg_covar=0,
682
+ random_state=rng,
683
+ covariance_type=covar_type,
684
+ )
685
+ msg = (
686
+ f"Initialization {max_iter} did not converge. Try different init "
687
+ "parameters, or increase max_iter, tol or check for degenerate"
688
+ " data."
689
+ )
690
+ with pytest.warns(ConvergenceWarning, match=msg):
691
+ g.fit(X)
692
+
693
+
694
+ def test_multiple_init():
695
+ # Test that using multiple inits is not much worse than using a single one
696
+ rng = np.random.RandomState(0)
697
+ n_samples, n_features, n_components = 50, 5, 2
698
+ X = rng.randn(n_samples, n_features)
699
+ for cv_type in COVARIANCE_TYPE:
700
+ train1 = (
701
+ GaussianMixture(
702
+ n_components=n_components, covariance_type=cv_type, random_state=0
703
+ )
704
+ .fit(X)
705
+ .score(X)
706
+ )
707
+ train2 = (
708
+ GaussianMixture(
709
+ n_components=n_components,
710
+ covariance_type=cv_type,
711
+ random_state=0,
712
+ n_init=5,
713
+ )
714
+ .fit(X)
715
+ .score(X)
716
+ )
717
+ assert train2 >= train1
718
+
719
+
720
+ def test_gaussian_mixture_n_parameters():
721
+ # Test that the right number of parameters is estimated
722
+ rng = np.random.RandomState(0)
723
+ n_samples, n_features, n_components = 50, 5, 2
724
+ X = rng.randn(n_samples, n_features)
725
+ n_params = {"spherical": 13, "diag": 21, "tied": 26, "full": 41}
726
+ for cv_type in COVARIANCE_TYPE:
727
+ g = GaussianMixture(
728
+ n_components=n_components, covariance_type=cv_type, random_state=rng
729
+ ).fit(X)
730
+ assert g._n_parameters() == n_params[cv_type]
731
+
732
+
733
+ def test_bic_1d_1component():
734
+ # Test all of the covariance_types return the same BIC score for
735
+ # 1-dimensional, 1 component fits.
736
+ rng = np.random.RandomState(0)
737
+ n_samples, n_dim, n_components = 100, 1, 1
738
+ X = rng.randn(n_samples, n_dim)
739
+ bic_full = (
740
+ GaussianMixture(
741
+ n_components=n_components, covariance_type="full", random_state=rng
742
+ )
743
+ .fit(X)
744
+ .bic(X)
745
+ )
746
+ for covariance_type in ["tied", "diag", "spherical"]:
747
+ bic = (
748
+ GaussianMixture(
749
+ n_components=n_components,
750
+ covariance_type=covariance_type,
751
+ random_state=rng,
752
+ )
753
+ .fit(X)
754
+ .bic(X)
755
+ )
756
+ assert_almost_equal(bic_full, bic)
757
+
758
+
759
+ def test_gaussian_mixture_aic_bic():
760
+ # Test the aic and bic criteria
761
+ rng = np.random.RandomState(0)
762
+ n_samples, n_features, n_components = 50, 3, 2
763
+ X = rng.randn(n_samples, n_features)
764
+ # standard gaussian entropy
765
+ sgh = 0.5 * (
766
+ fast_logdet(np.cov(X.T, bias=1)) + n_features * (1 + np.log(2 * np.pi))
767
+ )
768
+ for cv_type in COVARIANCE_TYPE:
769
+ g = GaussianMixture(
770
+ n_components=n_components,
771
+ covariance_type=cv_type,
772
+ random_state=rng,
773
+ max_iter=200,
774
+ )
775
+ g.fit(X)
776
+ aic = 2 * n_samples * sgh + 2 * g._n_parameters()
777
+ bic = 2 * n_samples * sgh + np.log(n_samples) * g._n_parameters()
778
+ bound = n_features / np.sqrt(n_samples)
779
+ assert (g.aic(X) - aic) / n_samples < bound
780
+ assert (g.bic(X) - bic) / n_samples < bound
781
+
782
+
783
+ def test_gaussian_mixture_verbose():
784
+ rng = np.random.RandomState(0)
785
+ rand_data = RandomData(rng)
786
+ n_components = rand_data.n_components
787
+ for covar_type in COVARIANCE_TYPE:
788
+ X = rand_data.X[covar_type]
789
+ g = GaussianMixture(
790
+ n_components=n_components,
791
+ n_init=1,
792
+ reg_covar=0,
793
+ random_state=rng,
794
+ covariance_type=covar_type,
795
+ verbose=1,
796
+ )
797
+ h = GaussianMixture(
798
+ n_components=n_components,
799
+ n_init=1,
800
+ reg_covar=0,
801
+ random_state=rng,
802
+ covariance_type=covar_type,
803
+ verbose=2,
804
+ )
805
+ old_stdout = sys.stdout
806
+ sys.stdout = StringIO()
807
+ try:
808
+ g.fit(X)
809
+ h.fit(X)
810
+ finally:
811
+ sys.stdout = old_stdout
812
+
813
+
814
+ @pytest.mark.filterwarnings("ignore:.*did not converge.*")
815
+ @pytest.mark.parametrize("seed", (0, 1, 2))
816
+ def test_warm_start(seed):
817
+ random_state = seed
818
+ rng = np.random.RandomState(random_state)
819
+ n_samples, n_features, n_components = 500, 2, 2
820
+ X = rng.rand(n_samples, n_features)
821
+
822
+ # Assert that warm_start gives the same result for the same number of iterations
823
+ g = GaussianMixture(
824
+ n_components=n_components,
825
+ n_init=1,
826
+ max_iter=2,
827
+ reg_covar=0,
828
+ random_state=random_state,
829
+ warm_start=False,
830
+ )
831
+ h = GaussianMixture(
832
+ n_components=n_components,
833
+ n_init=1,
834
+ max_iter=1,
835
+ reg_covar=0,
836
+ random_state=random_state,
837
+ warm_start=True,
838
+ )
839
+
840
+ g.fit(X)
841
+ score1 = h.fit(X).score(X)
842
+ score2 = h.fit(X).score(X)
843
+
844
+ assert_almost_equal(g.weights_, h.weights_)
845
+ assert_almost_equal(g.means_, h.means_)
846
+ assert_almost_equal(g.precisions_, h.precisions_)
847
+ assert score2 > score1
848
+
849
+ # Assert that by using warm_start we can converge to a good solution
850
+ g = GaussianMixture(
851
+ n_components=n_components,
852
+ n_init=1,
853
+ max_iter=5,
854
+ reg_covar=0,
855
+ random_state=random_state,
856
+ warm_start=False,
857
+ tol=1e-6,
858
+ )
859
+ h = GaussianMixture(
860
+ n_components=n_components,
861
+ n_init=1,
862
+ max_iter=5,
863
+ reg_covar=0,
864
+ random_state=random_state,
865
+ warm_start=True,
866
+ tol=1e-6,
867
+ )
868
+
869
+ g.fit(X)
870
+ assert not g.converged_
871
+
872
+ h.fit(X)
873
+ # depending on the data there is large variability in the number of
874
+ # refits necessary to converge due to the complete randomness of the
875
+ # data
876
+ for _ in range(1000):
877
+ h.fit(X)
878
+ if h.converged_:
879
+ break
880
+ assert h.converged_
881
+
882
+
883
+ @ignore_warnings(category=ConvergenceWarning)
884
+ def test_convergence_detected_with_warm_start():
885
+ # We check that convergence is detected when warm_start=True
886
+ rng = np.random.RandomState(0)
887
+ rand_data = RandomData(rng)
888
+ n_components = rand_data.n_components
889
+ X = rand_data.X["full"]
890
+
891
+ for max_iter in (1, 2, 50):
892
+ gmm = GaussianMixture(
893
+ n_components=n_components,
894
+ warm_start=True,
895
+ max_iter=max_iter,
896
+ random_state=rng,
897
+ )
898
+ for _ in range(100):
899
+ gmm.fit(X)
900
+ if gmm.converged_:
901
+ break
902
+ assert gmm.converged_
903
+ assert max_iter >= gmm.n_iter_
904
+
905
+
906
+ def test_score():
907
+ covar_type = "full"
908
+ rng = np.random.RandomState(0)
909
+ rand_data = RandomData(rng, scale=7)
910
+ n_components = rand_data.n_components
911
+ X = rand_data.X[covar_type]
912
+
913
+ # Check the error message if we don't call fit
914
+ gmm1 = GaussianMixture(
915
+ n_components=n_components,
916
+ n_init=1,
917
+ max_iter=1,
918
+ reg_covar=0,
919
+ random_state=rng,
920
+ covariance_type=covar_type,
921
+ )
922
+ msg = (
923
+ "This GaussianMixture instance is not fitted yet. Call 'fit' with "
924
+ "appropriate arguments before using this estimator."
925
+ )
926
+ with pytest.raises(NotFittedError, match=msg):
927
+ gmm1.score(X)
928
+
929
+ # Check score value
930
+ with warnings.catch_warnings():
931
+ warnings.simplefilter("ignore", ConvergenceWarning)
932
+ gmm1.fit(X)
933
+ gmm_score = gmm1.score(X)
934
+ gmm_score_proba = gmm1.score_samples(X).mean()
935
+ assert_almost_equal(gmm_score, gmm_score_proba)
936
+
937
+ # Check if the score increase
938
+ gmm2 = GaussianMixture(
939
+ n_components=n_components,
940
+ n_init=1,
941
+ reg_covar=0,
942
+ random_state=rng,
943
+ covariance_type=covar_type,
944
+ ).fit(X)
945
+ assert gmm2.score(X) > gmm1.score(X)
946
+
947
+
948
+ def test_score_samples():
949
+ covar_type = "full"
950
+ rng = np.random.RandomState(0)
951
+ rand_data = RandomData(rng, scale=7)
952
+ n_components = rand_data.n_components
953
+ X = rand_data.X[covar_type]
954
+
955
+ # Check the error message if we don't call fit
956
+ gmm = GaussianMixture(
957
+ n_components=n_components,
958
+ n_init=1,
959
+ reg_covar=0,
960
+ random_state=rng,
961
+ covariance_type=covar_type,
962
+ )
963
+ msg = (
964
+ "This GaussianMixture instance is not fitted yet. Call 'fit' with "
965
+ "appropriate arguments before using this estimator."
966
+ )
967
+ with pytest.raises(NotFittedError, match=msg):
968
+ gmm.score_samples(X)
969
+
970
+ gmm_score_samples = gmm.fit(X).score_samples(X)
971
+ assert gmm_score_samples.shape[0] == rand_data.n_samples
972
+
973
+
974
+ def test_monotonic_likelihood():
975
+ # We check that each step of EM without regularization monotonically
976
+ # improves the training set likelihood
977
+ rng = np.random.RandomState(0)
978
+ rand_data = RandomData(rng, scale=7)
979
+ n_components = rand_data.n_components
980
+
981
+ for covar_type in COVARIANCE_TYPE:
982
+ X = rand_data.X[covar_type]
983
+ gmm = GaussianMixture(
984
+ n_components=n_components,
985
+ covariance_type=covar_type,
986
+ reg_covar=0,
987
+ warm_start=True,
988
+ max_iter=1,
989
+ random_state=rng,
990
+ tol=1e-7,
991
+ )
992
+ current_log_likelihood = -np.inf
993
+ with warnings.catch_warnings():
994
+ warnings.simplefilter("ignore", ConvergenceWarning)
995
+ # Do one training iteration at a time so we can make sure that the
996
+ # training log likelihood increases after each iteration.
997
+ for _ in range(600):
998
+ prev_log_likelihood = current_log_likelihood
999
+ current_log_likelihood = gmm.fit(X).score(X)
1000
+ assert current_log_likelihood >= prev_log_likelihood
1001
+
1002
+ if gmm.converged_:
1003
+ break
1004
+
1005
+ assert gmm.converged_
1006
+
1007
+
1008
+ def test_regularisation():
1009
+ # We train the GaussianMixture on degenerate data by defining two clusters
1010
+ # with zero covariance.
1011
+ rng = np.random.RandomState(0)
1012
+ n_samples, n_features = 10, 5
1013
+
1014
+ X = np.vstack(
1015
+ (np.ones((n_samples // 2, n_features)), np.zeros((n_samples // 2, n_features)))
1016
+ )
1017
+
1018
+ for covar_type in COVARIANCE_TYPE:
1019
+ gmm = GaussianMixture(
1020
+ n_components=n_samples,
1021
+ reg_covar=0,
1022
+ covariance_type=covar_type,
1023
+ random_state=rng,
1024
+ )
1025
+
1026
+ with warnings.catch_warnings():
1027
+ warnings.simplefilter("ignore", RuntimeWarning)
1028
+ msg = re.escape(
1029
+ "Fitting the mixture model failed because some components have"
1030
+ " ill-defined empirical covariance (for instance caused by "
1031
+ "singleton or collapsed samples). Try to decrease the number "
1032
+ "of components, or increase reg_covar."
1033
+ )
1034
+ with pytest.raises(ValueError, match=msg):
1035
+ gmm.fit(X)
1036
+
1037
+ gmm.set_params(reg_covar=1e-6).fit(X)
1038
+
1039
+
1040
+ def test_property():
1041
+ rng = np.random.RandomState(0)
1042
+ rand_data = RandomData(rng, scale=7)
1043
+ n_components = rand_data.n_components
1044
+
1045
+ for covar_type in COVARIANCE_TYPE:
1046
+ X = rand_data.X[covar_type]
1047
+ gmm = GaussianMixture(
1048
+ n_components=n_components,
1049
+ covariance_type=covar_type,
1050
+ random_state=rng,
1051
+ n_init=5,
1052
+ )
1053
+ gmm.fit(X)
1054
+ if covar_type == "full":
1055
+ for prec, covar in zip(gmm.precisions_, gmm.covariances_):
1056
+ assert_array_almost_equal(linalg.inv(prec), covar)
1057
+ elif covar_type == "tied":
1058
+ assert_array_almost_equal(linalg.inv(gmm.precisions_), gmm.covariances_)
1059
+ else:
1060
+ assert_array_almost_equal(gmm.precisions_, 1.0 / gmm.covariances_)
1061
+
1062
+
1063
+ def test_sample():
1064
+ rng = np.random.RandomState(0)
1065
+ rand_data = RandomData(rng, scale=7, n_components=3)
1066
+ n_features, n_components = rand_data.n_features, rand_data.n_components
1067
+
1068
+ for covar_type in COVARIANCE_TYPE:
1069
+ X = rand_data.X[covar_type]
1070
+
1071
+ gmm = GaussianMixture(
1072
+ n_components=n_components, covariance_type=covar_type, random_state=rng
1073
+ )
1074
+ # Sampling requires the GaussianMixture to be fitted first
1075
+ msg = "This GaussianMixture instance is not fitted"
1076
+ with pytest.raises(NotFittedError, match=msg):
1077
+ gmm.sample(0)
1078
+ gmm.fit(X)
1079
+
1080
+ msg = "Invalid value for 'n_samples'"
1081
+ with pytest.raises(ValueError, match=msg):
1082
+ gmm.sample(0)
1083
+
1084
+ # Just to make sure the class samples correctly
1085
+ n_samples = 20000
1086
+ X_s, y_s = gmm.sample(n_samples)
1087
+
1088
+ for k in range(n_components):
1089
+ if covar_type == "full":
1090
+ assert_array_almost_equal(
1091
+ gmm.covariances_[k], np.cov(X_s[y_s == k].T), decimal=1
1092
+ )
1093
+ elif covar_type == "tied":
1094
+ assert_array_almost_equal(
1095
+ gmm.covariances_, np.cov(X_s[y_s == k].T), decimal=1
1096
+ )
1097
+ elif covar_type == "diag":
1098
+ assert_array_almost_equal(
1099
+ gmm.covariances_[k], np.diag(np.cov(X_s[y_s == k].T)), decimal=1
1100
+ )
1101
+ else:
1102
+ assert_array_almost_equal(
1103
+ gmm.covariances_[k],
1104
+ np.var(X_s[y_s == k] - gmm.means_[k]),
1105
+ decimal=1,
1106
+ )
1107
+
1108
+ means_s = np.array([np.mean(X_s[y_s == k], 0) for k in range(n_components)])
1109
+ assert_array_almost_equal(gmm.means_, means_s, decimal=1)
1110
+
1111
+ # Check shapes of sampled data, see
1112
+ # https://github.com/scikit-learn/scikit-learn/issues/7701
1113
+ assert X_s.shape == (n_samples, n_features)
1114
+
1115
+ for sample_size in range(1, 100):
1116
+ X_s, _ = gmm.sample(sample_size)
1117
+ assert X_s.shape == (sample_size, n_features)
1118
+
1119
+
1120
+ @ignore_warnings(category=ConvergenceWarning)
1121
+ def test_init():
1122
+ # We check that by increasing the n_init number we have a better solution
1123
+ for random_state in range(15):
1124
+ rand_data = RandomData(
1125
+ np.random.RandomState(random_state), n_samples=50, scale=1
1126
+ )
1127
+ n_components = rand_data.n_components
1128
+ X = rand_data.X["full"]
1129
+
1130
+ gmm1 = GaussianMixture(
1131
+ n_components=n_components, n_init=1, max_iter=1, random_state=random_state
1132
+ ).fit(X)
1133
+ gmm2 = GaussianMixture(
1134
+ n_components=n_components, n_init=10, max_iter=1, random_state=random_state
1135
+ ).fit(X)
1136
+
1137
+ assert gmm2.lower_bound_ >= gmm1.lower_bound_
1138
+
1139
+
1140
+ def test_gaussian_mixture_setting_best_params():
1141
+ """`GaussianMixture`'s best_parameters, `n_iter_` and `lower_bound_`
1142
+ must be set appropriately in the case of divergence.
1143
+
1144
+ Non-regression test for:
1145
+ https://github.com/scikit-learn/scikit-learn/issues/18216
1146
+ """
1147
+ rnd = np.random.RandomState(0)
1148
+ n_samples = 30
1149
+ X = rnd.uniform(size=(n_samples, 3))
1150
+
1151
+ # following initialization parameters were found to lead to divergence
1152
+ means_init = np.array(
1153
+ [
1154
+ [0.670637869618158, 0.21038256107384043, 0.12892629765485303],
1155
+ [0.09394051075844147, 0.5759464955561779, 0.929296197576212],
1156
+ [0.5033230372781258, 0.9569852381759425, 0.08654043447295741],
1157
+ [0.18578301420435747, 0.5531158970919143, 0.19388943970532435],
1158
+ [0.4548589928173794, 0.35182513658825276, 0.568146063202464],
1159
+ [0.609279894978321, 0.7929063819678847, 0.9620097270828052],
1160
+ ]
1161
+ )
1162
+ precisions_init = np.array(
1163
+ [
1164
+ 999999.999604483,
1165
+ 999999.9990869573,
1166
+ 553.7603944542167,
1167
+ 204.78596008931834,
1168
+ 15.867423501783637,
1169
+ 85.4595728389735,
1170
+ ]
1171
+ )
1172
+ weights_init = [
1173
+ 0.03333333333333341,
1174
+ 0.03333333333333341,
1175
+ 0.06666666666666674,
1176
+ 0.06666666666666674,
1177
+ 0.7000000000000001,
1178
+ 0.10000000000000007,
1179
+ ]
1180
+
1181
+ gmm = GaussianMixture(
1182
+ covariance_type="spherical",
1183
+ reg_covar=0,
1184
+ means_init=means_init,
1185
+ weights_init=weights_init,
1186
+ random_state=rnd,
1187
+ n_components=len(weights_init),
1188
+ precisions_init=precisions_init,
1189
+ max_iter=1,
1190
+ )
1191
+ # ensure that no error is thrown during fit
1192
+ gmm.fit(X)
1193
+
1194
+ # check that the fit did not converge
1195
+ assert not gmm.converged_
1196
+
1197
+ # check that parameters are set for gmm
1198
+ for attr in [
1199
+ "weights_",
1200
+ "means_",
1201
+ "covariances_",
1202
+ "precisions_cholesky_",
1203
+ "n_iter_",
1204
+ "lower_bound_",
1205
+ ]:
1206
+ assert hasattr(gmm, attr)
1207
+
1208
+
1209
+ @pytest.mark.parametrize(
1210
+ "init_params", ["random", "random_from_data", "k-means++", "kmeans"]
1211
+ )
1212
+ def test_init_means_not_duplicated(init_params, global_random_seed):
1213
+ # Check that all initialisations provide non-duplicated starting means
1214
+ rng = np.random.RandomState(global_random_seed)
1215
+ rand_data = RandomData(rng, scale=5)
1216
+ n_components = rand_data.n_components
1217
+ X = rand_data.X["full"]
1218
+
1219
+ gmm = GaussianMixture(
1220
+ n_components=n_components, init_params=init_params, random_state=rng, max_iter=0
1221
+ )
1222
+ gmm.fit(X)
1223
+
1224
+ means = gmm.means_
1225
+ for i_mean, j_mean in itertools.combinations(means, r=2):
1226
+ assert not np.allclose(i_mean, j_mean)
1227
+
1228
+
1229
+ @pytest.mark.parametrize(
1230
+ "init_params", ["random", "random_from_data", "k-means++", "kmeans"]
1231
+ )
1232
+ def test_means_for_all_inits(init_params, global_random_seed):
1233
+ # Check fitted means properties for all initializations
1234
+ rng = np.random.RandomState(global_random_seed)
1235
+ rand_data = RandomData(rng, scale=5)
1236
+ n_components = rand_data.n_components
1237
+ X = rand_data.X["full"]
1238
+
1239
+ gmm = GaussianMixture(
1240
+ n_components=n_components, init_params=init_params, random_state=rng
1241
+ )
1242
+ gmm.fit(X)
1243
+
1244
+ assert gmm.means_.shape == (n_components, X.shape[1])
1245
+ assert np.all(X.min(axis=0) <= gmm.means_)
1246
+ assert np.all(gmm.means_ <= X.max(axis=0))
1247
+ assert gmm.converged_
1248
+
1249
+
1250
+ def test_max_iter_zero():
1251
+ # Check that max_iter=0 returns initialisation as expected
1252
+ # Pick arbitrary initial means and check they are returned unchanged
1253
+ rng = np.random.RandomState(0)
1254
+ rand_data = RandomData(rng, scale=5)
1255
+ n_components = rand_data.n_components
1256
+ X = rand_data.X["full"]
1257
+ means_init = [[20, 30], [30, 25]]
1258
+ gmm = GaussianMixture(
1259
+ n_components=n_components,
1260
+ random_state=rng,
1261
+ means_init=means_init,
1262
+ tol=1e-06,
1263
+ max_iter=0,
1264
+ )
1265
+ gmm.fit(X)
1266
+
1267
+ assert_allclose(gmm.means_, means_init)
1268
+
1269
+
1270
+ def test_gaussian_mixture_precisions_init_diag():
1271
+ """Check that we properly initialize `precision_cholesky_` when we manually
1272
+ provide the precision matrix.
1273
+
1274
+ In this regard, we check the consistency between estimating the precision
1275
+ matrix and providing the same precision matrix as initialization. It should
1276
+ lead to the same results with the same number of iterations.
1277
+
1278
+ If the initialization is wrong then the number of iterations will increase.
1279
+
1280
+ Non-regression test for:
1281
+ https://github.com/scikit-learn/scikit-learn/issues/16944
1282
+ """
1283
+ # generate a toy dataset
1284
+ n_samples = 300
1285
+ rng = np.random.RandomState(0)
1286
+ shifted_gaussian = rng.randn(n_samples, 2) + np.array([20, 20])
1287
+ C = np.array([[0.0, -0.7], [3.5, 0.7]])
1288
+ stretched_gaussian = np.dot(rng.randn(n_samples, 2), C)
1289
+ X = np.vstack([shifted_gaussian, stretched_gaussian])
1290
+
1291
+ # common parameters to check the consistency of precision initialization
1292
+ n_components, covariance_type, reg_covar, random_state = 2, "diag", 1e-6, 0
1293
+
1294
+ # execute the manual initialization to compute the precision matrix:
1295
+ # - run KMeans to have an initial guess
1296
+ # - estimate the covariance
1297
+ # - compute the precision matrix from the estimated covariance
1298
+ resp = np.zeros((X.shape[0], n_components))
1299
+ label = (
1300
+ KMeans(n_clusters=n_components, n_init=1, random_state=random_state)
1301
+ .fit(X)
1302
+ .labels_
1303
+ )
1304
+ resp[np.arange(X.shape[0]), label] = 1
1305
+ _, _, covariance = _estimate_gaussian_parameters(
1306
+ X, resp, reg_covar=reg_covar, covariance_type=covariance_type
1307
+ )
1308
+ precisions_init = 1 / covariance
1309
+
1310
+ gm_with_init = GaussianMixture(
1311
+ n_components=n_components,
1312
+ covariance_type=covariance_type,
1313
+ reg_covar=reg_covar,
1314
+ precisions_init=precisions_init,
1315
+ random_state=random_state,
1316
+ ).fit(X)
1317
+
1318
+ gm_without_init = GaussianMixture(
1319
+ n_components=n_components,
1320
+ covariance_type=covariance_type,
1321
+ reg_covar=reg_covar,
1322
+ random_state=random_state,
1323
+ ).fit(X)
1324
+
1325
+ assert gm_without_init.n_iter_ == gm_with_init.n_iter_
1326
+ assert_allclose(
1327
+ gm_with_init.precisions_cholesky_, gm_without_init.precisions_cholesky_
1328
+ )
1329
+
1330
+
1331
+ def _generate_data(seed, n_samples, n_features, n_components):
1332
+ """Randomly generate samples and responsibilities."""
1333
+ rs = np.random.RandomState(seed)
1334
+ X = rs.random_sample((n_samples, n_features))
1335
+ resp = rs.random_sample((n_samples, n_components))
1336
+ resp /= resp.sum(axis=1)[:, np.newaxis]
1337
+ return X, resp
1338
+
1339
+
1340
+ def _calculate_precisions(X, resp, covariance_type):
1341
+ """Calculate precision matrix of X and its Cholesky decomposition
1342
+ for the given covariance type.
1343
+ """
1344
+ reg_covar = 1e-6
1345
+ weights, means, covariances = _estimate_gaussian_parameters(
1346
+ X, resp, reg_covar, covariance_type
1347
+ )
1348
+ precisions_cholesky = _compute_precision_cholesky(covariances, covariance_type)
1349
+
1350
+ _, n_components = resp.shape
1351
+ # Instantiate a `GaussianMixture` model in order to use its
1352
+ # `_set_parameters` method to return the `precisions_` and
1353
+ # `precisions_cholesky_` from matching the `covariance_type`
1354
+ # provided.
1355
+ gmm = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
1356
+ params = (weights, means, covariances, precisions_cholesky)
1357
+ gmm._set_parameters(params)
1358
+ return gmm.precisions_, gmm.precisions_cholesky_
1359
+
1360
+
1361
+ @pytest.mark.parametrize("covariance_type", COVARIANCE_TYPE)
1362
+ def test_gaussian_mixture_precisions_init(covariance_type, global_random_seed):
1363
+ """Non-regression test for #26415."""
1364
+
1365
+ X, resp = _generate_data(
1366
+ seed=global_random_seed,
1367
+ n_samples=100,
1368
+ n_features=3,
1369
+ n_components=4,
1370
+ )
1371
+
1372
+ precisions_init, desired_precisions_cholesky = _calculate_precisions(
1373
+ X, resp, covariance_type
1374
+ )
1375
+ gmm = GaussianMixture(
1376
+ covariance_type=covariance_type, precisions_init=precisions_init
1377
+ )
1378
+ gmm._initialize(X, resp)
1379
+ actual_precisions_cholesky = gmm.precisions_cholesky_
1380
+ assert_allclose(actual_precisions_cholesky, desired_precisions_cholesky)
1381
+
1382
+
1383
+ def test_gaussian_mixture_single_component_stable():
1384
+ """
1385
+ Non-regression test for #23032 ensuring 1-component GM works on only a
1386
+ few samples.
1387
+ """
1388
+ rng = np.random.RandomState(0)
1389
+ X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3)
1390
+ gm = GaussianMixture(n_components=1)
1391
+ gm.fit(X).sample()
1392
+
1393
+
1394
+ def test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters(
1395
+ monkeypatch,
1396
+ global_random_seed,
1397
+ ):
1398
+ """When all init parameters are provided, the Gaussian parameters
1399
+ are not estimated.
1400
+
1401
+ Non-regression test for gh-26015.
1402
+ """
1403
+
1404
+ mock = Mock(side_effect=_estimate_gaussian_parameters)
1405
+ monkeypatch.setattr(
1406
+ sklearn.mixture._gaussian_mixture, "_estimate_gaussian_parameters", mock
1407
+ )
1408
+
1409
+ rng = np.random.RandomState(global_random_seed)
1410
+ rand_data = RandomData(rng)
1411
+
1412
+ gm = GaussianMixture(
1413
+ n_components=rand_data.n_components,
1414
+ weights_init=rand_data.weights,
1415
+ means_init=rand_data.means,
1416
+ precisions_init=rand_data.precisions["full"],
1417
+ random_state=rng,
1418
+ )
1419
+ gm.fit(rand_data.X["full"])
1420
+ # The initial gaussian parameters are not estimated. They are estimated for every
1421
+ # m_step.
1422
+ assert mock.call_count == gm.n_iter_
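Several of the tests above (AIC/BIC, score, n_init) exercise the model-selection side of GaussianMixture. Here is a minimal sketch of that usage, with illustrative toy data and an assumed candidate range rather than anything from the committed file.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(200, 2), rng.randn(200, 2) + 6])

# Fit one model per candidate component count and keep the lowest BIC,
# mirroring how the aic/bic tests above use these criteria.
bics = {
    k: GaussianMixture(n_components=k, random_state=0).fit(X).bic(X)
    for k in range(1, 5)
}
best_k = min(bics, key=bics.get)
print(best_k, bics[best_k])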
llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py ADDED
@@ -0,0 +1,30 @@
1
+ # Author: Guillaume Lemaitre <[email protected]>
2
+ # License: BSD 3 clause
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from sklearn.mixture import BayesianGaussianMixture, GaussianMixture
8
+
9
+
10
+ @pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()])
11
+ def test_gaussian_mixture_n_iter(estimator):
12
+ # check that n_iter is the number of iteration performed.
13
+ rng = np.random.RandomState(0)
14
+ X = rng.rand(10, 5)
15
+ max_iter = 1
16
+ estimator.set_params(max_iter=max_iter)
17
+ estimator.fit(X)
18
+ assert estimator.n_iter_ == max_iter
19
+
20
+
21
+ @pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()])
22
+ def test_mixture_n_components_greater_than_n_samples_error(estimator):
23
+ """Check error when n_components <= n_samples"""
24
+ rng = np.random.RandomState(0)
25
+ X = rng.rand(10, 5)
26
+ estimator.set_params(n_components=12)
27
+
28
+ msg = "Expected n_samples >= n_components"
29
+ with pytest.raises(ValueError, match=msg):
30
+ estimator.fit(X)
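Both behaviours checked in this small file can be reproduced directly; the sketch below uses arbitrary toy data (an assumption for illustration, not part of the commit).

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = rng.rand(10, 5)

# n_iter_ reports how many EM iterations were actually performed
# (a ConvergenceWarning is expected here since max_iter=1).
gm = GaussianMixture(n_components=2, max_iter=1, random_state=0).fit(X)
assert gm.n_iter_ == 1

# Requesting more components than samples raises a ValueError.
try:
    GaussianMixture(n_components=12).fit(X)
except ValueError as exc:
    assert "Expected n_samples >= n_components" in str(exc)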
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py ADDED
@@ -0,0 +1,63 @@
1
+ """
2
+ The :mod:`sklearn.preprocessing` module includes scaling, centering,
3
+ normalization, and binarization methods.
4
+ """
5
+
6
+ from ._data import (
7
+ Binarizer,
8
+ KernelCenterer,
9
+ MaxAbsScaler,
10
+ MinMaxScaler,
11
+ Normalizer,
12
+ PowerTransformer,
13
+ QuantileTransformer,
14
+ RobustScaler,
15
+ StandardScaler,
16
+ add_dummy_feature,
17
+ binarize,
18
+ maxabs_scale,
19
+ minmax_scale,
20
+ normalize,
21
+ power_transform,
22
+ quantile_transform,
23
+ robust_scale,
24
+ scale,
25
+ )
26
+ from ._discretization import KBinsDiscretizer
27
+ from ._encoders import OneHotEncoder, OrdinalEncoder
28
+ from ._function_transformer import FunctionTransformer
29
+ from ._label import LabelBinarizer, LabelEncoder, MultiLabelBinarizer, label_binarize
30
+ from ._polynomial import PolynomialFeatures, SplineTransformer
31
+ from ._target_encoder import TargetEncoder
32
+
33
+ __all__ = [
34
+ "Binarizer",
35
+ "FunctionTransformer",
36
+ "KBinsDiscretizer",
37
+ "KernelCenterer",
38
+ "LabelBinarizer",
39
+ "LabelEncoder",
40
+ "MultiLabelBinarizer",
41
+ "MinMaxScaler",
42
+ "MaxAbsScaler",
43
+ "QuantileTransformer",
44
+ "Normalizer",
45
+ "OneHotEncoder",
46
+ "OrdinalEncoder",
47
+ "PowerTransformer",
48
+ "RobustScaler",
49
+ "SplineTransformer",
50
+ "StandardScaler",
51
+ "TargetEncoder",
52
+ "add_dummy_feature",
53
+ "PolynomialFeatures",
54
+ "binarize",
55
+ "normalize",
56
+ "scale",
57
+ "robust_scale",
58
+ "maxabs_scale",
59
+ "minmax_scale",
60
+ "label_binarize",
61
+ "quantile_transform",
62
+ "power_transform",
63
+ ]
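The transformers re-exported above all follow the usual fit/transform convention; a minimal sketch with a made-up array (an assumption for illustration only):

import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler

X = np.array([[1.0, 10.0], [2.0, 0.0], [3.0, 5.0]])

# StandardScaler centers each column and scales it to unit variance.
X_std = StandardScaler().fit_transform(X)

# MinMaxScaler rescales each column to the [0, 1] range by default.
X_01 = MinMaxScaler().fit_transform(X)

print(X_std.mean(axis=0))                  # approximately [0, 0]
print(X_01.min(axis=0), X_01.max(axis=0))  # [0. 0.] [1. 1.]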
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (492 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_data.py ADDED
The diff for this file is too large to render.
 
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py ADDED
@@ -0,0 +1,472 @@
1
+ # Author: Henry Lin <[email protected]>
2
+ # Tom Dupré la Tour
3
+
4
+ # License: BSD
5
+
6
+
7
+ import warnings
8
+ from numbers import Integral
9
+
10
+ import numpy as np
11
+
12
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
13
+ from ..utils import _safe_indexing
14
+ from ..utils._param_validation import Hidden, Interval, Options, StrOptions
15
+ from ..utils.stats import _weighted_percentile
16
+ from ..utils.validation import (
17
+ _check_feature_names_in,
18
+ _check_sample_weight,
19
+ check_array,
20
+ check_is_fitted,
21
+ check_random_state,
22
+ )
23
+ from ._encoders import OneHotEncoder
24
+
25
+
26
+ class KBinsDiscretizer(TransformerMixin, BaseEstimator):
27
+ """
28
+ Bin continuous data into intervals.
29
+
30
+ Read more in the :ref:`User Guide <preprocessing_discretization>`.
31
+
32
+ .. versionadded:: 0.20
33
+
34
+ Parameters
35
+ ----------
36
+ n_bins : int or array-like of shape (n_features,), default=5
37
+ The number of bins to produce. Raises ValueError if ``n_bins < 2``.
38
+
39
+ encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot'
40
+ Method used to encode the transformed result.
41
+
42
+ - 'onehot': Encode the transformed result with one-hot encoding
43
+ and return a sparse matrix. Ignored features are always
44
+ stacked to the right.
45
+ - 'onehot-dense': Encode the transformed result with one-hot encoding
46
+ and return a dense array. Ignored features are always
47
+ stacked to the right.
48
+ - 'ordinal': Return the bin identifier encoded as an integer value.
49
+
50
+ strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile'
51
+ Strategy used to define the widths of the bins.
52
+
53
+ - 'uniform': All bins in each feature have identical widths.
54
+ - 'quantile': All bins in each feature have the same number of points.
55
+ - 'kmeans': Values in each bin have the same nearest center of a 1D
56
+ k-means cluster.
57
+
58
+ For an example of the different strategies see:
59
+ :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_strategies.py`.
60
+
61
+ dtype : {np.float32, np.float64}, default=None
62
+ The desired data-type for the output. If None, output dtype is
63
+ consistent with input dtype. Only np.float32 and np.float64 are
64
+ supported.
65
+
66
+ .. versionadded:: 0.24
67
+
68
+ subsample : int or None, default='warn'
69
+ Maximum number of samples, used to fit the model, for computational
70
+ efficiency. Defaults to 200_000 when `strategy='quantile'` and to `None`
71
+ when `strategy='uniform'` or `strategy='kmeans'`.
72
+ `subsample=None` means that all the training samples are used when
73
+ computing the quantiles that determine the binning thresholds.
74
+ Since quantile computation relies on sorting each column of `X` and
75
+ that sorting has an `n log(n)` time complexity,
76
+ it is recommended to use subsampling on datasets with a
77
+ very large number of samples.
78
+
79
+ .. versionchanged:: 1.3
80
+ The default value of `subsample` changed from `None` to `200_000` when
81
+ `strategy="quantile"`.
82
+
83
+ .. versionchanged:: 1.5
84
+ The default value of `subsample` changed from `None` to `200_000` when
85
+ `strategy="uniform"` or `strategy="kmeans"`.
86
+
87
+ random_state : int, RandomState instance or None, default=None
88
+ Determines random number generation for subsampling.
89
+ Pass an int for reproducible results across multiple function calls.
90
+ See the `subsample` parameter for more details.
91
+ See :term:`Glossary <random_state>`.
92
+
93
+ .. versionadded:: 1.1
94
+
95
+ Attributes
96
+ ----------
97
+ bin_edges_ : ndarray of ndarray of shape (n_features,)
98
+ The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
99
+ Ignored features will have empty arrays.
100
+
101
+ n_bins_ : ndarray of shape (n_features,), dtype=np.int64
102
+ Number of bins per feature. Bins whose width are too small
103
+ (i.e., <= 1e-8) are removed with a warning.
104
+
105
+ n_features_in_ : int
106
+ Number of features seen during :term:`fit`.
107
+
108
+ .. versionadded:: 0.24
109
+
110
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
111
+ Names of features seen during :term:`fit`. Defined only when `X`
112
+ has feature names that are all strings.
113
+
114
+ .. versionadded:: 1.0
115
+
116
+ See Also
117
+ --------
118
+ Binarizer : Class used to bin values as ``0`` or
119
+ ``1`` based on a parameter ``threshold``.
120
+
121
+ Notes
122
+ -----
123
+
124
+ For a visualization of discretization on different datasets refer to
125
+ :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_classification.py`.
126
+ On the effect of discretization on linear models see:
127
+ :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization.py`.
128
+
129
+ In bin edges for feature ``i``, the first and last values are used only for
130
+ ``inverse_transform``. During transform, bin edges are extended to::
131
+
132
+ np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
133
+
134
+ You can combine ``KBinsDiscretizer`` with
135
+ :class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess
136
+ part of the features.
137
+
138
+ ``KBinsDiscretizer`` might produce constant features (e.g., when
139
+ ``encode = 'onehot'`` and certain bins do not contain any data).
140
+ These features can be removed with feature selection algorithms
141
+ (e.g., :class:`~sklearn.feature_selection.VarianceThreshold`).
142
+
143
+ Examples
144
+ --------
145
+ >>> from sklearn.preprocessing import KBinsDiscretizer
146
+ >>> X = [[-2, 1, -4, -1],
147
+ ... [-1, 2, -3, -0.5],
148
+ ... [ 0, 3, -2, 0.5],
149
+ ... [ 1, 4, -1, 2]]
150
+ >>> est = KBinsDiscretizer(
151
+ ... n_bins=3, encode='ordinal', strategy='uniform', subsample=None
152
+ ... )
153
+ >>> est.fit(X)
154
+ KBinsDiscretizer(...)
155
+ >>> Xt = est.transform(X)
156
+ >>> Xt # doctest: +SKIP
157
+ array([[ 0., 0., 0., 0.],
158
+ [ 1., 1., 1., 0.],
159
+ [ 2., 2., 2., 1.],
160
+ [ 2., 2., 2., 2.]])
161
+
162
+ Sometimes it may be useful to convert the data back into the original
163
+ feature space. The ``inverse_transform`` function converts the binned
164
+ data into the original feature space. Each value will be equal to the mean
165
+ of the two bin edges.
166
+
167
+ >>> est.bin_edges_[0]
168
+ array([-2., -1., 0., 1.])
169
+ >>> est.inverse_transform(Xt)
170
+ array([[-1.5, 1.5, -3.5, -0.5],
171
+ [-0.5, 2.5, -2.5, -0.5],
172
+ [ 0.5, 3.5, -1.5, 0.5],
173
+ [ 0.5, 3.5, -1.5, 1.5]])
174
+ """
175
+
176
+ _parameter_constraints: dict = {
177
+ "n_bins": [Interval(Integral, 2, None, closed="left"), "array-like"],
178
+ "encode": [StrOptions({"onehot", "onehot-dense", "ordinal"})],
179
+ "strategy": [StrOptions({"uniform", "quantile", "kmeans"})],
180
+ "dtype": [Options(type, {np.float64, np.float32}), None],
181
+ "subsample": [
182
+ Interval(Integral, 1, None, closed="left"),
183
+ None,
184
+ Hidden(StrOptions({"warn"})),
185
+ ],
186
+ "random_state": ["random_state"],
187
+ }
188
+
189
+ def __init__(
190
+ self,
191
+ n_bins=5,
192
+ *,
193
+ encode="onehot",
194
+ strategy="quantile",
195
+ dtype=None,
196
+ subsample="warn",
197
+ random_state=None,
198
+ ):
199
+ self.n_bins = n_bins
200
+ self.encode = encode
201
+ self.strategy = strategy
202
+ self.dtype = dtype
203
+ self.subsample = subsample
204
+ self.random_state = random_state
205
+
206
+ @_fit_context(prefer_skip_nested_validation=True)
207
+ def fit(self, X, y=None, sample_weight=None):
208
+ """
209
+ Fit the estimator.
210
+
211
+ Parameters
212
+ ----------
213
+ X : array-like of shape (n_samples, n_features)
214
+ Data to be discretized.
215
+
216
+ y : None
217
+ Ignored. This parameter exists only for compatibility with
218
+ :class:`~sklearn.pipeline.Pipeline`.
219
+
220
+ sample_weight : ndarray of shape (n_samples,)
221
+ Contains weight values to be associated with each sample.
222
+ Only possible when `strategy` is set to `"quantile"`.
223
+
224
+ .. versionadded:: 1.3
225
+
226
+ Returns
227
+ -------
228
+ self : object
229
+ Returns the instance itself.
230
+ """
231
+ X = self._validate_data(X, dtype="numeric")
232
+
233
+ if self.dtype in (np.float64, np.float32):
234
+ output_dtype = self.dtype
235
+ else: # self.dtype is None
236
+ output_dtype = X.dtype
237
+
238
+ n_samples, n_features = X.shape
239
+
240
+ if sample_weight is not None and self.strategy == "uniform":
241
+ raise ValueError(
242
+ "`sample_weight` was provided but it cannot be "
243
+ "used with strategy='uniform'. Got strategy="
244
+ f"{self.strategy!r} instead."
245
+ )
246
+
247
+ if self.strategy in ("uniform", "kmeans") and self.subsample == "warn":
248
+ warnings.warn(
249
+ (
250
+ "In version 1.5 onwards, subsample=200_000 "
251
+ "will be used by default. Set subsample explicitly to "
252
+ "silence this warning in the mean time. Set "
253
+ "subsample=None to disable subsampling explicitly."
254
+ ),
255
+ FutureWarning,
256
+ )
257
+
258
+ subsample = self.subsample
259
+ if subsample == "warn":
260
+ subsample = 200000 if self.strategy == "quantile" else None
261
+ if subsample is not None and n_samples > subsample:
262
+ rng = check_random_state(self.random_state)
263
+ subsample_idx = rng.choice(n_samples, size=subsample, replace=False)
264
+ X = _safe_indexing(X, subsample_idx)
265
+
266
+ n_features = X.shape[1]
267
+ n_bins = self._validate_n_bins(n_features)
268
+
269
+ if sample_weight is not None:
270
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
271
+
272
+ bin_edges = np.zeros(n_features, dtype=object)
273
+ for jj in range(n_features):
274
+ column = X[:, jj]
275
+ col_min, col_max = column.min(), column.max()
276
+
277
+ if col_min == col_max:
278
+ warnings.warn(
279
+ "Feature %d is constant and will be replaced with 0." % jj
280
+ )
281
+ n_bins[jj] = 1
282
+ bin_edges[jj] = np.array([-np.inf, np.inf])
283
+ continue
284
+
285
+ if self.strategy == "uniform":
286
+ bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
287
+
288
+ elif self.strategy == "quantile":
289
+ quantiles = np.linspace(0, 100, n_bins[jj] + 1)
290
+ if sample_weight is None:
291
+ bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
292
+ else:
293
+ bin_edges[jj] = np.asarray(
294
+ [
295
+ _weighted_percentile(column, sample_weight, q)
296
+ for q in quantiles
297
+ ],
298
+ dtype=np.float64,
299
+ )
300
+ elif self.strategy == "kmeans":
301
+ from ..cluster import KMeans # fixes import loops
302
+
303
+ # Deterministic initialization with uniform spacing
304
+ uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
305
+ init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
306
+
307
+ # 1D k-means procedure
308
+ km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
309
+ centers = km.fit(
310
+ column[:, None], sample_weight=sample_weight
311
+ ).cluster_centers_[:, 0]
312
+ # Must sort, centers may be unsorted even with sorted init
313
+ centers.sort()
314
+ bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
315
+ bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
316
+
317
+ # Remove bins whose width are too small (i.e., <= 1e-8)
318
+ if self.strategy in ("quantile", "kmeans"):
319
+ mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
320
+ bin_edges[jj] = bin_edges[jj][mask]
321
+ if len(bin_edges[jj]) - 1 != n_bins[jj]:
322
+ warnings.warn(
323
+ "Bins whose width are too small (i.e., <= "
324
+ "1e-8) in feature %d are removed. Consider "
325
+ "decreasing the number of bins." % jj
326
+ )
327
+ n_bins[jj] = len(bin_edges[jj]) - 1
328
+
329
+ self.bin_edges_ = bin_edges
330
+ self.n_bins_ = n_bins
331
+
332
+ if "onehot" in self.encode:
333
+ self._encoder = OneHotEncoder(
334
+ categories=[np.arange(i) for i in self.n_bins_],
335
+ sparse_output=self.encode == "onehot",
336
+ dtype=output_dtype,
337
+ )
338
+ # Fit the OneHotEncoder with toy datasets
339
+ # so that it's ready for use after the KBinsDiscretizer is fitted
340
+ self._encoder.fit(np.zeros((1, len(self.n_bins_))))
341
+
342
+ return self
343
+
344
+ def _validate_n_bins(self, n_features):
345
+ """Returns n_bins_, the number of bins per feature."""
346
+ orig_bins = self.n_bins
347
+ if isinstance(orig_bins, Integral):
348
+ return np.full(n_features, orig_bins, dtype=int)
349
+
350
+ n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)
351
+
352
+ if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
353
+ raise ValueError("n_bins must be a scalar or array of shape (n_features,).")
354
+
355
+ bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
356
+
357
+ violating_indices = np.where(bad_nbins_value)[0]
358
+ if violating_indices.shape[0] > 0:
359
+ indices = ", ".join(str(i) for i in violating_indices)
360
+ raise ValueError(
361
+ "{} received an invalid number "
362
+ "of bins at indices {}. Number of bins "
363
+ "must be at least 2, and must be an int.".format(
364
+ KBinsDiscretizer.__name__, indices
365
+ )
366
+ )
367
+ return n_bins
368
+
369
+ def transform(self, X):
370
+ """
371
+ Discretize the data.
372
+
373
+ Parameters
374
+ ----------
375
+ X : array-like of shape (n_samples, n_features)
376
+ Data to be discretized.
377
+
378
+ Returns
379
+ -------
380
+ Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
381
+ Data in the binned space. Will be a sparse matrix if
382
+ `self.encode='onehot'` and ndarray otherwise.
383
+ """
384
+ check_is_fitted(self)
385
+
386
+ # check input and attribute dtypes
387
+ dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
388
+ Xt = self._validate_data(X, copy=True, dtype=dtype, reset=False)
389
+
390
+ bin_edges = self.bin_edges_
391
+ for jj in range(Xt.shape[1]):
392
+ Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right")
393
+
394
+ if self.encode == "ordinal":
395
+ return Xt
396
+
397
+ dtype_init = None
398
+ if "onehot" in self.encode:
399
+ dtype_init = self._encoder.dtype
400
+ self._encoder.dtype = Xt.dtype
401
+ try:
402
+ Xt_enc = self._encoder.transform(Xt)
403
+ finally:
404
+ # revert the initial dtype to avoid modifying self.
405
+ self._encoder.dtype = dtype_init
406
+ return Xt_enc
407
+
408
+ def inverse_transform(self, Xt):
409
+ """
410
+ Transform discretized data back to original feature space.
411
+
412
+ Note that this function does not regenerate the original data
413
+ due to discretization rounding.
414
+
415
+ Parameters
416
+ ----------
417
+ Xt : array-like of shape (n_samples, n_features)
418
+ Transformed data in the binned space.
419
+
420
+ Returns
421
+ -------
422
+ Xinv : ndarray, dtype={np.float32, np.float64}
423
+ Data in the original feature space.
424
+ """
425
+ check_is_fitted(self)
426
+
427
+ if "onehot" in self.encode:
428
+ Xt = self._encoder.inverse_transform(Xt)
429
+
430
+ Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32))
431
+ n_features = self.n_bins_.shape[0]
432
+ if Xinv.shape[1] != n_features:
433
+ raise ValueError(
434
+ "Incorrect number of features. Expecting {}, received {}.".format(
435
+ n_features, Xinv.shape[1]
436
+ )
437
+ )
438
+
439
+ for jj in range(n_features):
440
+ bin_edges = self.bin_edges_[jj]
441
+ bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
442
+ Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)]
443
+
444
+ return Xinv
445
+
446
+ def get_feature_names_out(self, input_features=None):
447
+ """Get output feature names.
448
+
449
+ Parameters
450
+ ----------
451
+ input_features : array-like of str or None, default=None
452
+ Input features.
453
+
454
+ - If `input_features` is `None`, then `feature_names_in_` is
455
+ used as feature names in. If `feature_names_in_` is not defined,
456
+ then the following input feature names are generated:
457
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
458
+ - If `input_features` is an array-like, then `input_features` must
459
+ match `feature_names_in_` if `feature_names_in_` is defined.
460
+
461
+ Returns
462
+ -------
463
+ feature_names_out : ndarray of str objects
464
+ Transformed feature names.
465
+ """
466
+ check_is_fitted(self, "n_features_in_")
467
+ input_features = _check_feature_names_in(self, input_features)
468
+ if hasattr(self, "_encoder"):
469
+ return self._encoder.get_feature_names_out(input_features)
470
+
471
+ # ordinal encoding
472
+ return input_features
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py ADDED
@@ -0,0 +1,1678 @@
1
+ # Authors: Andreas Mueller <[email protected]>
2
+ # Joris Van den Bossche <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import numbers
6
+ import warnings
7
+ from numbers import Integral
8
+
9
+ import numpy as np
10
+ from scipy import sparse
11
+
12
+ from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context
13
+ from ..utils import _safe_indexing, check_array, is_scalar_nan
14
+ from ..utils._encode import _check_unknown, _encode, _get_counts, _unique
15
+ from ..utils._mask import _get_mask
16
+ from ..utils._param_validation import Interval, RealNotInt, StrOptions
17
+ from ..utils._set_output import _get_output_config
18
+ from ..utils.validation import _check_feature_names_in, check_is_fitted
19
+
20
+ __all__ = ["OneHotEncoder", "OrdinalEncoder"]
21
+
22
+
23
+ class _BaseEncoder(TransformerMixin, BaseEstimator):
24
+ """
25
+ Base class for encoders that includes the code to categorize and
26
+ transform the input features.
27
+
28
+ """
29
+
30
+ def _check_X(self, X, force_all_finite=True):
31
+ """
32
+ Perform custom check_array:
33
+ - convert list of strings to object dtype
34
+ - check for missing values for object dtype data (check_array does
35
+ not do that)
36
+ - return list of features (arrays): this list of features is
37
+ constructed feature by feature to preserve the data types
38
+ of pandas DataFrame columns, as otherwise information is lost
39
+ and cannot be used, e.g. for the `categories_` attribute.
40
+
41
+ """
42
+ if not (hasattr(X, "iloc") and getattr(X, "ndim", 0) == 2):
43
+ # if not a dataframe, do normal check_array validation
44
+ X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite)
45
+ if not hasattr(X, "dtype") and np.issubdtype(X_temp.dtype, np.str_):
46
+ X = check_array(X, dtype=object, force_all_finite=force_all_finite)
47
+ else:
48
+ X = X_temp
49
+ needs_validation = False
50
+ else:
51
+ # pandas dataframe, do validation later column by column, in order
52
+ # to keep the dtype information to be used in the encoder.
53
+ needs_validation = force_all_finite
54
+
55
+ n_samples, n_features = X.shape
56
+ X_columns = []
57
+
58
+ for i in range(n_features):
59
+ Xi = _safe_indexing(X, indices=i, axis=1)
60
+ Xi = check_array(
61
+ Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation
62
+ )
63
+ X_columns.append(Xi)
64
+
65
+ return X_columns, n_samples, n_features
66
+
67
+ def _fit(
68
+ self,
69
+ X,
70
+ handle_unknown="error",
71
+ force_all_finite=True,
72
+ return_counts=False,
73
+ return_and_ignore_missing_for_infrequent=False,
74
+ ):
75
+ self._check_infrequent_enabled()
76
+ self._check_n_features(X, reset=True)
77
+ self._check_feature_names(X, reset=True)
78
+ X_list, n_samples, n_features = self._check_X(
79
+ X, force_all_finite=force_all_finite
80
+ )
81
+ self.n_features_in_ = n_features
82
+
83
+ if self.categories != "auto":
84
+ if len(self.categories) != n_features:
85
+ raise ValueError(
86
+ "Shape mismatch: if categories is an array,"
87
+ " it has to be of shape (n_features,)."
88
+ )
89
+
90
+ self.categories_ = []
91
+ category_counts = []
92
+ compute_counts = return_counts or self._infrequent_enabled
93
+
94
+ for i in range(n_features):
95
+ Xi = X_list[i]
96
+
97
+ if self.categories == "auto":
98
+ result = _unique(Xi, return_counts=compute_counts)
99
+ if compute_counts:
100
+ cats, counts = result
101
+ category_counts.append(counts)
102
+ else:
103
+ cats = result
104
+ else:
105
+ if np.issubdtype(Xi.dtype, np.str_):
106
+ # Always convert string categories to objects to avoid
107
+ # unexpected string truncation for longer category labels
108
+ # passed in the constructor.
109
+ Xi_dtype = object
110
+ else:
111
+ Xi_dtype = Xi.dtype
112
+
113
+ cats = np.array(self.categories[i], dtype=Xi_dtype)
114
+ if (
115
+ cats.dtype == object
116
+ and isinstance(cats[0], bytes)
117
+ and Xi.dtype.kind != "S"
118
+ ):
119
+ msg = (
120
+ f"In column {i}, the predefined categories have type 'bytes'"
121
+ " which is incompatible with values of type"
122
+ f" '{type(Xi[0]).__name__}'."
123
+ )
124
+ raise ValueError(msg)
125
+
126
+ # `nan` must be the last stated category
127
+ for category in cats[:-1]:
128
+ if is_scalar_nan(category):
129
+ raise ValueError(
130
+ "Nan should be the last element in user"
131
+ f" provided categories, see categories {cats}"
132
+ f" in column #{i}"
133
+ )
134
+
135
+ if cats.size != len(_unique(cats)):
136
+ msg = (
137
+ f"In column {i}, the predefined categories"
138
+ " contain duplicate elements."
139
+ )
140
+ raise ValueError(msg)
141
+
142
+ if Xi.dtype.kind not in "OUS":
143
+ sorted_cats = np.sort(cats)
144
+ error_msg = (
145
+ "Unsorted categories are not supported for numerical categories"
146
+ )
147
+ # if there are nans, nan should be the last element
148
+ stop_idx = -1 if np.isnan(sorted_cats[-1]) else None
149
+ if np.any(sorted_cats[:stop_idx] != cats[:stop_idx]):
150
+ raise ValueError(error_msg)
151
+
152
+ if handle_unknown == "error":
153
+ diff = _check_unknown(Xi, cats)
154
+ if diff:
155
+ msg = (
156
+ "Found unknown categories {0} in column {1}"
157
+ " during fit".format(diff, i)
158
+ )
159
+ raise ValueError(msg)
160
+ if compute_counts:
161
+ category_counts.append(_get_counts(Xi, cats))
162
+
163
+ self.categories_.append(cats)
164
+
165
+ output = {"n_samples": n_samples}
166
+ if return_counts:
167
+ output["category_counts"] = category_counts
168
+
169
+ missing_indices = {}
170
+ if return_and_ignore_missing_for_infrequent:
171
+ for feature_idx, categories_for_idx in enumerate(self.categories_):
172
+ if is_scalar_nan(categories_for_idx[-1]):
173
+ # `nan` values can only be placed in the latest position
174
+ missing_indices[feature_idx] = categories_for_idx.size - 1
175
+ output["missing_indices"] = missing_indices
176
+
177
+ if self._infrequent_enabled:
178
+ self._fit_infrequent_category_mapping(
179
+ n_samples,
180
+ category_counts,
181
+ missing_indices,
182
+ )
183
+ return output
184
+
185
+ def _transform(
186
+ self,
187
+ X,
188
+ handle_unknown="error",
189
+ force_all_finite=True,
190
+ warn_on_unknown=False,
191
+ ignore_category_indices=None,
192
+ ):
193
+ X_list, n_samples, n_features = self._check_X(
194
+ X, force_all_finite=force_all_finite
195
+ )
196
+ self._check_feature_names(X, reset=False)
197
+ self._check_n_features(X, reset=False)
198
+
199
+ X_int = np.zeros((n_samples, n_features), dtype=int)
200
+ X_mask = np.ones((n_samples, n_features), dtype=bool)
201
+
202
+ columns_with_unknown = []
203
+ for i in range(n_features):
204
+ Xi = X_list[i]
205
+ diff, valid_mask = _check_unknown(Xi, self.categories_[i], return_mask=True)
206
+
207
+ if not np.all(valid_mask):
208
+ if handle_unknown == "error":
209
+ msg = (
210
+ "Found unknown categories {0} in column {1}"
211
+ " during transform".format(diff, i)
212
+ )
213
+ raise ValueError(msg)
214
+ else:
215
+ if warn_on_unknown:
216
+ columns_with_unknown.append(i)
217
+ # Set the problematic rows to an acceptable value and
218
+ # continue. The rows are marked in `X_mask` and will be
219
+ # removed later.
220
+ X_mask[:, i] = valid_mask
221
+ # cast Xi into the largest string type necessary
222
+ # to handle different lengths of numpy strings
223
+ if (
224
+ self.categories_[i].dtype.kind in ("U", "S")
225
+ and self.categories_[i].itemsize > Xi.itemsize
226
+ ):
227
+ Xi = Xi.astype(self.categories_[i].dtype)
228
+ elif self.categories_[i].dtype.kind == "O" and Xi.dtype.kind == "U":
229
+ # categories are objects and Xi are numpy strings.
230
+ # Cast Xi to an object dtype to prevent truncation
231
+ # when setting invalid values.
232
+ Xi = Xi.astype("O")
233
+ else:
234
+ Xi = Xi.copy()
235
+
236
+ Xi[~valid_mask] = self.categories_[i][0]
237
+ # We use check_unknown=False, since _check_unknown was
238
+ # already called above.
239
+ X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False)
240
+ if columns_with_unknown:
241
+ warnings.warn(
242
+ (
243
+ "Found unknown categories in columns "
244
+ f"{columns_with_unknown} during transform. These "
245
+ "unknown categories will be encoded as all zeros"
246
+ ),
247
+ UserWarning,
248
+ )
249
+
250
+ self._map_infrequent_categories(X_int, X_mask, ignore_category_indices)
251
+ return X_int, X_mask
252
+
253
+ @property
254
+ def infrequent_categories_(self):
255
+ """Infrequent categories for each feature."""
256
+ # raises an AttributeError if `_infrequent_indices` is not defined
257
+ infrequent_indices = self._infrequent_indices
258
+ return [
259
+ None if indices is None else category[indices]
260
+ for category, indices in zip(self.categories_, infrequent_indices)
261
+ ]
262
+
263
+ def _check_infrequent_enabled(self):
264
+ """
265
+ This function checks whether _infrequent_enabled is True or False.
266
+ This has to be called after parameter validation in the fit function.
267
+ """
268
+ max_categories = getattr(self, "max_categories", None)
269
+ min_frequency = getattr(self, "min_frequency", None)
270
+ self._infrequent_enabled = (
271
+ max_categories is not None and max_categories >= 1
272
+ ) or min_frequency is not None
273
+
274
+ def _identify_infrequent(self, category_count, n_samples, col_idx):
275
+ """Compute the infrequent indices.
276
+
277
+ Parameters
278
+ ----------
279
+ category_count : ndarray of shape (n_cardinality,)
280
+ Category counts.
281
+
282
+ n_samples : int
283
+ Number of samples.
284
+
285
+ col_idx : int
286
+ Index of the current category. Only used for the error message.
287
+
288
+ Returns
289
+ -------
290
+ output : ndarray of shape (n_infrequent_categories,) or None
291
+ If there are infrequent categories, indices of infrequent
292
+ categories. Otherwise None.
293
+ """
294
+ if isinstance(self.min_frequency, numbers.Integral):
295
+ infrequent_mask = category_count < self.min_frequency
296
+ elif isinstance(self.min_frequency, numbers.Real):
297
+ min_frequency_abs = n_samples * self.min_frequency
298
+ infrequent_mask = category_count < min_frequency_abs
299
+ else:
300
+ infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)
301
+
302
+ n_current_features = category_count.size - infrequent_mask.sum() + 1
303
+ if self.max_categories is not None and self.max_categories < n_current_features:
304
+ # max_categories includes the one infrequent category
305
+ frequent_category_count = self.max_categories - 1
306
+ if frequent_category_count == 0:
307
+ # All categories are infrequent
308
+ infrequent_mask[:] = True
309
+ else:
310
+ # stable sort to preserve original count order
311
+ smallest_levels = np.argsort(category_count, kind="mergesort")[
312
+ :-frequent_category_count
313
+ ]
314
+ infrequent_mask[smallest_levels] = True
315
+
316
+ output = np.flatnonzero(infrequent_mask)
317
+ return output if output.size > 0 else None
318
+
319
+ def _fit_infrequent_category_mapping(
320
+ self, n_samples, category_counts, missing_indices
321
+ ):
322
+ """Fit infrequent categories.
323
+
324
+ Defines the private attribute: `_default_to_infrequent_mappings`. For
325
+ feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping
326
+ from the integer encoding returned by `super().transform()` into
327
+ infrequent categories. If `_default_to_infrequent_mappings[i]` is None,
328
+ there were no infrequent categories in the training set.
329
+
330
+ For example if categories 0, 2 and 4 were frequent, while categories
331
+ 1, 3, 5 were infrequent for feature 7, then these categories are mapped
332
+ to a single output:
333
+ `_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`
334
+
335
+ Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]`
336
+ is an array of indices such that
337
+ `categories_[i][_infrequent_indices[i]]` are all the infrequent category
338
+ labels. If the feature `i` has no infrequent categories
339
+ `_infrequent_indices[i]` is None.
340
+
341
+ .. versionadded:: 1.1
342
+
343
+ Parameters
344
+ ----------
345
+ n_samples : int
346
+ Number of samples in training set.
347
+ category_counts: list of ndarray
348
+ `category_counts[i]` is the category counts corresponding to
349
+ `self.categories_[i]`.
350
+ missing_indices : dict
351
+ Dict mapping from feature_idx to category index with a missing value.
352
+ """
353
+ # Remove missing value from counts, so it is not considered as infrequent
354
+ if missing_indices:
355
+ category_counts_ = []
356
+ for feature_idx, count in enumerate(category_counts):
357
+ if feature_idx in missing_indices:
358
+ category_counts_.append(
359
+ np.delete(count, missing_indices[feature_idx])
360
+ )
361
+ else:
362
+ category_counts_.append(count)
363
+ else:
364
+ category_counts_ = category_counts
365
+
366
+ self._infrequent_indices = [
367
+ self._identify_infrequent(category_count, n_samples, col_idx)
368
+ for col_idx, category_count in enumerate(category_counts_)
369
+ ]
370
+
371
+ # compute mapping from default mapping to infrequent mapping
372
+ self._default_to_infrequent_mappings = []
373
+
374
+ for feature_idx, infreq_idx in enumerate(self._infrequent_indices):
375
+ cats = self.categories_[feature_idx]
376
+ # no infrequent categories
377
+ if infreq_idx is None:
378
+ self._default_to_infrequent_mappings.append(None)
379
+ continue
380
+
381
+ n_cats = len(cats)
382
+ if feature_idx in missing_indices:
383
+ # Missing index was removed from this category when computing
384
+ # infrequent indices, thus we need to decrease the number of
385
+ # total categories when considering the infrequent mapping.
386
+ n_cats -= 1
387
+
388
+ # infrequent indices exist
389
+ mapping = np.empty(n_cats, dtype=np.int64)
390
+ n_infrequent_cats = infreq_idx.size
391
+
392
+ # infrequent categories are mapped to the last element.
393
+ n_frequent_cats = n_cats - n_infrequent_cats
394
+ mapping[infreq_idx] = n_frequent_cats
395
+
396
+ frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx)
397
+ mapping[frequent_indices] = np.arange(n_frequent_cats)
398
+
399
+ self._default_to_infrequent_mappings.append(mapping)
400
+
401
+ def _map_infrequent_categories(self, X_int, X_mask, ignore_category_indices):
402
+ """Map infrequent categories to integer representing the infrequent category.
403
+
404
+ This modifies X_int in-place. Values that were invalid based on `X_mask`
405
+ are mapped to the infrequent category if there was an infrequent
406
+ category for that feature.
407
+
408
+ Parameters
409
+ ----------
410
+ X_int: ndarray of shape (n_samples, n_features)
411
+ Integer encoded categories.
412
+
413
+ X_mask: ndarray of shape (n_samples, n_features)
414
+ Bool mask for valid values in `X_int`.
415
+
416
+ ignore_category_indices : dict
417
+ Dictionary mapping from feature_idx to category index to ignore.
418
+ Ignored indexes will not be grouped and the original ordinal encoding
419
+ will remain.
420
+ """
421
+ if not self._infrequent_enabled:
422
+ return
423
+
424
+ ignore_category_indices = ignore_category_indices or {}
425
+
426
+ for col_idx in range(X_int.shape[1]):
427
+ infrequent_idx = self._infrequent_indices[col_idx]
428
+ if infrequent_idx is None:
429
+ continue
430
+
431
+ X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0]
432
+ if self.handle_unknown == "infrequent_if_exist":
433
+ # All the unknown values are now mapped to the
434
+ # infrequent_idx[0], which makes the unknown values valid
435
+ # This is needed in `transform` when the encoding is formed
436
+ # using `X_mask`.
437
+ X_mask[:, col_idx] = True
438
+
439
+ # Remaps encoding in `X_int` where the infrequent categories are
440
+ # grouped together.
441
+ for i, mapping in enumerate(self._default_to_infrequent_mappings):
442
+ if mapping is None:
443
+ continue
444
+
445
+ if i in ignore_category_indices:
446
+ # Update rows that are **not** ignored
447
+ rows_to_update = X_int[:, i] != ignore_category_indices[i]
448
+ else:
449
+ rows_to_update = slice(None)
450
+
451
+ X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i])
452
+
453
+ def _more_tags(self):
454
+ return {"X_types": ["2darray", "categorical"], "allow_nan": True}
455
+
456
+
457
+ class OneHotEncoder(_BaseEncoder):
458
+ """
459
+ Encode categorical features as a one-hot numeric array.
460
+
461
+ The input to this transformer should be an array-like of integers or
462
+ strings, denoting the values taken on by categorical (discrete) features.
463
+ The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')
464
+ encoding scheme. This creates a binary column for each category and
465
+ returns a sparse matrix or dense array (depending on the ``sparse_output``
466
+ parameter).
467
+
468
+ By default, the encoder derives the categories based on the unique values
469
+ in each feature. Alternatively, you can also specify the `categories`
470
+ manually.
471
+
472
+ This encoding is needed for feeding categorical data to many scikit-learn
473
+ estimators, notably linear models and SVMs with the standard kernels.
474
+
475
+ Note: a one-hot encoding of y labels should use a LabelBinarizer
476
+ instead.
477
+
478
+ Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
479
+ For a comparison of different encoders, refer to:
480
+ :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`.
481
+
482
+ Parameters
483
+ ----------
484
+ categories : 'auto' or a list of array-like, default='auto'
485
+ Categories (unique values) per feature:
486
+
487
+ - 'auto' : Determine categories automatically from the training data.
488
+ - list : ``categories[i]`` holds the categories expected in the ith
489
+ column. The passed categories should not mix strings and numeric
490
+ values within a single feature, and should be sorted in case of
491
+ numeric values.
492
+
493
+ The used categories can be found in the ``categories_`` attribute.
494
+
495
+ .. versionadded:: 0.20
496
+
497
+ drop : {'first', 'if_binary'} or an array-like of shape (n_features,), \
498
+ default=None
499
+ Specifies a methodology to use to drop one of the categories per
500
+ feature. This is useful in situations where perfectly collinear
501
+ features cause problems, such as when feeding the resulting data
502
+ into an unregularized linear regression model.
503
+
504
+ However, dropping one category breaks the symmetry of the original
505
+ representation and can therefore induce a bias in downstream models,
506
+ for instance for penalized linear classification or regression models.
507
+
508
+ - None : retain all features (the default).
509
+ - 'first' : drop the first category in each feature. If only one
510
+ category is present, the feature will be dropped entirely.
511
+ - 'if_binary' : drop the first category in each feature with two
512
+ categories. Features with 1 or more than 2 categories are
513
+ left intact.
514
+ - array : ``drop[i]`` is the category in feature ``X[:, i]`` that
515
+ should be dropped.
516
+
517
+ When `max_categories` or `min_frequency` is configured to group
518
+ infrequent categories, the dropping behavior is handled after the
519
+ grouping.
520
+
521
+ .. versionadded:: 0.21
522
+ The parameter `drop` was added in 0.21.
523
+
524
+ .. versionchanged:: 0.23
525
+ The option `drop='if_binary'` was added in 0.23.
526
+
527
+ .. versionchanged:: 1.1
528
+ Support for dropping infrequent categories.
529
+
530
+ sparse_output : bool, default=True
531
+ When ``True``, it returns a :class:`scipy.sparse.csr_matrix`,
532
+ i.e. a sparse matrix in "Compressed Sparse Row" (CSR) format.
533
+
534
+ .. versionadded:: 1.2
535
+ `sparse` was renamed to `sparse_output`
536
+
537
+ dtype : number type, default=np.float64
538
+ Desired dtype of output.
539
+
540
+ handle_unknown : {'error', 'ignore', 'infrequent_if_exist'}, \
541
+ default='error'
542
+ Specifies the way unknown categories are handled during :meth:`transform`.
543
+
544
+ - 'error' : Raise an error if an unknown category is present during transform.
545
+ - 'ignore' : When an unknown category is encountered during
546
+ transform, the resulting one-hot encoded columns for this feature
547
+ will be all zeros. In the inverse transform, an unknown category
548
+ will be denoted as None.
549
+ - 'infrequent_if_exist' : When an unknown category is encountered
550
+ during transform, the resulting one-hot encoded columns for this
551
+ feature will map to the infrequent category if it exists. The
552
+ infrequent category will be mapped to the last position in the
553
+ encoding. During inverse transform, an unknown category will be
554
+ mapped to the category denoted `'infrequent'` if it exists. If the
555
+ `'infrequent'` category does not exist, then :meth:`transform` and
556
+ :meth:`inverse_transform` will handle an unknown category as with
557
+ `handle_unknown='ignore'`. Infrequent categories exist based on
558
+ `min_frequency` and `max_categories`. Read more in the
559
+ :ref:`User Guide <encoder_infrequent_categories>`.
560
+
561
+ .. versionchanged:: 1.1
562
+ `'infrequent_if_exist'` was added to automatically handle unknown
563
+ categories and infrequent categories.
564
+
565
+ min_frequency : int or float, default=None
566
+ Specifies the minimum frequency below which a category will be
567
+ considered infrequent.
568
+
569
+ - If `int`, categories with a smaller cardinality will be considered
570
+ infrequent.
571
+
572
+ - If `float`, categories with a smaller cardinality than
573
+ `min_frequency * n_samples` will be considered infrequent.
574
+
575
+ .. versionadded:: 1.1
576
+ Read more in the :ref:`User Guide <encoder_infrequent_categories>`.
577
+
578
+ max_categories : int, default=None
579
+ Specifies an upper limit to the number of output features for each input
580
+ feature when considering infrequent categories. If there are infrequent
581
+ categories, `max_categories` includes the category representing the
582
+ infrequent categories along with the frequent categories. If `None`,
583
+ there is no limit to the number of output features.
584
+
585
+ .. versionadded:: 1.1
586
+ Read more in the :ref:`User Guide <encoder_infrequent_categories>`.
587
+
588
+ feature_name_combiner : "concat" or callable, default="concat"
589
+ Callable with signature `def callable(input_feature, category)` that returns a
590
+ string. This is used to create feature names to be returned by
591
+ :meth:`get_feature_names_out`.
592
+
593
+ `"concat"` concatenates encoded feature name and category with
594
+ `feature + "_" + str(category)`.E.g. feature X with values 1, 6, 7 create
595
+ feature names `X_1, X_6, X_7`.
596
+
597
+ .. versionadded:: 1.3
598
+
599
+ Attributes
600
+ ----------
601
+ categories_ : list of arrays
602
+ The categories of each feature determined during fitting
603
+ (in order of the features in X and corresponding with the output
604
+ of ``transform``). This includes the category specified in ``drop``
605
+ (if any).
606
+
607
+ drop_idx_ : array of shape (n_features,)
608
+ - ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category
609
+ to be dropped for each feature.
610
+ - ``drop_idx_[i] = None`` if no category is to be dropped from the
611
+ feature with index ``i``, e.g. when `drop='if_binary'` and the
612
+ feature isn't binary.
613
+ - ``drop_idx_ = None`` if all the transformed features will be
614
+ retained.
615
+
616
+ If infrequent categories are enabled by setting `min_frequency` or
617
+ `max_categories` to a non-default value and `drop_idx[i]` corresponds
618
+ to an infrequent category, then the entire infrequent category is
619
+ dropped.
620
+
621
+ .. versionchanged:: 0.23
622
+ Added the possibility to contain `None` values.
623
+
624
+ infrequent_categories_ : list of ndarray
625
+ Defined only if infrequent categories are enabled by setting
626
+ `min_frequency` or `max_categories` to a non-default value.
627
+ `infrequent_categories_[i]` are the infrequent categories for feature
628
+ `i`. If the feature `i` has no infrequent categories
629
+ `infrequent_categories_[i]` is None.
630
+
631
+ .. versionadded:: 1.1
632
+
633
+ n_features_in_ : int
634
+ Number of features seen during :term:`fit`.
635
+
636
+ .. versionadded:: 1.0
637
+
638
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
639
+ Names of features seen during :term:`fit`. Defined only when `X`
640
+ has feature names that are all strings.
641
+
642
+ .. versionadded:: 1.0
643
+
644
+ feature_name_combiner : callable or None
645
+ Callable with signature `def callable(input_feature, category)` that returns a
646
+ string. This is used to create feature names to be returned by
647
+ :meth:`get_feature_names_out`.
648
+
649
+ .. versionadded:: 1.3
650
+
651
+ See Also
652
+ --------
653
+ OrdinalEncoder : Performs an ordinal (integer)
654
+ encoding of the categorical features.
655
+ TargetEncoder : Encodes categorical features using the target.
656
+ sklearn.feature_extraction.DictVectorizer : Performs a one-hot encoding of
657
+ dictionary items (also handles string-valued features).
658
+ sklearn.feature_extraction.FeatureHasher : Performs an approximate one-hot
659
+ encoding of dictionary items or strings.
660
+ LabelBinarizer : Binarizes labels in a one-vs-all
661
+ fashion.
662
+ MultiLabelBinarizer : Transforms between iterable of
663
+ iterables and a multilabel format, e.g. a (samples x classes) binary
664
+ matrix indicating the presence of a class label.
665
+
666
+ Examples
667
+ --------
668
+ Given a dataset with two features, we let the encoder find the unique
669
+ values per feature and transform the data to a binary one-hot encoding.
670
+
671
+ >>> from sklearn.preprocessing import OneHotEncoder
672
+
673
+ One can discard categories not seen during `fit`:
674
+
675
+ >>> enc = OneHotEncoder(handle_unknown='ignore')
676
+ >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
677
+ >>> enc.fit(X)
678
+ OneHotEncoder(handle_unknown='ignore')
679
+ >>> enc.categories_
680
+ [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
681
+ >>> enc.transform([['Female', 1], ['Male', 4]]).toarray()
682
+ array([[1., 0., 1., 0., 0.],
683
+ [0., 1., 0., 0., 0.]])
684
+ >>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])
685
+ array([['Male', 1],
686
+ [None, 2]], dtype=object)
687
+ >>> enc.get_feature_names_out(['gender', 'group'])
688
+ array(['gender_Female', 'gender_Male', 'group_1', 'group_2', 'group_3'], ...)
689
+
690
+ One can always drop the first column for each feature:
691
+
692
+ >>> drop_enc = OneHotEncoder(drop='first').fit(X)
693
+ >>> drop_enc.categories_
694
+ [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
695
+ >>> drop_enc.transform([['Female', 1], ['Male', 2]]).toarray()
696
+ array([[0., 0., 0.],
697
+ [1., 1., 0.]])
698
+
699
+ Or drop a column for feature only having 2 categories:
700
+
701
+ >>> drop_binary_enc = OneHotEncoder(drop='if_binary').fit(X)
702
+ >>> drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray()
703
+ array([[0., 1., 0., 0.],
704
+ [1., 0., 1., 0.]])
705
+
706
+ One can change the way feature names are created.
707
+
708
+ >>> def custom_combiner(feature, category):
709
+ ... return str(feature) + "_" + type(category).__name__ + "_" + str(category)
710
+ >>> custom_fnames_enc = OneHotEncoder(feature_name_combiner=custom_combiner).fit(X)
711
+ >>> custom_fnames_enc.get_feature_names_out()
712
+ array(['x0_str_Female', 'x0_str_Male', 'x1_int_1', 'x1_int_2', 'x1_int_3'],
713
+ dtype=object)
714
+
715
+ Infrequent categories are enabled by setting `max_categories` or `min_frequency`.
716
+
717
+ >>> import numpy as np
718
+ >>> X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T
719
+ >>> ohe = OneHotEncoder(max_categories=3, sparse_output=False).fit(X)
720
+ >>> ohe.infrequent_categories_
721
+ [array(['a', 'd'], dtype=object)]
722
+ >>> ohe.transform([["a"], ["b"]])
723
+ array([[0., 0., 1.],
724
+ [1., 0., 0.]])
725
+ """
726
+
727
+ _parameter_constraints: dict = {
728
+ "categories": [StrOptions({"auto"}), list],
729
+ "drop": [StrOptions({"first", "if_binary"}), "array-like", None],
730
+ "dtype": "no_validation", # validation delegated to numpy
731
+ "handle_unknown": [StrOptions({"error", "ignore", "infrequent_if_exist"})],
732
+ "max_categories": [Interval(Integral, 1, None, closed="left"), None],
733
+ "min_frequency": [
734
+ Interval(Integral, 1, None, closed="left"),
735
+ Interval(RealNotInt, 0, 1, closed="neither"),
736
+ None,
737
+ ],
738
+ "sparse_output": ["boolean"],
739
+ "feature_name_combiner": [StrOptions({"concat"}), callable],
740
+ }
741
+
742
+ def __init__(
743
+ self,
744
+ *,
745
+ categories="auto",
746
+ drop=None,
747
+ sparse_output=True,
748
+ dtype=np.float64,
749
+ handle_unknown="error",
750
+ min_frequency=None,
751
+ max_categories=None,
752
+ feature_name_combiner="concat",
753
+ ):
754
+ self.categories = categories
755
+ self.sparse_output = sparse_output
756
+ self.dtype = dtype
757
+ self.handle_unknown = handle_unknown
758
+ self.drop = drop
759
+ self.min_frequency = min_frequency
760
+ self.max_categories = max_categories
761
+ self.feature_name_combiner = feature_name_combiner
762
+
763
+ def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx):
764
+ """Convert `drop_idx` into the index for infrequent categories.
765
+
766
+ If there are no infrequent categories, then `drop_idx` is
767
+ returned. This method is called in `_set_drop_idx` when the `drop`
768
+ parameter is an array-like.
769
+ """
770
+ if not self._infrequent_enabled:
771
+ return drop_idx
772
+
773
+ default_to_infrequent = self._default_to_infrequent_mappings[feature_idx]
774
+ if default_to_infrequent is None:
775
+ return drop_idx
776
+
777
+ # Raise error when explicitly dropping a category that is infrequent
778
+ infrequent_indices = self._infrequent_indices[feature_idx]
779
+ if infrequent_indices is not None and drop_idx in infrequent_indices:
780
+ categories = self.categories_[feature_idx]
781
+ raise ValueError(
782
+ f"Unable to drop category {categories[drop_idx].item()!r} from"
783
+ f" feature {feature_idx} because it is infrequent"
784
+ )
785
+ return default_to_infrequent[drop_idx]
786
+
787
+ def _set_drop_idx(self):
788
+ """Compute the drop indices associated with `self.categories_`.
789
+
790
+ If `self.drop` is:
791
+ - `None`, No categories have been dropped.
792
+ - `'first'`, All zeros to drop the first category.
793
+ - `'if_binary'`, All zeros if the category is binary and `None`
794
+ otherwise.
795
+ - array-like, The indices of the categories that match the
796
+ categories in `self.drop`. If the dropped category is an infrequent
797
+ category, then the index for the infrequent category is used. This
798
+ means that the entire infrequent category is dropped.
799
+
800
+ This methods defines a public `drop_idx_` and a private
801
+ `_drop_idx_after_grouping`.
802
+
803
+ - `drop_idx_`: Public facing API that references the drop category in
804
+ `self.categories_`.
805
+ - `_drop_idx_after_grouping`: Used internally to drop categories *after* the
806
+ infrequent categories are grouped together.
807
+
808
+ If there are no infrequent categories or drop is `None`, then
809
+ `drop_idx_=_drop_idx_after_grouping`.
810
+ """
811
+ if self.drop is None:
812
+ drop_idx_after_grouping = None
813
+ elif isinstance(self.drop, str):
814
+ if self.drop == "first":
815
+ drop_idx_after_grouping = np.zeros(len(self.categories_), dtype=object)
816
+ elif self.drop == "if_binary":
817
+ n_features_out_no_drop = [len(cat) for cat in self.categories_]
818
+ if self._infrequent_enabled:
819
+ for i, infreq_idx in enumerate(self._infrequent_indices):
820
+ if infreq_idx is None:
821
+ continue
822
+ n_features_out_no_drop[i] -= infreq_idx.size - 1
823
+
824
+ drop_idx_after_grouping = np.array(
825
+ [
826
+ 0 if n_features_out == 2 else None
827
+ for n_features_out in n_features_out_no_drop
828
+ ],
829
+ dtype=object,
830
+ )
831
+
832
+ else:
833
+ drop_array = np.asarray(self.drop, dtype=object)
834
+ droplen = len(drop_array)
835
+
836
+ if droplen != len(self.categories_):
837
+ msg = (
838
+ "`drop` should have length equal to the number "
839
+ "of features ({}), got {}"
840
+ )
841
+ raise ValueError(msg.format(len(self.categories_), droplen))
842
+ missing_drops = []
843
+ drop_indices = []
844
+ for feature_idx, (drop_val, cat_list) in enumerate(
845
+ zip(drop_array, self.categories_)
846
+ ):
847
+ if not is_scalar_nan(drop_val):
848
+ drop_idx = np.where(cat_list == drop_val)[0]
849
+ if drop_idx.size: # found drop idx
850
+ drop_indices.append(
851
+ self._map_drop_idx_to_infrequent(feature_idx, drop_idx[0])
852
+ )
853
+ else:
854
+ missing_drops.append((feature_idx, drop_val))
855
+ continue
856
+
857
+ # drop_val is nan, find nan in categories manually
858
+ if is_scalar_nan(cat_list[-1]):
859
+ drop_indices.append(
860
+ self._map_drop_idx_to_infrequent(feature_idx, cat_list.size - 1)
861
+ )
862
+ else: # nan is missing
863
+ missing_drops.append((feature_idx, drop_val))
864
+
865
+ if any(missing_drops):
866
+ msg = (
867
+ "The following categories were supposed to be "
868
+ "dropped, but were not found in the training "
869
+ "data.\n{}".format(
870
+ "\n".join(
871
+ [
872
+ "Category: {}, Feature: {}".format(c, v)
873
+ for c, v in missing_drops
874
+ ]
875
+ )
876
+ )
877
+ )
878
+ raise ValueError(msg)
879
+ drop_idx_after_grouping = np.array(drop_indices, dtype=object)
880
+
881
+ # `_drop_idx_after_grouping` are the categories to drop *after* the infrequent
882
+ # categories are grouped together. If needed, we remap `drop_idx` back
883
+ # to the categories seen in `self.categories_`.
884
+ self._drop_idx_after_grouping = drop_idx_after_grouping
885
+
886
+ if not self._infrequent_enabled or drop_idx_after_grouping is None:
887
+ self.drop_idx_ = self._drop_idx_after_grouping
888
+ else:
889
+ drop_idx_ = []
890
+ for feature_idx, drop_idx in enumerate(drop_idx_after_grouping):
891
+ default_to_infrequent = self._default_to_infrequent_mappings[
892
+ feature_idx
893
+ ]
894
+ if drop_idx is None or default_to_infrequent is None:
895
+ orig_drop_idx = drop_idx
896
+ else:
897
+ orig_drop_idx = np.flatnonzero(default_to_infrequent == drop_idx)[0]
898
+
899
+ drop_idx_.append(orig_drop_idx)
900
+
901
+ self.drop_idx_ = np.asarray(drop_idx_, dtype=object)
902
+
903
+ def _compute_transformed_categories(self, i, remove_dropped=True):
904
+ """Compute the transformed categories used for column `i`.
905
+
906
+ 1. If there are infrequent categories, the category is named
907
+ 'infrequent_sklearn'.
908
+ 2. Dropped columns are removed when remove_dropped=True.
909
+ """
910
+ cats = self.categories_[i]
911
+
912
+ if self._infrequent_enabled:
913
+ infreq_map = self._default_to_infrequent_mappings[i]
914
+ if infreq_map is not None:
915
+ frequent_mask = infreq_map < infreq_map.max()
916
+ infrequent_cat = "infrequent_sklearn"
917
+ # infrequent category is always at the end
918
+ cats = np.concatenate(
919
+ (cats[frequent_mask], np.array([infrequent_cat], dtype=object))
920
+ )
921
+
922
+ if remove_dropped:
923
+ cats = self._remove_dropped_categories(cats, i)
924
+ return cats
925
+
926
+ def _remove_dropped_categories(self, categories, i):
927
+ """Remove dropped categories."""
928
+ if (
929
+ self._drop_idx_after_grouping is not None
930
+ and self._drop_idx_after_grouping[i] is not None
931
+ ):
932
+ return np.delete(categories, self._drop_idx_after_grouping[i])
933
+ return categories
934
+
935
+ def _compute_n_features_outs(self):
936
+ """Compute the n_features_out for each input feature."""
937
+ output = [len(cats) for cats in self.categories_]
938
+
939
+ if self._drop_idx_after_grouping is not None:
940
+ for i, drop_idx in enumerate(self._drop_idx_after_grouping):
941
+ if drop_idx is not None:
942
+ output[i] -= 1
943
+
944
+ if not self._infrequent_enabled:
945
+ return output
946
+
947
+ # infrequent is enabled, the number of features out are reduced
948
+ # because the infrequent categories are grouped together
949
+ for i, infreq_idx in enumerate(self._infrequent_indices):
950
+ if infreq_idx is None:
951
+ continue
952
+ output[i] -= infreq_idx.size - 1
953
+
954
+ return output
955
+
956
+ @_fit_context(prefer_skip_nested_validation=True)
957
+ def fit(self, X, y=None):
958
+ """
959
+ Fit OneHotEncoder to X.
960
+
961
+ Parameters
962
+ ----------
963
+ X : array-like of shape (n_samples, n_features)
964
+ The data to determine the categories of each feature.
965
+
966
+ y : None
967
+ Ignored. This parameter exists only for compatibility with
968
+ :class:`~sklearn.pipeline.Pipeline`.
969
+
970
+ Returns
971
+ -------
972
+ self
973
+ Fitted encoder.
974
+ """
975
+ self._fit(
976
+ X,
977
+ handle_unknown=self.handle_unknown,
978
+ force_all_finite="allow-nan",
979
+ )
980
+ self._set_drop_idx()
981
+ self._n_features_outs = self._compute_n_features_outs()
982
+ return self
983
+
984
+ def transform(self, X):
985
+ """
986
+ Transform X using one-hot encoding.
987
+
988
+ If `sparse_output=True` (default), it returns an instance of
989
+ :class:`scipy.sparse._csr.csr_matrix` (CSR format).
990
+
991
+ If there are infrequent categories for a feature, set by specifying
992
+ `max_categories` or `min_frequency`, the infrequent categories are
993
+ grouped into a single category.
994
+
995
+ Parameters
996
+ ----------
997
+ X : array-like of shape (n_samples, n_features)
998
+ The data to encode.
999
+
1000
+ Returns
1001
+ -------
1002
+ X_out : {ndarray, sparse matrix} of shape \
1003
+ (n_samples, n_encoded_features)
1004
+ Transformed input. If `sparse_output=True`, a sparse matrix will be
1005
+ returned.
1006
+ """
1007
+ check_is_fitted(self)
1008
+ transform_output = _get_output_config("transform", estimator=self)["dense"]
1009
+ if transform_output != "default" and self.sparse_output:
1010
+ capitalize_transform_output = transform_output.capitalize()
1011
+ raise ValueError(
1012
+ f"{capitalize_transform_output} output does not support sparse data."
1013
+ f" Set sparse_output=False to output {transform_output} dataframes or"
1014
+ f" disable {capitalize_transform_output} output via"
1015
+ ' `ohe.set_output(transform="default")`.'
1016
+ )
1017
+
1018
+ # validation of X happens in _check_X called by _transform
1019
+ warn_on_unknown = self.drop is not None and self.handle_unknown in {
1020
+ "ignore",
1021
+ "infrequent_if_exist",
1022
+ }
1023
+ X_int, X_mask = self._transform(
1024
+ X,
1025
+ handle_unknown=self.handle_unknown,
1026
+ force_all_finite="allow-nan",
1027
+ warn_on_unknown=warn_on_unknown,
1028
+ )
1029
+
1030
+ n_samples, n_features = X_int.shape
1031
+
1032
+ if self._drop_idx_after_grouping is not None:
1033
+ to_drop = self._drop_idx_after_grouping.copy()
1034
+ # We remove all the dropped categories from mask, and decrement all
1035
+ # categories that occur after them to avoid an empty column.
1036
+ keep_cells = X_int != to_drop
1037
+ for i, cats in enumerate(self.categories_):
1038
+ # drop='if_binary' but feature isn't binary
1039
+ if to_drop[i] is None:
1040
+ # set to cardinality to not drop from X_int
1041
+ to_drop[i] = len(cats)
1042
+
1043
+ to_drop = to_drop.reshape(1, -1)
1044
+ X_int[X_int > to_drop] -= 1
1045
+ X_mask &= keep_cells
1046
+
1047
+ mask = X_mask.ravel()
1048
+ feature_indices = np.cumsum([0] + self._n_features_outs)
1049
+ indices = (X_int + feature_indices[:-1]).ravel()[mask]
1050
+
1051
+ indptr = np.empty(n_samples + 1, dtype=int)
1052
+ indptr[0] = 0
1053
+ np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype)
1054
+ np.cumsum(indptr[1:], out=indptr[1:])
1055
+ data = np.ones(indptr[-1])
1056
+
1057
+ out = sparse.csr_matrix(
1058
+ (data, indices, indptr),
1059
+ shape=(n_samples, feature_indices[-1]),
1060
+ dtype=self.dtype,
1061
+ )
1062
+ if not self.sparse_output:
1063
+ return out.toarray()
1064
+ else:
1065
+ return out
1066
+
1067
+ def inverse_transform(self, X):
1068
+ """
1069
+ Convert the data back to the original representation.
1070
+
1071
+ When unknown categories are encountered (all zeros in the
1072
+ one-hot encoding), ``None`` is used to represent this category. If the
1073
+ feature with the unknown category has a dropped category, the dropped
1074
+ category will be its inverse.
1075
+
1076
+ For a given input feature, if there is an infrequent category,
1077
+ 'infrequent_sklearn' will be used to represent the infrequent category.
1078
+
1079
+ Parameters
1080
+ ----------
1081
+ X : {array-like, sparse matrix} of shape \
1082
+ (n_samples, n_encoded_features)
1083
+ The transformed data.
1084
+
1085
+ Returns
1086
+ -------
1087
+ X_tr : ndarray of shape (n_samples, n_features)
1088
+ Inverse transformed array.
1089
+ """
1090
+ check_is_fitted(self)
1091
+ X = check_array(X, accept_sparse="csr")
1092
+
1093
+ n_samples, _ = X.shape
1094
+ n_features = len(self.categories_)
1095
+
1096
+ n_features_out = np.sum(self._n_features_outs)
1097
+
1098
+ # validate shape of passed X
1099
+ msg = (
1100
+ "Shape of the passed X data is not correct. Expected {0} columns, got {1}."
1101
+ )
1102
+ if X.shape[1] != n_features_out:
1103
+ raise ValueError(msg.format(n_features_out, X.shape[1]))
1104
+
1105
+ transformed_features = [
1106
+ self._compute_transformed_categories(i, remove_dropped=False)
1107
+ for i, _ in enumerate(self.categories_)
1108
+ ]
1109
+
1110
+ # create resulting array of appropriate dtype
1111
+ dt = np.result_type(*[cat.dtype for cat in transformed_features])
1112
+ X_tr = np.empty((n_samples, n_features), dtype=dt)
1113
+
1114
+ j = 0
1115
+ found_unknown = {}
1116
+
1117
+ if self._infrequent_enabled:
1118
+ infrequent_indices = self._infrequent_indices
1119
+ else:
1120
+ infrequent_indices = [None] * n_features
1121
+
1122
+ for i in range(n_features):
1123
+ cats_wo_dropped = self._remove_dropped_categories(
1124
+ transformed_features[i], i
1125
+ )
1126
+ n_categories = cats_wo_dropped.shape[0]
1127
+
1128
+ # Only happens if there was a column with a unique
1129
+ # category. In this case we just fill the column with this
1130
+ # unique category value.
1131
+ if n_categories == 0:
1132
+ X_tr[:, i] = self.categories_[i][self._drop_idx_after_grouping[i]]
1133
+ j += n_categories
1134
+ continue
1135
+ sub = X[:, j : j + n_categories]
1136
+ # for sparse X argmax returns 2D matrix, ensure 1D array
1137
+ labels = np.asarray(sub.argmax(axis=1)).flatten()
1138
+ X_tr[:, i] = cats_wo_dropped[labels]
1139
+
1140
+ if self.handle_unknown == "ignore" or (
1141
+ self.handle_unknown == "infrequent_if_exist"
1142
+ and infrequent_indices[i] is None
1143
+ ):
1144
+ unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
1145
+ # ignored unknown categories: we have a row of all zero
1146
+ if unknown.any():
1147
+ # if categories were dropped then unknown categories will
1148
+ # be mapped to the dropped category
1149
+ if (
1150
+ self._drop_idx_after_grouping is None
1151
+ or self._drop_idx_after_grouping[i] is None
1152
+ ):
1153
+ found_unknown[i] = unknown
1154
+ else:
1155
+ X_tr[unknown, i] = self.categories_[i][
1156
+ self._drop_idx_after_grouping[i]
1157
+ ]
1158
+ else:
1159
+ dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
1160
+ if dropped.any():
1161
+ if self._drop_idx_after_grouping is None:
1162
+ all_zero_samples = np.flatnonzero(dropped)
1163
+ raise ValueError(
1164
+ f"Samples {all_zero_samples} can not be inverted "
1165
+ "when drop=None and handle_unknown='error' "
1166
+ "because they contain all zeros"
1167
+ )
1168
+ # we can safely assume that all of the nulls in each column
1169
+ # are the dropped value
1170
+ drop_idx = self._drop_idx_after_grouping[i]
1171
+ X_tr[dropped, i] = transformed_features[i][drop_idx]
1172
+
1173
+ j += n_categories
1174
+
1175
+ # if ignored are found: potentially need to upcast result to
1176
+ # insert None values
1177
+ if found_unknown:
1178
+ if X_tr.dtype != object:
1179
+ X_tr = X_tr.astype(object)
1180
+
1181
+ for idx, mask in found_unknown.items():
1182
+ X_tr[mask, idx] = None
1183
+
1184
+ return X_tr
1185
+
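As a minimal usage sketch of the unknown-row handling in `inverse_transform` above (public `OneHotEncoder` API only; the colours and rows are invented for illustration, and `sparse_output` assumes scikit-learn >= 1.2):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

enc = OneHotEncoder(handle_unknown="ignore", sparse_output=False)
enc.fit(np.array([["red"], ["blue"]], dtype=object))

# An all-zero encoded row means "unknown"; with no dropped category the
# inverse maps it back to None, as in the found_unknown branch above.
rows = np.array([[1.0, 0.0], [0.0, 0.0]])
print(enc.inverse_transform(rows))  # 'blue' for the first row, None for the all-zero row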
1186
+ def get_feature_names_out(self, input_features=None):
1187
+ """Get output feature names for transformation.
1188
+
1189
+ Parameters
1190
+ ----------
1191
+ input_features : array-like of str or None, default=None
1192
+ Input features.
1193
+
1194
+ - If `input_features` is `None`, then `feature_names_in_` is
1195
+ used as feature names in. If `feature_names_in_` is not defined,
1196
+ then the following input feature names are generated:
1197
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
1198
+ - If `input_features` is an array-like, then `input_features` must
1199
+ match `feature_names_in_` if `feature_names_in_` is defined.
1200
+
1201
+ Returns
1202
+ -------
1203
+ feature_names_out : ndarray of str objects
1204
+ Transformed feature names.
1205
+ """
1206
+ check_is_fitted(self)
1207
+ input_features = _check_feature_names_in(self, input_features)
1208
+ cats = [
1209
+ self._compute_transformed_categories(i)
1210
+ for i, _ in enumerate(self.categories_)
1211
+ ]
1212
+
1213
+ name_combiner = self._check_get_feature_name_combiner()
1214
+ feature_names = []
1215
+ for i in range(len(cats)):
1216
+ names = [name_combiner(input_features[i], t) for t in cats[i]]
1217
+ feature_names.extend(names)
1218
+
1219
+ return np.array(feature_names, dtype=object)
1220
+
1221
+ def _check_get_feature_name_combiner(self):
1222
+ if self.feature_name_combiner == "concat":
1223
+ return lambda feature, category: feature + "_" + str(category)
1224
+ else: # callable
1225
+ dry_run_combiner = self.feature_name_combiner("feature", "category")
1226
+ if not isinstance(dry_run_combiner, str):
1227
+ raise TypeError(
1228
+ "When `feature_name_combiner` is a callable, it should return a "
1229
+ f"Python string. Got {type(dry_run_combiner)} instead."
1230
+ )
1231
+ return self.feature_name_combiner
1232
+
1233
+
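A small, hedged sketch of how the infrequent-category grouping above surfaces through the public API (assuming scikit-learn >= 1.2 for `sparse_output`; the category counts and the `min_frequency` threshold are invented):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

# 5 "a", 20 "b", 10 "c", 3 "d": with min_frequency=6, "a" and "d" fall below
# the threshold and are grouped into the single 'infrequent_sklearn' column.
X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T
enc = OneHotEncoder(
    min_frequency=6,
    handle_unknown="infrequent_if_exist",
    sparse_output=False,
).fit(X)

print(enc.get_feature_names_out())  # e.g. ['x0_b' 'x0_c' 'x0_infrequent_sklearn']
# An unseen category is routed to the infrequent column instead of raising.
print(enc.transform(np.array([["z"]], dtype=object)))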
1234
+ class OrdinalEncoder(OneToOneFeatureMixin, _BaseEncoder):
1235
+ """
1236
+ Encode categorical features as an integer array.
1237
+
1238
+ The input to this transformer should be an array-like of integers or
1239
+ strings, denoting the values taken on by categorical (discrete) features.
1240
+ The features are converted to ordinal integers. This results in
1241
+ a single column of integers (0 to n_categories - 1) per feature.
1242
+
1243
+ Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
1244
+ For a comparison of different encoders, refer to:
1245
+ :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`.
1246
+
1247
+ .. versionadded:: 0.20
1248
+
1249
+ Parameters
1250
+ ----------
1251
+ categories : 'auto' or a list of array-like, default='auto'
1252
+ Categories (unique values) per feature:
1253
+
1254
+ - 'auto' : Determine categories automatically from the training data.
1255
+ - list : ``categories[i]`` holds the categories expected in the ith
1256
+ column. The passed categories should not mix strings and numeric
1257
+ values, and should be sorted in case of numeric values.
1258
+
1259
+ The used categories can be found in the ``categories_`` attribute.
1260
+
1261
+ dtype : number type, default=np.float64
1262
+ Desired dtype of output.
1263
+
1264
+ handle_unknown : {'error', 'use_encoded_value'}, default='error'
1265
+ When set to 'error' an error will be raised in case an unknown
1266
+ categorical feature is present during transform. When set to
1267
+ 'use_encoded_value', the encoded value of unknown categories will be
1268
+ set to the value given for the parameter `unknown_value`. In
1269
+ :meth:`inverse_transform`, an unknown category will be denoted as None.
1270
+
1271
+ .. versionadded:: 0.24
1272
+
1273
+ unknown_value : int or np.nan, default=None
1274
+ When the parameter handle_unknown is set to 'use_encoded_value', this
1275
+ parameter is required and will set the encoded value of unknown
1276
+ categories. It has to be distinct from the values used to encode any of
1277
+ the categories in `fit`. If set to np.nan, the `dtype` parameter must
1278
+ be a float dtype.
1279
+
1280
+ .. versionadded:: 0.24
1281
+
1282
+ encoded_missing_value : int or np.nan, default=np.nan
1283
+ Encoded value of missing categories. If set to `np.nan`, then the `dtype`
1284
+ parameter must be a float dtype.
1285
+
1286
+ .. versionadded:: 1.1
1287
+
1288
+ min_frequency : int or float, default=None
1289
+ Specifies the minimum frequency below which a category will be
1290
+ considered infrequent.
1291
+
1292
+ - If `int`, categories with a smaller cardinality will be considered
1293
+ infrequent.
1294
+
1295
+ - If `float`, categories with a smaller cardinality than
1296
+ `min_frequency * n_samples` will be considered infrequent.
1297
+
1298
+ .. versionadded:: 1.3
1299
+ Read more in the :ref:`User Guide <encoder_infrequent_categories>`.
1300
+
1301
+ max_categories : int, default=None
1302
+ Specifies an upper limit to the number of output categories for each input
1303
+ feature when considering infrequent categories. If there are infrequent
1304
+ categories, `max_categories` includes the category representing the
1305
+ infrequent categories along with the frequent categories. If `None`,
1306
+ there is no limit to the number of output features.
1307
+
1308
+ `max_categories` does **not** take into account missing or unknown
1309
+ categories. Setting `unknown_value` or `encoded_missing_value` to an
1310
+ integer will increase the number of unique integer codes by one each.
1311
+ This can result in up to `max_categories + 2` integer codes.
1312
+
1313
+ .. versionadded:: 1.3
1314
+ Read more in the :ref:`User Guide <encoder_infrequent_categories>`.
1315
+
1316
+ Attributes
1317
+ ----------
1318
+ categories_ : list of arrays
1319
+ The categories of each feature determined during ``fit`` (in order of
1320
+ the features in X and corresponding with the output of ``transform``).
1321
+ This does not include categories that weren't seen during ``fit``.
1322
+
1323
+ n_features_in_ : int
1324
+ Number of features seen during :term:`fit`.
1325
+
1326
+ .. versionadded:: 1.0
1327
+
1328
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1329
+ Names of features seen during :term:`fit`. Defined only when `X`
1330
+ has feature names that are all strings.
1331
+
1332
+ .. versionadded:: 1.0
1333
+
1334
+ infrequent_categories_ : list of ndarray
1335
+ Defined only if infrequent categories are enabled by setting
1336
+ `min_frequency` or `max_categories` to a non-default value.
1337
+ `infrequent_categories_[i]` are the infrequent categories for feature
1338
+ `i`. If the feature `i` has no infrequent categories
1339
+ `infrequent_categories_[i]` is None.
1340
+
1341
+ .. versionadded:: 1.3
1342
+
1343
+ See Also
1344
+ --------
1345
+ OneHotEncoder : Performs a one-hot encoding of categorical features. This encoding
1346
+ is suitable for low to medium cardinality categorical variables, both in
1347
+ supervised and unsupervised settings.
1348
+ TargetEncoder : Encodes categorical features using supervised signal
1349
+ in a classification or regression pipeline. This encoding is typically
1350
+ suitable for high cardinality categorical variables.
1351
+ LabelEncoder : Encodes target labels with values between 0 and
1352
+ ``n_classes-1``.
1353
+
1354
+ Notes
1355
+ -----
1356
+ With a high proportion of `nan` values, inferring categories becomes slow with
1357
+ Python versions before 3.10. The handling of `nan` values was improved
1358
+ from Python 3.10 onwards (cf.
1359
+ `bpo-43475 <https://github.com/python/cpython/issues/87641>`_).
1360
+
1361
+ Examples
1362
+ --------
1363
+ Given a dataset with two features, we let the encoder find the unique
1364
+ values per feature and transform the data to an ordinal encoding.
1365
+
1366
+ >>> from sklearn.preprocessing import OrdinalEncoder
1367
+ >>> enc = OrdinalEncoder()
1368
+ >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
1369
+ >>> enc.fit(X)
1370
+ OrdinalEncoder()
1371
+ >>> enc.categories_
1372
+ [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
1373
+ >>> enc.transform([['Female', 3], ['Male', 1]])
1374
+ array([[0., 2.],
1375
+ [1., 0.]])
1376
+
1377
+ >>> enc.inverse_transform([[1, 0], [0, 1]])
1378
+ array([['Male', 1],
1379
+ ['Female', 2]], dtype=object)
1380
+
1381
+ By default, :class:`OrdinalEncoder` is lenient towards missing values by
1382
+ propagating them.
1383
+
1384
+ >>> import numpy as np
1385
+ >>> X = [['Male', 1], ['Female', 3], ['Female', np.nan]]
1386
+ >>> enc.fit_transform(X)
1387
+ array([[ 1., 0.],
1388
+ [ 0., 1.],
1389
+ [ 0., nan]])
1390
+
1391
+ You can use the parameter `encoded_missing_value` to encode missing values.
1392
+
1393
+ >>> enc.set_params(encoded_missing_value=-1).fit_transform(X)
1394
+ array([[ 1., 0.],
1395
+ [ 0., 1.],
1396
+ [ 0., -1.]])
1397
+
1398
+ Infrequent categories are enabled by setting `max_categories` or `min_frequency`.
1399
+ In the following example, "a" and "d" are considered infrequent and grouped
1400
+ together into a single category; "b" and "c" are their own categories; unknown
1401
+ values are encoded as 3 and missing values are encoded as 4.
1402
+
1403
+ >>> X_train = np.array(
1404
+ ... [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]],
1405
+ ... dtype=object).T
1406
+ >>> enc = OrdinalEncoder(
1407
+ ... handle_unknown="use_encoded_value", unknown_value=3,
1408
+ ... max_categories=3, encoded_missing_value=4)
1409
+ >>> _ = enc.fit(X_train)
1410
+ >>> X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object)
1411
+ >>> enc.transform(X_test)
1412
+ array([[2.],
1413
+ [0.],
1414
+ [1.],
1415
+ [2.],
1416
+ [3.],
1417
+ [4.]])
1418
+ """
1419
+
1420
+ _parameter_constraints: dict = {
1421
+ "categories": [StrOptions({"auto"}), list],
1422
+ "dtype": "no_validation", # validation delegated to numpy
1423
+ "encoded_missing_value": [Integral, type(np.nan)],
1424
+ "handle_unknown": [StrOptions({"error", "use_encoded_value"})],
1425
+ "unknown_value": [Integral, type(np.nan), None],
1426
+ "max_categories": [Interval(Integral, 1, None, closed="left"), None],
1427
+ "min_frequency": [
1428
+ Interval(Integral, 1, None, closed="left"),
1429
+ Interval(RealNotInt, 0, 1, closed="neither"),
1430
+ None,
1431
+ ],
1432
+ }
1433
+
1434
+ def __init__(
1435
+ self,
1436
+ *,
1437
+ categories="auto",
1438
+ dtype=np.float64,
1439
+ handle_unknown="error",
1440
+ unknown_value=None,
1441
+ encoded_missing_value=np.nan,
1442
+ min_frequency=None,
1443
+ max_categories=None,
1444
+ ):
1445
+ self.categories = categories
1446
+ self.dtype = dtype
1447
+ self.handle_unknown = handle_unknown
1448
+ self.unknown_value = unknown_value
1449
+ self.encoded_missing_value = encoded_missing_value
1450
+ self.min_frequency = min_frequency
1451
+ self.max_categories = max_categories
1452
+
1453
+ @_fit_context(prefer_skip_nested_validation=True)
1454
+ def fit(self, X, y=None):
1455
+ """
1456
+ Fit the OrdinalEncoder to X.
1457
+
1458
+ Parameters
1459
+ ----------
1460
+ X : array-like of shape (n_samples, n_features)
1461
+ The data to determine the categories of each feature.
1462
+
1463
+ y : None
1464
+ Ignored. This parameter exists only for compatibility with
1465
+ :class:`~sklearn.pipeline.Pipeline`.
1466
+
1467
+ Returns
1468
+ -------
1469
+ self : object
1470
+ Fitted encoder.
1471
+ """
1472
+ if self.handle_unknown == "use_encoded_value":
1473
+ if is_scalar_nan(self.unknown_value):
1474
+ if np.dtype(self.dtype).kind != "f":
1475
+ raise ValueError(
1476
+ "When unknown_value is np.nan, the dtype "
1477
+ "parameter should be "
1478
+ f"a float dtype. Got {self.dtype}."
1479
+ )
1480
+ elif not isinstance(self.unknown_value, numbers.Integral):
1481
+ raise TypeError(
1482
+ "unknown_value should be an integer or "
1483
+ "np.nan when "
1484
+ "handle_unknown is 'use_encoded_value', "
1485
+ f"got {self.unknown_value}."
1486
+ )
1487
+ elif self.unknown_value is not None:
1488
+ raise TypeError(
1489
+ "unknown_value should only be set when "
1490
+ "handle_unknown is 'use_encoded_value', "
1491
+ f"got {self.unknown_value}."
1492
+ )
1493
+
1494
+ # `_fit` will only raise an error when `self.handle_unknown="error"`
1495
+ fit_results = self._fit(
1496
+ X,
1497
+ handle_unknown=self.handle_unknown,
1498
+ force_all_finite="allow-nan",
1499
+ return_and_ignore_missing_for_infrequent=True,
1500
+ )
1501
+ self._missing_indices = fit_results["missing_indices"]
1502
+
1503
+ cardinalities = [len(categories) for categories in self.categories_]
1504
+ if self._infrequent_enabled:
1505
+ # Cardinality decreases because the infrequent categories are grouped
1506
+ # together
1507
+ for feature_idx, infrequent in enumerate(self.infrequent_categories_):
1508
+ if infrequent is not None:
1509
+ cardinalities[feature_idx] -= len(infrequent)
1510
+
1511
+ # missing values are not considered part of the cardinality
1512
+ # when considering unknown categories or encoded_missing_value
1513
+ for cat_idx, categories_for_idx in enumerate(self.categories_):
1514
+ if is_scalar_nan(categories_for_idx[-1]):
1515
+ cardinalities[cat_idx] -= 1
1516
+
1517
+ if self.handle_unknown == "use_encoded_value":
1518
+ for cardinality in cardinalities:
1519
+ if 0 <= self.unknown_value < cardinality:
1520
+ raise ValueError(
1521
+ "The used value for unknown_value "
1522
+ f"{self.unknown_value} is one of the "
1523
+ "values already used for encoding the "
1524
+ "seen categories."
1525
+ )
1526
+
1527
+ if self._missing_indices:
1528
+ if np.dtype(self.dtype).kind != "f" and is_scalar_nan(
1529
+ self.encoded_missing_value
1530
+ ):
1531
+ raise ValueError(
1532
+ "There are missing values in features "
1533
+ f"{list(self._missing_indices)}. For OrdinalEncoder to "
1534
+ f"encode missing values with dtype: {self.dtype}, set "
1535
+ "encoded_missing_value to a non-nan value, or "
1536
+ "set dtype to a float"
1537
+ )
1538
+
1539
+ if not is_scalar_nan(self.encoded_missing_value):
1540
+ # Features are invalid when they contain a missing category
1541
+ # and encoded_missing_value was already used to encode a
1542
+ # known category
1543
+ invalid_features = [
1544
+ cat_idx
1545
+ for cat_idx, cardinality in enumerate(cardinalities)
1546
+ if cat_idx in self._missing_indices
1547
+ and 0 <= self.encoded_missing_value < cardinality
1548
+ ]
1549
+
1550
+ if invalid_features:
1551
+ # Use feature names if they are available
1552
+ if hasattr(self, "feature_names_in_"):
1553
+ invalid_features = self.feature_names_in_[invalid_features]
1554
+ raise ValueError(
1555
+ f"encoded_missing_value ({self.encoded_missing_value}) "
1556
+ "is already used to encode a known category in features: "
1557
+ f"{invalid_features}"
1558
+ )
1559
+
1560
+ return self
1561
+
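A quick sketch of the `unknown_value` validation in `fit` above (public `OrdinalEncoder` API; the categories are invented):

from sklearn.preprocessing import OrdinalEncoder

# unknown_value must not collide with the codes 0..n_categories-1 given to the
# seen categories, so this fit raises the ValueError constructed above.
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=1)
try:
    enc.fit([["low"], ["high"], ["medium"]])
except ValueError as exc:
    print(exc)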
1562
+ def transform(self, X):
1563
+ """
1564
+ Transform X to ordinal codes.
1565
+
1566
+ Parameters
1567
+ ----------
1568
+ X : array-like of shape (n_samples, n_features)
1569
+ The data to encode.
1570
+
1571
+ Returns
1572
+ -------
1573
+ X_out : ndarray of shape (n_samples, n_features)
1574
+ Transformed input.
1575
+ """
1576
+ check_is_fitted(self, "categories_")
1577
+ X_int, X_mask = self._transform(
1578
+ X,
1579
+ handle_unknown=self.handle_unknown,
1580
+ force_all_finite="allow-nan",
1581
+ ignore_category_indices=self._missing_indices,
1582
+ )
1583
+ X_trans = X_int.astype(self.dtype, copy=False)
1584
+
1585
+ for cat_idx, missing_idx in self._missing_indices.items():
1586
+ X_missing_mask = X_int[:, cat_idx] == missing_idx
1587
+ X_trans[X_missing_mask, cat_idx] = self.encoded_missing_value
1588
+
1589
+ # create separate category for unknown values
1590
+ if self.handle_unknown == "use_encoded_value":
1591
+ X_trans[~X_mask] = self.unknown_value
1592
+ return X_trans
1593
+
1594
+ def inverse_transform(self, X):
1595
+ """
1596
+ Convert the data back to the original representation.
1597
+
1598
+ Parameters
1599
+ ----------
1600
+ X : array-like of shape (n_samples, n_encoded_features)
1601
+ The transformed data.
1602
+
1603
+ Returns
1604
+ -------
1605
+ X_tr : ndarray of shape (n_samples, n_features)
1606
+ Inverse transformed array.
1607
+ """
1608
+ check_is_fitted(self)
1609
+ X = check_array(X, force_all_finite="allow-nan")
1610
+
1611
+ n_samples, _ = X.shape
1612
+ n_features = len(self.categories_)
1613
+
1614
+ # validate shape of passed X
1615
+ msg = (
1616
+ "Shape of the passed X data is not correct. Expected {0} columns, got {1}."
1617
+ )
1618
+ if X.shape[1] != n_features:
1619
+ raise ValueError(msg.format(n_features, X.shape[1]))
1620
+
1621
+ # create resulting array of appropriate dtype
1622
+ dt = np.result_type(*[cat.dtype for cat in self.categories_])
1623
+ X_tr = np.empty((n_samples, n_features), dtype=dt)
1624
+
1625
+ found_unknown = {}
1626
+ infrequent_masks = {}
1627
+
1628
+ infrequent_indices = getattr(self, "_infrequent_indices", None)
1629
+
1630
+ for i in range(n_features):
1631
+ labels = X[:, i]
1632
+
1633
+ # replace values of X[:, i] that were nan with actual indices
1634
+ if i in self._missing_indices:
1635
+ X_i_mask = _get_mask(labels, self.encoded_missing_value)
1636
+ labels[X_i_mask] = self._missing_indices[i]
1637
+
1638
+ rows_to_update = slice(None)
1639
+ categories = self.categories_[i]
1640
+
1641
+ if infrequent_indices is not None and infrequent_indices[i] is not None:
1642
+ # Compute mask for frequent categories
1643
+ infrequent_encoding_value = len(categories) - len(infrequent_indices[i])
1644
+ infrequent_masks[i] = labels == infrequent_encoding_value
1645
+ rows_to_update = ~infrequent_masks[i]
1646
+
1647
+ # Remap categories to be only frequent categories. The infrequent
1648
+ # categories will be mapped to "infrequent_sklearn" later
1649
+ frequent_categories_mask = np.ones_like(categories, dtype=bool)
1650
+ frequent_categories_mask[infrequent_indices[i]] = False
1651
+ categories = categories[frequent_categories_mask]
1652
+
1653
+ if self.handle_unknown == "use_encoded_value":
1654
+ unknown_labels = _get_mask(labels, self.unknown_value)
1655
+ found_unknown[i] = unknown_labels
1656
+
1657
+ known_labels = ~unknown_labels
1658
+ if isinstance(rows_to_update, np.ndarray):
1659
+ rows_to_update &= known_labels
1660
+ else:
1661
+ rows_to_update = known_labels
1662
+
1663
+ labels_int = labels[rows_to_update].astype("int64", copy=False)
1664
+ X_tr[rows_to_update, i] = categories[labels_int]
1665
+
1666
+ if found_unknown or infrequent_masks:
1667
+ X_tr = X_tr.astype(object, copy=False)
1668
+
1669
+ # insert None values for unknown values
1670
+ if found_unknown:
1671
+ for idx, mask in found_unknown.items():
1672
+ X_tr[mask, idx] = None
1673
+
1674
+ if infrequent_masks:
1675
+ for idx, mask in infrequent_masks.items():
1676
+ X_tr[mask, idx] = "infrequent_sklearn"
1677
+
1678
+ return X_tr
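To round off the encoder code, a hedged usage sketch of the `inverse_transform` behaviour implemented above (assumes scikit-learn >= 1.3 so that `max_categories` is available on `OrdinalEncoder`; the data is invented):

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T
enc = OrdinalEncoder(
    handle_unknown="use_encoded_value",
    unknown_value=-1,
    max_categories=3,   # "a" and "d" become the grouped infrequent category
).fit(X_train)

codes = enc.transform(np.array([["b"], ["d"], ["zzz"]], dtype=object))
# "b" inverts to itself, "d" to 'infrequent_sklearn', and the unknown code
# (-1, produced for the unseen "zzz") inverts to None, as coded above.
print(enc.inverse_transform(codes))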
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py ADDED
@@ -0,0 +1,431 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+
5
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
6
+ from ..utils._param_validation import StrOptions
7
+ from ..utils._set_output import ADAPTERS_MANAGER, _get_output_config
8
+ from ..utils.metaestimators import available_if
9
+ from ..utils.validation import (
10
+ _allclose_dense_sparse,
11
+ _check_feature_names_in,
12
+ _get_feature_names,
13
+ _is_pandas_df,
14
+ _is_polars_df,
15
+ check_array,
16
+ )
17
+
18
+
19
+ def _get_adapter_from_container(container):
20
+ """Get the adapter that knows how to handle such a container.
21
+
22
+ See :class:`sklearn.utils._set_output.ContainerAdapterProtocol` for more
23
+ details.
24
+ """
25
+ module_name = container.__class__.__module__.split(".")[0]
26
+ try:
27
+ return ADAPTERS_MANAGER.adapters[module_name]
28
+ except KeyError as exc:
29
+ available_adapters = list(ADAPTERS_MANAGER.adapters.keys())
30
+ raise ValueError(
31
+ "The container does not have a registered adapter in scikit-learn. "
32
+ f"Available adapters are: {available_adapters} while the container "
33
+ f"provided is: {container!r}."
34
+ ) from exc
35
+
36
+
37
+ def _identity(X):
38
+ """The identity function."""
39
+ return X
40
+
41
+
42
+ class FunctionTransformer(TransformerMixin, BaseEstimator):
43
+ """Constructs a transformer from an arbitrary callable.
44
+
45
+ A FunctionTransformer forwards its X (and optionally y) arguments to a
46
+ user-defined function or function object and returns the result of this
47
+ function. This is useful for stateless transformations such as taking the
48
+ log of frequencies, doing custom scaling, etc.
49
+
50
+ Note: If a lambda is used as the function, then the resulting
51
+ transformer will not be pickleable.
52
+
53
+ .. versionadded:: 0.17
54
+
55
+ Read more in the :ref:`User Guide <function_transformer>`.
56
+
57
+ Parameters
58
+ ----------
59
+ func : callable, default=None
60
+ The callable to use for the transformation. This will be passed
61
+ the same arguments as transform, with args and kwargs forwarded.
62
+ If func is None, then func will be the identity function.
63
+
64
+ inverse_func : callable, default=None
65
+ The callable to use for the inverse transformation. This will be
66
+ passed the same arguments as inverse transform, with args and
67
+ kwargs forwarded. If inverse_func is None, then inverse_func
68
+ will be the identity function.
69
+
70
+ validate : bool, default=False
71
+ Indicate that the input X array should be checked before calling
72
+ ``func``. The possibilities are:
73
+
74
+ - If False, there is no input validation.
75
+ - If True, then X will be converted to a 2-dimensional NumPy array or
76
+ sparse matrix. If the conversion is not possible an exception is
77
+ raised.
78
+
79
+ .. versionchanged:: 0.22
80
+ The default of ``validate`` changed from True to False.
81
+
82
+ accept_sparse : bool, default=False
83
+ Indicate that func accepts a sparse matrix as input. If validate is
84
+ False, this has no effect. Otherwise, if accept_sparse is false,
85
+ sparse matrix inputs will cause an exception to be raised.
86
+
87
+ check_inverse : bool, default=True
88
+ Whether to check that ``func`` followed by ``inverse_func`` leads to
89
+ the original inputs. It can be used for a sanity check, raising a
90
+ warning when the condition is not fulfilled.
91
+
92
+ .. versionadded:: 0.20
93
+
94
+ feature_names_out : callable, 'one-to-one' or None, default=None
95
+ Determines the list of feature names that will be returned by the
96
+ `get_feature_names_out` method. If it is 'one-to-one', then the output
97
+ feature names will be equal to the input feature names. If it is a
98
+ callable, then it must take two positional arguments: this
99
+ `FunctionTransformer` (`self`) and an array-like of input feature names
100
+ (`input_features`). It must return an array-like of output feature
101
+ names. The `get_feature_names_out` method is only defined if
102
+ `feature_names_out` is not None.
103
+
104
+ See ``get_feature_names_out`` for more details.
105
+
106
+ .. versionadded:: 1.1
107
+
108
+ kw_args : dict, default=None
109
+ Dictionary of additional keyword arguments to pass to func.
110
+
111
+ .. versionadded:: 0.18
112
+
113
+ inv_kw_args : dict, default=None
114
+ Dictionary of additional keyword arguments to pass to inverse_func.
115
+
116
+ .. versionadded:: 0.18
117
+
118
+ Attributes
119
+ ----------
120
+ n_features_in_ : int
121
+ Number of features seen during :term:`fit`.
122
+
123
+ .. versionadded:: 0.24
124
+
125
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
126
+ Names of features seen during :term:`fit`. Defined only when `X` has feature
127
+ names that are all strings.
128
+
129
+ .. versionadded:: 1.0
130
+
131
+ See Also
132
+ --------
133
+ MaxAbsScaler : Scale each feature by its maximum absolute value.
134
+ StandardScaler : Standardize features by removing the mean and
135
+ scaling to unit variance.
136
+ LabelBinarizer : Binarize labels in a one-vs-all fashion.
137
+ MultiLabelBinarizer : Transform between iterable of iterables
138
+ and a multilabel format.
139
+
140
+ Notes
141
+ -----
142
+ If `func` returns an output with a `columns` attribute, then the column names are enforced
143
+ to be consistent with the output of `get_feature_names_out`.
144
+
145
+ Examples
146
+ --------
147
+ >>> import numpy as np
148
+ >>> from sklearn.preprocessing import FunctionTransformer
149
+ >>> transformer = FunctionTransformer(np.log1p)
150
+ >>> X = np.array([[0, 1], [2, 3]])
151
+ >>> transformer.transform(X)
152
+ array([[0. , 0.6931...],
153
+ [1.0986..., 1.3862...]])
154
+ """
155
+
156
+ _parameter_constraints: dict = {
157
+ "func": [callable, None],
158
+ "inverse_func": [callable, None],
159
+ "validate": ["boolean"],
160
+ "accept_sparse": ["boolean"],
161
+ "check_inverse": ["boolean"],
162
+ "feature_names_out": [callable, StrOptions({"one-to-one"}), None],
163
+ "kw_args": [dict, None],
164
+ "inv_kw_args": [dict, None],
165
+ }
166
+
167
+ def __init__(
168
+ self,
169
+ func=None,
170
+ inverse_func=None,
171
+ *,
172
+ validate=False,
173
+ accept_sparse=False,
174
+ check_inverse=True,
175
+ feature_names_out=None,
176
+ kw_args=None,
177
+ inv_kw_args=None,
178
+ ):
179
+ self.func = func
180
+ self.inverse_func = inverse_func
181
+ self.validate = validate
182
+ self.accept_sparse = accept_sparse
183
+ self.check_inverse = check_inverse
184
+ self.feature_names_out = feature_names_out
185
+ self.kw_args = kw_args
186
+ self.inv_kw_args = inv_kw_args
187
+
188
+ def _check_input(self, X, *, reset):
189
+ if self.validate:
190
+ return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset)
191
+ elif reset:
192
+ # Set feature_names_in_ and n_features_in_ even if validate=False
193
+ # We run this only when reset==True to store the attributes but not
194
+ # validate them, because validate=False
195
+ self._check_n_features(X, reset=reset)
196
+ self._check_feature_names(X, reset=reset)
197
+ return X
198
+
199
+ def _check_inverse_transform(self, X):
200
+ """Check that func and inverse_func are the inverse."""
201
+ idx_selected = slice(None, None, max(1, X.shape[0] // 100))
202
+ X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))
203
+
204
+ if hasattr(X, "dtype"):
205
+ dtypes = [X.dtype]
206
+ elif hasattr(X, "dtypes"):
207
+ # Dataframes can have multiple dtypes
208
+ dtypes = X.dtypes
209
+
210
+ if not all(np.issubdtype(d, np.number) for d in dtypes):
211
+ raise ValueError(
212
+ "'check_inverse' is only supported when all the elements in `X` are"
213
+ " numerical."
214
+ )
215
+
216
+ if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
217
+ warnings.warn(
218
+ (
219
+ "The provided functions are not strictly"
220
+ " inverse of each other. If you are sure you"
221
+ " want to proceed regardless, set"
222
+ " 'check_inverse=False'."
223
+ ),
224
+ UserWarning,
225
+ )
226
+
227
+ @_fit_context(prefer_skip_nested_validation=True)
228
+ def fit(self, X, y=None):
229
+ """Fit transformer by checking X.
230
+
231
+ If ``validate`` is ``True``, ``X`` will be checked.
232
+
233
+ Parameters
234
+ ----------
235
+ X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
236
+ if `validate=True` else any object that `func` can handle
237
+ Input array.
238
+
239
+ y : Ignored
240
+ Not used, present here for API consistency by convention.
241
+
242
+ Returns
243
+ -------
244
+ self : object
245
+ FunctionTransformer class instance.
246
+ """
247
+ X = self._check_input(X, reset=True)
248
+ if self.check_inverse and not (self.func is None or self.inverse_func is None):
249
+ self._check_inverse_transform(X)
250
+ return self
251
+
252
+ def transform(self, X):
253
+ """Transform X using the forward function.
254
+
255
+ Parameters
256
+ ----------
257
+ X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
258
+ if `validate=True` else any object that `func` can handle
259
+ Input array.
260
+
261
+ Returns
262
+ -------
263
+ X_out : array-like, shape (n_samples, n_features)
264
+ Transformed input.
265
+ """
266
+ X = self._check_input(X, reset=False)
267
+ out = self._transform(X, func=self.func, kw_args=self.kw_args)
268
+ output_config = _get_output_config("transform", self)["dense"]
269
+
270
+ if hasattr(out, "columns") and self.feature_names_out is not None:
271
+ # check the consistency between the column names produced by `transform` and
272
+ # the column names provided by `get_feature_names_out`.
273
+ feature_names_out = self.get_feature_names_out()
274
+ if list(out.columns) != list(feature_names_out):
275
+ # we can override the column names of the output if it is inconsistent
276
+ # with the column names provided by `get_feature_names_out` in the
277
+ # following cases:
278
+ # * `func` preserved the column names between the input and the output
279
+ # * the input column names are all numbers
280
+ # * the output is requested to be a DataFrame (pandas or polars)
281
+ feature_names_in = getattr(
282
+ X, "feature_names_in_", _get_feature_names(X)
283
+ )
284
+ same_feature_names_in_out = feature_names_in is not None and list(
285
+ feature_names_in
286
+ ) == list(out.columns)
287
+ not_all_str_columns = not all(
288
+ isinstance(col, str) for col in out.columns
289
+ )
290
+ if same_feature_names_in_out or not_all_str_columns:
291
+ adapter = _get_adapter_from_container(out)
292
+ out = adapter.create_container(
293
+ X_output=out,
294
+ X_original=out,
295
+ columns=feature_names_out,
296
+ inplace=False,
297
+ )
298
+ else:
299
+ raise ValueError(
300
+ "The output generated by `func` has different column names "
301
+ "than the ones provided by `get_feature_names_out`. "
302
+ f"Got output with columns names: {list(out.columns)} and "
303
+ "`get_feature_names_out` returned: "
304
+ f"{list(self.get_feature_names_out())}. "
305
+ "The column names can be overridden by setting "
306
+ "`set_output(transform='pandas')` or "
307
+ "`set_output(transform='polars')` such that the column names "
308
+ "are set to the names provided by `get_feature_names_out`."
309
+ )
310
+
311
+ if self.feature_names_out is None:
312
+ warn_msg = (
313
+ "When `set_output` is configured to be '{0}', `func` should return "
314
+ "a {0} DataFrame to follow the `set_output` API or `feature_names_out`"
315
+ " should be defined."
316
+ )
317
+ if output_config == "pandas" and not _is_pandas_df(out):
318
+ warnings.warn(warn_msg.format("pandas"))
319
+ elif output_config == "polars" and not _is_polars_df(out):
320
+ warnings.warn(warn_msg.format("polars"))
321
+
322
+ return out
323
+
324
+ def inverse_transform(self, X):
325
+ """Transform X using the inverse function.
326
+
327
+ Parameters
328
+ ----------
329
+ X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
330
+ if `validate=True` else any object that `inverse_func` can handle
331
+ Input array.
332
+
333
+ Returns
334
+ -------
335
+ X_out : array-like, shape (n_samples, n_features)
336
+ Transformed input.
337
+ """
338
+ if self.validate:
339
+ X = check_array(X, accept_sparse=self.accept_sparse)
340
+ return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
341
+
342
+ @available_if(lambda self: self.feature_names_out is not None)
343
+ def get_feature_names_out(self, input_features=None):
344
+ """Get output feature names for transformation.
345
+
346
+ This method is only defined if `feature_names_out` is not None.
347
+
348
+ Parameters
349
+ ----------
350
+ input_features : array-like of str or None, default=None
351
+ Input feature names.
352
+
353
+ - If `input_features` is None, then `feature_names_in_` is
354
+ used as the input feature names. If `feature_names_in_` is not
355
+ defined, then names are generated:
356
+ `[x0, x1, ..., x(n_features_in_ - 1)]`.
357
+ - If `input_features` is array-like, then `input_features` must
358
+ match `feature_names_in_` if `feature_names_in_` is defined.
359
+
360
+ Returns
361
+ -------
362
+ feature_names_out : ndarray of str objects
363
+ Transformed feature names.
364
+
365
+ - If `feature_names_out` is 'one-to-one', the input feature names
366
+ are returned (see `input_features` above). This requires
367
+ `feature_names_in_` and/or `n_features_in_` to be defined, which
368
+ is done automatically if `validate=True`. Alternatively, you can
369
+ set them in `func`.
370
+ - If `feature_names_out` is a callable, then it is called with two
371
+ arguments, `self` and `input_features`, and its return value is
372
+ returned by this method.
373
+ """
374
+ if hasattr(self, "n_features_in_") or input_features is not None:
375
+ input_features = _check_feature_names_in(self, input_features)
376
+ if self.feature_names_out == "one-to-one":
377
+ names_out = input_features
378
+ elif callable(self.feature_names_out):
379
+ names_out = self.feature_names_out(self, input_features)
380
+ else:
381
+ raise ValueError(
382
+ f"feature_names_out={self.feature_names_out!r} is invalid. "
383
+ 'It must either be "one-to-one" or a callable with two '
384
+ "arguments: the function transformer and an array-like of "
385
+ "input feature names. The callable must return an array-like "
386
+ "of output feature names."
387
+ )
388
+ return np.asarray(names_out, dtype=object)
389
+
390
+ def _transform(self, X, func=None, kw_args=None):
391
+ if func is None:
392
+ func = _identity
393
+
394
+ return func(X, **(kw_args if kw_args else {}))
395
+
396
+ def __sklearn_is_fitted__(self):
397
+ """Return True since FunctionTransformer is stateless."""
398
+ return True
399
+
400
+ def _more_tags(self):
401
+ return {"no_validation": not self.validate, "stateless": True}
402
+
403
+ def set_output(self, *, transform=None):
404
+ """Set output container.
405
+
406
+ See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
407
+ for an example on how to use the API.
408
+
409
+ Parameters
410
+ ----------
411
+ transform : {"default", "pandas", "polars"}, default=None
412
+ Configure output of `transform` and `fit_transform`.
413
+
414
+ - `"default"`: Default output format of a transformer
415
+ - `"pandas"`: DataFrame output
416
+ - `"polars"`: Polars output
417
+ - `None`: Transform configuration is unchanged
418
+
419
+ .. versionadded:: 1.4
420
+ `"polars"` option was added.
421
+
422
+ Returns
423
+ -------
424
+ self : estimator instance
425
+ Estimator instance.
426
+ """
427
+ if not hasattr(self, "_sklearn_output_config"):
428
+ self._sklearn_output_config = {}
429
+
430
+ self._sklearn_output_config["transform"] = transform
431
+ return self
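As a short illustration of the column-name handling in `transform` above (assumes pandas is installed and scikit-learn >= 1.2 for the `set_output` API; the frame and function are arbitrary):

import numpy as np
import pandas as pd
from sklearn.preprocessing import FunctionTransformer

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})

# feature_names_out="one-to-one" keeps the input names, so the consistency
# check in `transform` passes and the pandas output keeps columns "a" and "b".
log_tf = FunctionTransformer(np.log1p, feature_names_out="one-to-one")
log_tf.set_output(transform="pandas")
out = log_tf.fit_transform(df)
print(list(out.columns))            # ['a', 'b']
print(log_tf.get_feature_names_out())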
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_label.py ADDED
@@ -0,0 +1,951 @@
1
+ # Authors: Alexandre Gramfort <[email protected]>
2
+ # Mathieu Blondel <[email protected]>
3
+ # Olivier Grisel <[email protected]>
4
+ # Andreas Mueller <[email protected]>
5
+ # Joel Nothman <[email protected]>
6
+ # Hamzeh Alsalhi <[email protected]>
7
+ # License: BSD 3 clause
8
+
9
+ import array
10
+ import itertools
11
+ import warnings
12
+ from collections import defaultdict
13
+ from numbers import Integral
14
+
15
+ import numpy as np
16
+ import scipy.sparse as sp
17
+
18
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
19
+ from ..utils import column_or_1d
20
+ from ..utils._encode import _encode, _unique
21
+ from ..utils._param_validation import Interval, validate_params
22
+ from ..utils.multiclass import type_of_target, unique_labels
23
+ from ..utils.sparsefuncs import min_max_axis
24
+ from ..utils.validation import _num_samples, check_array, check_is_fitted
25
+
26
+ __all__ = [
27
+ "label_binarize",
28
+ "LabelBinarizer",
29
+ "LabelEncoder",
30
+ "MultiLabelBinarizer",
31
+ ]
32
+
33
+
34
+ class LabelEncoder(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
35
+ """Encode target labels with value between 0 and n_classes-1.
36
+
37
+ This transformer should be used to encode target values, *i.e.* `y`, and
38
+ not the input `X`.
39
+
40
+ Read more in the :ref:`User Guide <preprocessing_targets>`.
41
+
42
+ .. versionadded:: 0.12
43
+
44
+ Attributes
45
+ ----------
46
+ classes_ : ndarray of shape (n_classes,)
47
+ Holds the label for each class.
48
+
49
+ See Also
50
+ --------
51
+ OrdinalEncoder : Encode categorical features using an ordinal encoding
52
+ scheme.
53
+ OneHotEncoder : Encode categorical features as a one-hot numeric array.
54
+
55
+ Examples
56
+ --------
57
+ `LabelEncoder` can be used to normalize labels.
58
+
59
+ >>> from sklearn.preprocessing import LabelEncoder
60
+ >>> le = LabelEncoder()
61
+ >>> le.fit([1, 2, 2, 6])
62
+ LabelEncoder()
63
+ >>> le.classes_
64
+ array([1, 2, 6])
65
+ >>> le.transform([1, 1, 2, 6])
66
+ array([0, 0, 1, 2]...)
67
+ >>> le.inverse_transform([0, 0, 1, 2])
68
+ array([1, 1, 2, 6])
69
+
70
+ It can also be used to transform non-numerical labels (as long as they are
71
+ hashable and comparable) to numerical labels.
72
+
73
+ >>> le = LabelEncoder()
74
+ >>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
75
+ LabelEncoder()
76
+ >>> list(le.classes_)
77
+ ['amsterdam', 'paris', 'tokyo']
78
+ >>> le.transform(["tokyo", "tokyo", "paris"])
79
+ array([2, 2, 1]...)
80
+ >>> list(le.inverse_transform([2, 2, 1]))
81
+ ['tokyo', 'tokyo', 'paris']
82
+ """
83
+
84
+ def fit(self, y):
85
+ """Fit label encoder.
86
+
87
+ Parameters
88
+ ----------
89
+ y : array-like of shape (n_samples,)
90
+ Target values.
91
+
92
+ Returns
93
+ -------
94
+ self : returns an instance of self.
95
+ Fitted label encoder.
96
+ """
97
+ y = column_or_1d(y, warn=True)
98
+ self.classes_ = _unique(y)
99
+ return self
100
+
101
+ def fit_transform(self, y):
102
+ """Fit label encoder and return encoded labels.
103
+
104
+ Parameters
105
+ ----------
106
+ y : array-like of shape (n_samples,)
107
+ Target values.
108
+
109
+ Returns
110
+ -------
111
+ y : array-like of shape (n_samples,)
112
+ Encoded labels.
113
+ """
114
+ y = column_or_1d(y, warn=True)
115
+ self.classes_, y = _unique(y, return_inverse=True)
116
+ return y
117
+
118
+ def transform(self, y):
119
+ """Transform labels to normalized encoding.
120
+
121
+ Parameters
122
+ ----------
123
+ y : array-like of shape (n_samples,)
124
+ Target values.
125
+
126
+ Returns
127
+ -------
128
+ y : array-like of shape (n_samples,)
129
+ Labels as normalized encodings.
130
+ """
131
+ check_is_fitted(self)
132
+ y = column_or_1d(y, dtype=self.classes_.dtype, warn=True)
133
+ # transform of empty array is empty array
134
+ if _num_samples(y) == 0:
135
+ return np.array([])
136
+
137
+ return _encode(y, uniques=self.classes_)
138
+
139
+ def inverse_transform(self, y):
140
+ """Transform labels back to original encoding.
141
+
142
+ Parameters
143
+ ----------
144
+ y : ndarray of shape (n_samples,)
145
+ Target values.
146
+
147
+ Returns
148
+ -------
149
+ y : ndarray of shape (n_samples,)
150
+ Original encoding.
151
+ """
152
+ check_is_fitted(self)
153
+ y = column_or_1d(y, warn=True)
154
+ # inverse transform of empty array is empty array
155
+ if _num_samples(y) == 0:
156
+ return np.array([])
157
+
158
+ diff = np.setdiff1d(y, np.arange(len(self.classes_)))
159
+ if len(diff):
160
+ raise ValueError("y contains previously unseen labels: %s" % str(diff))
161
+ y = np.asarray(y)
162
+ return self.classes_[y]
163
+
164
+ def _more_tags(self):
165
+ return {"X_types": ["1dlabels"]}
166
+
167
+
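A quick sketch of the unseen-label guard in `inverse_transform` above (public `LabelEncoder` API; the labels are invented):

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(["cat", "dog", "dog"])
print(le.inverse_transform([0, 1]))  # ['cat' 'dog']

# A code never produced by fit trips the setdiff1d check above.
try:
    le.inverse_transform([5])
except ValueError as exc:
    print(exc)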
168
+ class LabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
169
+ """Binarize labels in a one-vs-all fashion.
170
+
171
+ Several regression and binary classification algorithms are
172
+ available in scikit-learn. A simple way to extend these algorithms
173
+ to the multi-class classification case is to use the so-called
174
+ one-vs-all scheme.
175
+
176
+ At learning time, this simply consists in learning one regressor
177
+ or binary classifier per class. In doing so, one needs to convert
178
+ multi-class labels to binary labels (belongs or does not belong
179
+ to the class). `LabelBinarizer` makes this process easy with the
180
+ transform method.
181
+
182
+ At prediction time, one assigns the class for which the corresponding
183
+ model gave the greatest confidence. `LabelBinarizer` makes this easy
184
+ with the :meth:`inverse_transform` method.
185
+
186
+ Read more in the :ref:`User Guide <preprocessing_targets>`.
187
+
188
+ Parameters
189
+ ----------
190
+ neg_label : int, default=0
191
+ Value with which negative labels must be encoded.
192
+
193
+ pos_label : int, default=1
194
+ Value with which positive labels must be encoded.
195
+
196
+ sparse_output : bool, default=False
197
+ True if the returned array from transform is desired to be in sparse
198
+ CSR format.
199
+
200
+ Attributes
201
+ ----------
202
+ classes_ : ndarray of shape (n_classes,)
203
+ Holds the label for each class.
204
+
205
+ y_type_ : str
206
+ Represents the type of the target data as evaluated by
207
+ :func:`~sklearn.utils.multiclass.type_of_target`. Possible type are
208
+ 'continuous', 'continuous-multioutput', 'binary', 'multiclass',
209
+ 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
210
+
211
+ sparse_input_ : bool
212
+ `True` if the input data to transform is given as a sparse matrix,
213
+ `False` otherwise.
214
+
215
+ See Also
216
+ --------
217
+ label_binarize : Function to perform the transform operation of
218
+ LabelBinarizer with fixed classes.
219
+ OneHotEncoder : Encode categorical features using a one-hot aka one-of-K
220
+ scheme.
221
+
222
+ Examples
223
+ --------
224
+ >>> from sklearn.preprocessing import LabelBinarizer
225
+ >>> lb = LabelBinarizer()
226
+ >>> lb.fit([1, 2, 6, 4, 2])
227
+ LabelBinarizer()
228
+ >>> lb.classes_
229
+ array([1, 2, 4, 6])
230
+ >>> lb.transform([1, 6])
231
+ array([[1, 0, 0, 0],
232
+ [0, 0, 0, 1]])
233
+
234
+ Binary targets transform to a column vector
235
+
236
+ >>> lb = LabelBinarizer()
237
+ >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
238
+ array([[1],
239
+ [0],
240
+ [0],
241
+ [1]])
242
+
243
+ Passing a 2D matrix for multilabel classification
244
+
245
+ >>> import numpy as np
246
+ >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
247
+ LabelBinarizer()
248
+ >>> lb.classes_
249
+ array([0, 1, 2])
250
+ >>> lb.transform([0, 1, 2, 1])
251
+ array([[1, 0, 0],
252
+ [0, 1, 0],
253
+ [0, 0, 1],
254
+ [0, 1, 0]])
255
+ """
256
+
257
+ _parameter_constraints: dict = {
258
+ "neg_label": [Integral],
259
+ "pos_label": [Integral],
260
+ "sparse_output": ["boolean"],
261
+ }
262
+
263
+ def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False):
264
+ self.neg_label = neg_label
265
+ self.pos_label = pos_label
266
+ self.sparse_output = sparse_output
267
+
268
+ @_fit_context(prefer_skip_nested_validation=True)
269
+ def fit(self, y):
270
+ """Fit label binarizer.
271
+
272
+ Parameters
273
+ ----------
274
+ y : ndarray of shape (n_samples,) or (n_samples, n_classes)
275
+ Target values. The 2-d matrix should only contain 0 and 1,
276
+ representing multilabel classification.
277
+
278
+ Returns
279
+ -------
280
+ self : object
281
+ Returns the instance itself.
282
+ """
283
+ if self.neg_label >= self.pos_label:
284
+ raise ValueError(
285
+ f"neg_label={self.neg_label} must be strictly less than "
286
+ f"pos_label={self.pos_label}."
287
+ )
288
+
289
+ if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0):
290
+ raise ValueError(
291
+ "Sparse binarization is only supported with non "
292
+ "zero pos_label and zero neg_label, got "
293
+ f"pos_label={self.pos_label} and neg_label={self.neg_label}"
294
+ )
295
+
296
+ self.y_type_ = type_of_target(y, input_name="y")
297
+
298
+ if "multioutput" in self.y_type_:
299
+ raise ValueError(
300
+ "Multioutput target data is not supported with label binarization"
301
+ )
302
+ if _num_samples(y) == 0:
303
+ raise ValueError("y has 0 samples: %r" % y)
304
+
305
+ self.sparse_input_ = sp.issparse(y)
306
+ self.classes_ = unique_labels(y)
307
+ return self
308
+
309
+ def fit_transform(self, y):
310
+ """Fit label binarizer/transform multi-class labels to binary labels.
311
+
312
+ The output of transform is sometimes referred to as
313
+ the 1-of-K coding scheme.
314
+
315
+ Parameters
316
+ ----------
317
+ y : {ndarray, sparse matrix} of shape (n_samples,) or \
318
+ (n_samples, n_classes)
319
+ Target values. The 2-d matrix should only contain 0 and 1,
320
+ representing multilabel classification. Sparse matrix can be
321
+ CSR, CSC, COO, DOK, or LIL.
322
+
323
+ Returns
324
+ -------
325
+ Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
326
+ Shape will be (n_samples, 1) for binary problems. Sparse matrix
327
+ will be of CSR format.
328
+ """
329
+ return self.fit(y).transform(y)
330
+
331
+ def transform(self, y):
332
+ """Transform multi-class labels to binary labels.
333
+
334
+ The output of transform is sometimes referred to by some authors as
335
+ the 1-of-K coding scheme.
336
+
337
+ Parameters
338
+ ----------
339
+ y : {array, sparse matrix} of shape (n_samples,) or \
340
+ (n_samples, n_classes)
341
+ Target values. The 2-d matrix should only contain 0 and 1,
342
+ representing multilabel classification. Sparse matrix can be
343
+ CSR, CSC, COO, DOK, or LIL.
344
+
345
+ Returns
346
+ -------
347
+ Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
348
+ Shape will be (n_samples, 1) for binary problems. Sparse matrix
349
+ will be of CSR format.
350
+ """
351
+ check_is_fitted(self)
352
+
353
+ y_is_multilabel = type_of_target(y).startswith("multilabel")
354
+ if y_is_multilabel and not self.y_type_.startswith("multilabel"):
355
+ raise ValueError("The object was not fitted with multilabel input.")
356
+
357
+ return label_binarize(
358
+ y,
359
+ classes=self.classes_,
360
+ pos_label=self.pos_label,
361
+ neg_label=self.neg_label,
362
+ sparse_output=self.sparse_output,
363
+ )
364
+
365
+ def inverse_transform(self, Y, threshold=None):
366
+ """Transform binary labels back to multi-class labels.
367
+
368
+ Parameters
369
+ ----------
370
+ Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
371
+ Target values. All sparse matrices are converted to CSR before
372
+ inverse transformation.
373
+
374
+ threshold : float, default=None
375
+ Threshold used in the binary and multi-label cases.
376
+
377
+ Use 0 when ``Y`` contains the output of :term:`decision_function`
378
+ (classifier).
379
+ Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.
380
+
381
+ If None, the threshold is assumed to be half way between
382
+ neg_label and pos_label.
383
+
384
+ Returns
385
+ -------
386
+ y : {ndarray, sparse matrix} of shape (n_samples,)
387
+ Target values. Sparse matrix will be of CSR format.
388
+
389
+ Notes
390
+ -----
391
+ In the case when the binary labels are fractional
392
+ (probabilistic), :meth:`inverse_transform` chooses the class with the
393
+ greatest value. Typically, this allows using the output of a
394
+ linear model's :term:`decision_function` method directly as the input
395
+ of :meth:`inverse_transform`.
396
+ """
397
+ check_is_fitted(self)
398
+
399
+ if threshold is None:
400
+ threshold = (self.pos_label + self.neg_label) / 2.0
401
+
402
+ if self.y_type_ == "multiclass":
403
+ y_inv = _inverse_binarize_multiclass(Y, self.classes_)
404
+ else:
405
+ y_inv = _inverse_binarize_thresholding(
406
+ Y, self.y_type_, self.classes_, threshold
407
+ )
408
+
409
+ if self.sparse_input_:
410
+ y_inv = sp.csr_matrix(y_inv)
411
+ elif sp.issparse(y_inv):
412
+ y_inv = y_inv.toarray()
413
+
414
+ return y_inv
415
+
416
+ def _more_tags(self):
417
+ return {"X_types": ["1dlabels"]}
418
+
419
+
420
+ @validate_params(
421
+ {
422
+ "y": ["array-like"],
423
+ "classes": ["array-like"],
424
+ "neg_label": [Interval(Integral, None, None, closed="neither")],
425
+ "pos_label": [Interval(Integral, None, None, closed="neither")],
426
+ "sparse_output": ["boolean"],
427
+ },
428
+ prefer_skip_nested_validation=True,
429
+ )
430
+ def label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False):
431
+ """Binarize labels in a one-vs-all fashion.
432
+
433
+ Several regression and binary classification algorithms are
434
+ available in scikit-learn. A simple way to extend these algorithms
435
+ to the multi-class classification case is to use the so-called
436
+ one-vs-all scheme.
437
+
438
+ This function makes it possible to compute this transformation for a
439
+ fixed set of class labels known ahead of time.
440
+
441
+ Parameters
442
+ ----------
443
+ y : array-like
444
+ Sequence of integer labels or multilabel data to encode.
445
+
446
+ classes : array-like of shape (n_classes,)
447
+ Uniquely holds the label for each class.
448
+
449
+ neg_label : int, default=0
450
+ Value with which negative labels must be encoded.
451
+
452
+ pos_label : int, default=1
453
+ Value with which positive labels must be encoded.
454
+
455
+ sparse_output : bool, default=False
456
+ Set to true if output binary array is desired in CSR sparse format.
457
+
458
+ Returns
459
+ -------
460
+ Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
461
+ Shape will be (n_samples, 1) for binary problems. Sparse matrix will
462
+ be of CSR format.
463
+
464
+ See Also
465
+ --------
466
+ LabelBinarizer : Class used to wrap the functionality of label_binarize and
467
+ allow for fitting to classes independently of the transform operation.
468
+
469
+ Examples
470
+ --------
471
+ >>> from sklearn.preprocessing import label_binarize
472
+ >>> label_binarize([1, 6], classes=[1, 2, 4, 6])
473
+ array([[1, 0, 0, 0],
474
+ [0, 0, 0, 1]])
475
+
476
+ The class ordering is preserved:
477
+
478
+ >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
479
+ array([[1, 0, 0, 0],
480
+ [0, 1, 0, 0]])
481
+
482
+ Binary targets transform to a column vector
483
+
484
+ >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
485
+ array([[1],
486
+ [0],
487
+ [0],
488
+ [1]])
489
+ """
490
+ if not isinstance(y, list):
491
+ # XXX Workaround that will be removed when list of list format is
492
+ # dropped
493
+ y = check_array(
494
+ y, input_name="y", accept_sparse="csr", ensure_2d=False, dtype=None
495
+ )
496
+ else:
497
+ if _num_samples(y) == 0:
498
+ raise ValueError("y has 0 samples: %r" % y)
499
+ if neg_label >= pos_label:
500
+ raise ValueError(
501
+ "neg_label={0} must be strictly less than pos_label={1}.".format(
502
+ neg_label, pos_label
503
+ )
504
+ )
505
+
506
+ if sparse_output and (pos_label == 0 or neg_label != 0):
507
+ raise ValueError(
508
+ "Sparse binarization is only supported with non "
509
+ "zero pos_label and zero neg_label, got "
510
+ "pos_label={0} and neg_label={1}"
511
+ "".format(pos_label, neg_label)
512
+ )
513
+
514
+ # To account for pos_label == 0 in the dense case
515
+ pos_switch = pos_label == 0
516
+ if pos_switch:
517
+ pos_label = -neg_label
518
+
519
+ y_type = type_of_target(y)
520
+ if "multioutput" in y_type:
521
+ raise ValueError(
522
+ "Multioutput target data is not supported with label binarization"
523
+ )
524
+ if y_type == "unknown":
525
+ raise ValueError("The type of target data is not known")
526
+
527
+ n_samples = y.shape[0] if sp.issparse(y) else len(y)
528
+ n_classes = len(classes)
529
+ classes = np.asarray(classes)
530
+
531
+ if y_type == "binary":
532
+ if n_classes == 1:
533
+ if sparse_output:
534
+ return sp.csr_matrix((n_samples, 1), dtype=int)
535
+ else:
536
+ Y = np.zeros((len(y), 1), dtype=int)
537
+ Y += neg_label
538
+ return Y
539
+ elif len(classes) >= 3:
540
+ y_type = "multiclass"
541
+
542
+ sorted_class = np.sort(classes)
543
+ if y_type == "multilabel-indicator":
544
+ y_n_classes = y.shape[1] if hasattr(y, "shape") else len(y[0])
545
+ if classes.size != y_n_classes:
546
+ raise ValueError(
547
+ "classes {0} mismatch with the labels {1} found in the data".format(
548
+ classes, unique_labels(y)
549
+ )
550
+ )
551
+
552
+ if y_type in ("binary", "multiclass"):
553
+ y = column_or_1d(y)
554
+
555
+ # pick out the known labels from y
556
+ y_in_classes = np.isin(y, classes)
557
+ y_seen = y[y_in_classes]
558
+ indices = np.searchsorted(sorted_class, y_seen)
559
+ indptr = np.hstack((0, np.cumsum(y_in_classes)))
560
+
561
+ data = np.empty_like(indices)
562
+ data.fill(pos_label)
563
+ Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes))
564
+ elif y_type == "multilabel-indicator":
565
+ Y = sp.csr_matrix(y)
566
+ if pos_label != 1:
567
+ data = np.empty_like(Y.data)
568
+ data.fill(pos_label)
569
+ Y.data = data
570
+ else:
571
+ raise ValueError(
572
+ "%s target data is not supported with label binarization" % y_type
573
+ )
574
+
575
+ if not sparse_output:
576
+ Y = Y.toarray()
577
+ Y = Y.astype(int, copy=False)
578
+
579
+ if neg_label != 0:
580
+ Y[Y == 0] = neg_label
581
+
582
+ if pos_switch:
583
+ Y[Y == pos_label] = 0
584
+ else:
585
+ Y.data = Y.data.astype(int, copy=False)
586
+
587
+ # preserve label ordering
588
+ if np.any(classes != sorted_class):
589
+ indices = np.searchsorted(sorted_class, classes)
590
+ Y = Y[:, indices]
591
+
592
+ if y_type == "binary":
593
+ if sparse_output:
594
+ Y = Y.getcol(-1)
595
+ else:
596
+ Y = Y[:, -1].reshape((-1, 1))
597
+
598
+ return Y
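As an illustrative sketch (standard scikit-learn assumed), the function above binarizes against a fixed, known set of classes and honours the neg_label and sparse_output options checked earlier:

import numpy as np
from sklearn.preprocessing import label_binarize

label_binarize([1, 6], classes=[1, 2, 4, 6], neg_label=-1)
# array([[ 1, -1, -1, -1],
#        [-1, -1, -1,  1]])

# sparse_output=True requires pos_label != 0 and neg_label == 0 (see the check above).
Y = label_binarize([1, 6], classes=[1, 2, 4, 6], sparse_output=True)
Y.toarray()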
599
+
600
+
601
+ def _inverse_binarize_multiclass(y, classes):
602
+ """Inverse label binarization transformation for multiclass.
603
+
604
+ Multiclass uses the maximal score instead of a threshold.
605
+ """
606
+ classes = np.asarray(classes)
607
+
608
+ if sp.issparse(y):
609
+ # Find the argmax for each row in y where y is a CSR matrix
610
+
611
+ y = y.tocsr()
612
+ n_samples, n_outputs = y.shape
613
+ outputs = np.arange(n_outputs)
614
+ row_max = min_max_axis(y, 1)[1]
615
+ row_nnz = np.diff(y.indptr)
616
+
617
+ y_data_repeated_max = np.repeat(row_max, row_nnz)
618
+ # picks out all indices obtaining the maximum per row
619
+ y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
620
+
621
+ # For corner case where last row has a max of 0
622
+ if row_max[-1] == 0:
623
+ y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
624
+
625
+ # Gets the index of the first argmax in each row from y_i_all_argmax
626
+ index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
627
+ # first argmax of each row
628
+ y_ind_ext = np.append(y.indices, [0])
629
+ y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
630
+ # Handle rows of all 0
631
+ y_i_argmax[np.where(row_nnz == 0)[0]] = 0
632
+
633
+ # Handles rows with max of 0 that contain negative numbers
634
+ samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)]
635
+ for i in samples:
636
+ ind = y.indices[y.indptr[i] : y.indptr[i + 1]]
637
+ y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
638
+
639
+ return classes[y_i_argmax]
640
+ else:
641
+ return classes.take(y.argmax(axis=1), mode="clip")
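A hedged illustration of the dense branch above: the class with the per-row maximal score wins, which is why decision_function scores can be fed straight to inverse_transform in the multiclass case:

import numpy as np

scores = np.array([[0.1, 0.8, 0.1],
                   [0.5, 0.2, 0.3]])
classes = np.array(["a", "b", "c"])
classes.take(scores.argmax(axis=1), mode="clip")   # -> ['b', 'a']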
642
+
643
+
644
+ def _inverse_binarize_thresholding(y, output_type, classes, threshold):
645
+ """Inverse label binarization transformation using thresholding."""
646
+
647
+ if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
648
+ raise ValueError("output_type='binary', but y.shape = {0}".format(y.shape))
649
+
650
+ if output_type != "binary" and y.shape[1] != len(classes):
651
+ raise ValueError(
652
+ "The number of class is not equal to the number of dimension of y."
653
+ )
654
+
655
+ classes = np.asarray(classes)
656
+
657
+ # Perform thresholding
658
+ if sp.issparse(y):
659
+ if threshold > 0:
660
+ if y.format not in ("csr", "csc"):
661
+ y = y.tocsr()
662
+ y.data = np.array(y.data > threshold, dtype=int)
663
+ y.eliminate_zeros()
664
+ else:
665
+ y = np.array(y.toarray() > threshold, dtype=int)
666
+ else:
667
+ y = np.array(y > threshold, dtype=int)
668
+
669
+ # Inverse transform data
670
+ if output_type == "binary":
671
+ if sp.issparse(y):
672
+ y = y.toarray()
673
+ if y.ndim == 2 and y.shape[1] == 2:
674
+ return classes[y[:, 1]]
675
+ else:
676
+ if len(classes) == 1:
677
+ return np.repeat(classes[0], len(y))
678
+ else:
679
+ return classes[y.ravel()]
680
+
681
+ elif output_type == "multilabel-indicator":
682
+ return y
683
+
684
+ else:
685
+ raise ValueError("{0} format is not supported".format(output_type))
686
+
687
+
688
+ class MultiLabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
689
+ """Transform between iterable of iterables and a multilabel format.
690
+
691
+ Although a list of sets or tuples is a very intuitive format for multilabel
692
+ data, it is unwieldy to process. This transformer converts between this
693
+ intuitive format and the supported multilabel format: a (samples x classes)
694
+ binary matrix indicating the presence of a class label.
695
+
696
+ Parameters
697
+ ----------
698
+ classes : array-like of shape (n_classes,), default=None
699
+ Indicates an ordering for the class labels.
700
+ All entries should be unique (cannot contain duplicate classes).
701
+
702
+ sparse_output : bool, default=False
703
+ Set to True if output binary array is desired in CSR sparse format.
704
+
705
+ Attributes
706
+ ----------
707
+ classes_ : ndarray of shape (n_classes,)
708
+ A copy of the `classes` parameter when provided.
709
+ Otherwise it corresponds to the sorted set of classes found
710
+ when fitting.
711
+
712
+ See Also
713
+ --------
714
+ OneHotEncoder : Encode categorical features using a one-hot aka one-of-K
715
+ scheme.
716
+
717
+ Examples
718
+ --------
719
+ >>> from sklearn.preprocessing import MultiLabelBinarizer
720
+ >>> mlb = MultiLabelBinarizer()
721
+ >>> mlb.fit_transform([(1, 2), (3,)])
722
+ array([[1, 1, 0],
723
+ [0, 0, 1]])
724
+ >>> mlb.classes_
725
+ array([1, 2, 3])
726
+
727
+ >>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
728
+ array([[0, 1, 1],
729
+ [1, 0, 0]])
730
+ >>> list(mlb.classes_)
731
+ ['comedy', 'sci-fi', 'thriller']
732
+
733
+ A common mistake is to pass in a list, which leads to the following issue:
734
+
735
+ >>> mlb = MultiLabelBinarizer()
736
+ >>> mlb.fit(['sci-fi', 'thriller', 'comedy'])
737
+ MultiLabelBinarizer()
738
+ >>> mlb.classes_
739
+ array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't',
740
+ 'y'], dtype=object)
741
+
742
+ To correct this, the list of labels should be passed in as:
743
+
744
+ >>> mlb = MultiLabelBinarizer()
745
+ >>> mlb.fit([['sci-fi', 'thriller', 'comedy']])
746
+ MultiLabelBinarizer()
747
+ >>> mlb.classes_
748
+ array(['comedy', 'sci-fi', 'thriller'], dtype=object)
749
+ """
750
+
751
+ _parameter_constraints: dict = {
752
+ "classes": ["array-like", None],
753
+ "sparse_output": ["boolean"],
754
+ }
755
+
756
+ def __init__(self, *, classes=None, sparse_output=False):
757
+ self.classes = classes
758
+ self.sparse_output = sparse_output
759
+
760
+ @_fit_context(prefer_skip_nested_validation=True)
761
+ def fit(self, y):
762
+ """Fit the label sets binarizer, storing :term:`classes_`.
763
+
764
+ Parameters
765
+ ----------
766
+ y : iterable of iterables
767
+ A set of labels (any orderable and hashable object) for each
768
+ sample. If the `classes` parameter is set, `y` will not be
769
+ iterated.
770
+
771
+ Returns
772
+ -------
773
+ self : object
774
+ Fitted estimator.
775
+ """
776
+ self._cached_dict = None
777
+
778
+ if self.classes is None:
779
+ classes = sorted(set(itertools.chain.from_iterable(y)))
780
+ elif len(set(self.classes)) < len(self.classes):
781
+ raise ValueError(
782
+ "The classes argument contains duplicate "
783
+ "classes. Remove these duplicates before passing "
784
+ "them to MultiLabelBinarizer."
785
+ )
786
+ else:
787
+ classes = self.classes
788
+ dtype = int if all(isinstance(c, int) for c in classes) else object
789
+ self.classes_ = np.empty(len(classes), dtype=dtype)
790
+ self.classes_[:] = classes
791
+ return self
792
+
793
+ @_fit_context(prefer_skip_nested_validation=True)
794
+ def fit_transform(self, y):
795
+ """Fit the label sets binarizer and transform the given label sets.
796
+
797
+ Parameters
798
+ ----------
799
+ y : iterable of iterables
800
+ A set of labels (any orderable and hashable object) for each
801
+ sample. If the `classes` parameter is set, `y` will not be
802
+ iterated.
803
+
804
+ Returns
805
+ -------
806
+ y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
807
+ A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
808
+ is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
809
+ format.
810
+ """
811
+ if self.classes is not None:
812
+ return self.fit(y).transform(y)
813
+
814
+ self._cached_dict = None
815
+
816
+ # Automatically increment on new class
817
+ class_mapping = defaultdict(int)
818
+ class_mapping.default_factory = class_mapping.__len__
819
+ yt = self._transform(y, class_mapping)
820
+
821
+ # sort classes and reorder columns
822
+ tmp = sorted(class_mapping, key=class_mapping.get)
823
+
824
+ # (make safe for tuples)
825
+ dtype = int if all(isinstance(c, int) for c in tmp) else object
826
+ class_mapping = np.empty(len(tmp), dtype=dtype)
827
+ class_mapping[:] = tmp
828
+ self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
829
+ # ensure yt.indices keeps its current dtype
830
+ yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype)
831
+
832
+ if not self.sparse_output:
833
+ yt = yt.toarray()
834
+
835
+ return yt
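The auto-incrementing mapping used in fit_transform above, shown in isolation (a sketch, not library code): a defaultdict whose factory is its own __len__ hands out a fresh column index the first time a label is seen:

from collections import defaultdict

class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
for label in ["comedy", "sci-fi", "comedy", "thriller"]:
    class_mapping[label]
dict(class_mapping)   # {'comedy': 0, 'sci-fi': 1, 'thriller': 2}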
836
+
837
+ def transform(self, y):
838
+ """Transform the given label sets.
839
+
840
+ Parameters
841
+ ----------
842
+ y : iterable of iterables
843
+ A set of labels (any orderable and hashable object) for each
844
+ sample. If the `classes` parameter is set, `y` will not be
845
+ iterated.
846
+
847
+ Returns
848
+ -------
849
+ y_indicator : array or CSR matrix, shape (n_samples, n_classes)
850
+ A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
851
+ `y[i]`, and 0 otherwise.
852
+ """
853
+ check_is_fitted(self)
854
+
855
+ class_to_index = self._build_cache()
856
+ yt = self._transform(y, class_to_index)
857
+
858
+ if not self.sparse_output:
859
+ yt = yt.toarray()
860
+
861
+ return yt
862
+
863
+ def _build_cache(self):
864
+ if self._cached_dict is None:
865
+ self._cached_dict = dict(zip(self.classes_, range(len(self.classes_))))
866
+
867
+ return self._cached_dict
868
+
869
+ def _transform(self, y, class_mapping):
870
+ """Transforms the label sets with a given mapping.
871
+
872
+ Parameters
873
+ ----------
874
+ y : iterable of iterables
875
+ A set of labels (any orderable and hashable object) for each
876
+ sample. If the `classes` parameter is set, `y` will not be
877
+ iterated.
878
+
879
+ class_mapping : Mapping
880
+ Maps from label to column index in label indicator matrix.
881
+
882
+ Returns
883
+ -------
884
+ y_indicator : sparse matrix of shape (n_samples, n_classes)
885
+ Label indicator matrix. Will be of CSR format.
886
+ """
887
+ indices = array.array("i")
888
+ indptr = array.array("i", [0])
889
+ unknown = set()
890
+ for labels in y:
891
+ index = set()
892
+ for label in labels:
893
+ try:
894
+ index.add(class_mapping[label])
895
+ except KeyError:
896
+ unknown.add(label)
897
+ indices.extend(index)
898
+ indptr.append(len(indices))
899
+ if unknown:
900
+ warnings.warn(
901
+ "unknown class(es) {0} will be ignored".format(sorted(unknown, key=str))
902
+ )
903
+ data = np.ones(len(indices), dtype=int)
904
+
905
+ return sp.csr_matrix(
906
+ (data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping))
907
+ )
908
+
909
+ def inverse_transform(self, yt):
910
+ """Transform the given indicator matrix into label sets.
911
+
912
+ Parameters
913
+ ----------
914
+ yt : {ndarray, sparse matrix} of shape (n_samples, n_classes)
915
+ A matrix containing only 1s and 0s.
916
+
917
+ Returns
918
+ -------
919
+ y : list of tuples
920
+ The set of labels for each sample such that `y[i]` consists of
921
+ `classes_[j]` for each `yt[i, j] == 1`.
922
+ """
923
+ check_is_fitted(self)
924
+
925
+ if yt.shape[1] != len(self.classes_):
926
+ raise ValueError(
927
+ "Expected indicator for {0} classes, but got {1}".format(
928
+ len(self.classes_), yt.shape[1]
929
+ )
930
+ )
931
+
932
+ if sp.issparse(yt):
933
+ yt = yt.tocsr()
934
+ if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
935
+ raise ValueError("Expected only 0s and 1s in label indicator.")
936
+ return [
937
+ tuple(self.classes_.take(yt.indices[start:end]))
938
+ for start, end in zip(yt.indptr[:-1], yt.indptr[1:])
939
+ ]
940
+ else:
941
+ unexpected = np.setdiff1d(yt, [0, 1])
942
+ if len(unexpected) > 0:
943
+ raise ValueError(
944
+ "Expected only 0s and 1s in label indicator. Also got {0}".format(
945
+ unexpected
946
+ )
947
+ )
948
+ return [tuple(self.classes_.compress(indicators)) for indicators in yt]
949
+
950
+ def _more_tags(self):
951
+ return {"X_types": ["2dlabels"]}
llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py ADDED
@@ -0,0 +1,1172 @@
1
+ """
2
+ This file contains preprocessing tools based on polynomials.
3
+ """
4
+ import collections
5
+ from itertools import chain, combinations
6
+ from itertools import combinations_with_replacement as combinations_w_r
7
+ from numbers import Integral
8
+
9
+ import numpy as np
10
+ from scipy import sparse
11
+ from scipy.interpolate import BSpline
12
+ from scipy.special import comb
13
+
14
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
15
+ from ..utils import check_array
16
+ from ..utils._param_validation import Interval, StrOptions
17
+ from ..utils.fixes import parse_version, sp_version
18
+ from ..utils.stats import _weighted_percentile
19
+ from ..utils.validation import (
20
+ FLOAT_DTYPES,
21
+ _check_feature_names_in,
22
+ _check_sample_weight,
23
+ check_is_fitted,
24
+ )
25
+ from ._csr_polynomial_expansion import (
26
+ _calc_expanded_nnz,
27
+ _calc_total_nnz,
28
+ _csr_polynomial_expansion,
29
+ )
30
+
31
+ __all__ = [
32
+ "PolynomialFeatures",
33
+ "SplineTransformer",
34
+ ]
35
+
36
+
37
+ def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0):
38
+ """Helper function for creating and appending sparse expansion matrices"""
39
+
40
+ total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg)
41
+ expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg)
42
+
43
+ if expanded_col == 0:
44
+ return None
45
+ # This only checks whether each block needs 64bit integers upon
46
+ # expansion. We prefer to keep int32 indexing where we can,
47
+ # since currently SciPy's CSR construction downcasts when possible,
48
+ # so we prefer to avoid an unnecessary cast. The dtype may still
49
+ # change in the concatenation process if needed.
50
+ # See: https://github.com/scipy/scipy/issues/16569
51
+ max_indices = expanded_col - 1
52
+ max_indptr = total_nnz
53
+ max_int32 = np.iinfo(np.int32).max
54
+ needs_int64 = max(max_indices, max_indptr) > max_int32
55
+ index_dtype = np.int64 if needs_int64 else np.int32
56
+
57
+ # This is a pretty specific bug that is hard to work around by a user,
58
+ # hence we do not detail the entire bug and all possible avoidance
59
+ # mechnasisms. Instead we recommend upgrading scipy or shrinking their data.
60
+ cumulative_size += expanded_col
61
+ if (
62
+ sp_version < parse_version("1.8.0")
63
+ and cumulative_size - 1 > max_int32
64
+ and not needs_int64
65
+ ):
66
+ raise ValueError(
67
+ "In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`"
68
+ " sometimes produces negative columns when the output shape contains"
69
+ " `n_cols` too large to be represented by a 32bit signed"
70
+ " integer. To avoid this error, either use a version"
71
+ " of scipy `>=1.8.0` or alter the `PolynomialFeatures`"
72
+ " transformer to produce fewer than 2^31 output features."
73
+ )
74
+
75
+ # Result of the expansion, modified in place by the
76
+ # `_csr_polynomial_expansion` routine.
77
+ expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype)
78
+ expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype)
79
+ expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype)
80
+ _csr_polynomial_expansion(
81
+ X.data,
82
+ X.indices,
83
+ X.indptr,
84
+ X.shape[1],
85
+ expanded_data,
86
+ expanded_indices,
87
+ expanded_indptr,
88
+ interaction_only,
89
+ deg,
90
+ )
91
+ return sparse.csr_matrix(
92
+ (expanded_data, expanded_indices, expanded_indptr),
93
+ shape=(X.indptr.shape[0] - 1, expanded_col),
94
+ dtype=X.dtype,
95
+ )
96
+
97
+
98
+ class PolynomialFeatures(TransformerMixin, BaseEstimator):
99
+ """Generate polynomial and interaction features.
100
+
101
+ Generate a new feature matrix consisting of all polynomial combinations
102
+ of the features with degree less than or equal to the specified degree.
103
+ For example, if an input sample is two dimensional and of the form
104
+ [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
105
+
106
+ Read more in the :ref:`User Guide <polynomial_features>`.
107
+
108
+ Parameters
109
+ ----------
110
+ degree : int or tuple (min_degree, max_degree), default=2
111
+ If a single int is given, it specifies the maximal degree of the
112
+ polynomial features. If a tuple `(min_degree, max_degree)` is passed,
113
+ then `min_degree` is the minimum and `max_degree` is the maximum
114
+ polynomial degree of the generated features. Note that `min_degree=0`
115
+ and `min_degree=1` are equivalent as outputting the degree zero term is
116
+ determined by `include_bias`.
117
+
118
+ interaction_only : bool, default=False
119
+ If `True`, only interaction features are produced: features that are
120
+ products of at most `degree` *distinct* input features, i.e. terms with
121
+ power of 2 or higher of the same input feature are excluded:
122
+
123
+ - included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc.
124
+ - excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc.
125
+
126
+ include_bias : bool, default=True
127
+ If `True` (default), then include a bias column, the feature in which
128
+ all polynomial powers are zero (i.e. a column of ones - acts as an
129
+ intercept term in a linear model).
130
+
131
+ order : {'C', 'F'}, default='C'
132
+ Order of output array in the dense case. `'F'` order is faster to
133
+ compute, but may slow down subsequent estimators.
134
+
135
+ .. versionadded:: 0.21
136
+
137
+ Attributes
138
+ ----------
139
+ powers_ : ndarray of shape (`n_output_features_`, `n_features_in_`)
140
+ `powers_[i, j]` is the exponent of the jth input in the ith output.
141
+
142
+ n_features_in_ : int
143
+ Number of features seen during :term:`fit`.
144
+
145
+ .. versionadded:: 0.24
146
+
147
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
148
+ Names of features seen during :term:`fit`. Defined only when `X`
149
+ has feature names that are all strings.
150
+
151
+ .. versionadded:: 1.0
152
+
153
+ n_output_features_ : int
154
+ The total number of polynomial output features. The number of output
155
+ features is computed by iterating over all suitably sized combinations
156
+ of input features.
157
+
158
+ See Also
159
+ --------
160
+ SplineTransformer : Transformer that generates univariate B-spline bases
161
+ for features.
162
+
163
+ Notes
164
+ -----
165
+ Be aware that the number of features in the output array scales
166
+ polynomially in the number of features of the input array, and
167
+ exponentially in the degree. High degrees can cause overfitting.
168
+
169
+ See :ref:`examples/linear_model/plot_polynomial_interpolation.py
170
+ <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
171
+
172
+ Examples
173
+ --------
174
+ >>> import numpy as np
175
+ >>> from sklearn.preprocessing import PolynomialFeatures
176
+ >>> X = np.arange(6).reshape(3, 2)
177
+ >>> X
178
+ array([[0, 1],
179
+ [2, 3],
180
+ [4, 5]])
181
+ >>> poly = PolynomialFeatures(2)
182
+ >>> poly.fit_transform(X)
183
+ array([[ 1., 0., 1., 0., 0., 1.],
184
+ [ 1., 2., 3., 4., 6., 9.],
185
+ [ 1., 4., 5., 16., 20., 25.]])
186
+ >>> poly = PolynomialFeatures(interaction_only=True)
187
+ >>> poly.fit_transform(X)
188
+ array([[ 1., 0., 1., 0.],
189
+ [ 1., 2., 3., 6.],
190
+ [ 1., 4., 5., 20.]])
191
+ """
192
+
193
+ _parameter_constraints: dict = {
194
+ "degree": [Interval(Integral, 0, None, closed="left"), "array-like"],
195
+ "interaction_only": ["boolean"],
196
+ "include_bias": ["boolean"],
197
+ "order": [StrOptions({"C", "F"})],
198
+ }
199
+
200
+ def __init__(
201
+ self, degree=2, *, interaction_only=False, include_bias=True, order="C"
202
+ ):
203
+ self.degree = degree
204
+ self.interaction_only = interaction_only
205
+ self.include_bias = include_bias
206
+ self.order = order
207
+
208
+ @staticmethod
209
+ def _combinations(
210
+ n_features, min_degree, max_degree, interaction_only, include_bias
211
+ ):
212
+ comb = combinations if interaction_only else combinations_w_r
213
+ start = max(1, min_degree)
214
+ iter = chain.from_iterable(
215
+ comb(range(n_features), i) for i in range(start, max_degree + 1)
216
+ )
217
+ if include_bias:
218
+ iter = chain(comb(range(n_features), 0), iter)
219
+ return iter
220
+
221
+ @staticmethod
222
+ def _num_combinations(
223
+ n_features, min_degree, max_degree, interaction_only, include_bias
224
+ ):
225
+ """Calculate number of terms in polynomial expansion
226
+
227
+ This should be equivalent to counting the number of terms returned by
228
+ _combinations(...) but much faster.
229
+ """
230
+
231
+ if interaction_only:
232
+ combinations = sum(
233
+ [
234
+ comb(n_features, i, exact=True)
235
+ for i in range(max(1, min_degree), min(max_degree, n_features) + 1)
236
+ ]
237
+ )
238
+ else:
239
+ combinations = comb(n_features + max_degree, max_degree, exact=True) - 1
240
+ if min_degree > 0:
241
+ d = min_degree - 1
242
+ combinations -= comb(n_features + d, d, exact=True) - 1
243
+
244
+ if include_bias:
245
+ combinations += 1
246
+
247
+ return combinations
248
+
249
+ @property
250
+ def powers_(self):
251
+ """Exponent for each of the inputs in the output."""
252
+ check_is_fitted(self)
253
+
254
+ combinations = self._combinations(
255
+ n_features=self.n_features_in_,
256
+ min_degree=self._min_degree,
257
+ max_degree=self._max_degree,
258
+ interaction_only=self.interaction_only,
259
+ include_bias=self.include_bias,
260
+ )
261
+ return np.vstack(
262
+ [np.bincount(c, minlength=self.n_features_in_) for c in combinations]
263
+ )
264
+
265
+ def get_feature_names_out(self, input_features=None):
266
+ """Get output feature names for transformation.
267
+
268
+ Parameters
269
+ ----------
270
+ input_features : array-like of str or None, default=None
271
+ Input features.
272
+
273
+ - If `input_features is None`, then `feature_names_in_` is
274
+ used as feature names in. If `feature_names_in_` is not defined,
275
+ then the following input feature names are generated:
276
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
277
+ - If `input_features` is an array-like, then `input_features` must
278
+ match `feature_names_in_` if `feature_names_in_` is defined.
279
+
280
+ Returns
281
+ -------
282
+ feature_names_out : ndarray of str objects
283
+ Transformed feature names.
284
+ """
285
+ powers = self.powers_
286
+ input_features = _check_feature_names_in(self, input_features)
287
+ feature_names = []
288
+ for row in powers:
289
+ inds = np.where(row)[0]
290
+ if len(inds):
291
+ name = " ".join(
292
+ (
293
+ "%s^%d" % (input_features[ind], exp)
294
+ if exp != 1
295
+ else input_features[ind]
296
+ )
297
+ for ind, exp in zip(inds, row[inds])
298
+ )
299
+ else:
300
+ name = "1"
301
+ feature_names.append(name)
302
+ return np.asarray(feature_names, dtype=object)
303
+
304
+ @_fit_context(prefer_skip_nested_validation=True)
305
+ def fit(self, X, y=None):
306
+ """
307
+ Compute number of output features.
308
+
309
+ Parameters
310
+ ----------
311
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
312
+ The data.
313
+
314
+ y : Ignored
315
+ Not used, present here for API consistency by convention.
316
+
317
+ Returns
318
+ -------
319
+ self : object
320
+ Fitted transformer.
321
+ """
322
+ _, n_features = self._validate_data(X, accept_sparse=True).shape
323
+
324
+ if isinstance(self.degree, Integral):
325
+ if self.degree == 0 and not self.include_bias:
326
+ raise ValueError(
327
+ "Setting degree to zero and include_bias to False would result in"
328
+ " an empty output array."
329
+ )
330
+
331
+ self._min_degree = 0
332
+ self._max_degree = self.degree
333
+ elif (
334
+ isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2
335
+ ):
336
+ self._min_degree, self._max_degree = self.degree
337
+ if not (
338
+ isinstance(self._min_degree, Integral)
339
+ and isinstance(self._max_degree, Integral)
340
+ and self._min_degree >= 0
341
+ and self._min_degree <= self._max_degree
342
+ ):
343
+ raise ValueError(
344
+ "degree=(min_degree, max_degree) must "
345
+ "be non-negative integers that fulfil "
346
+ "min_degree <= max_degree, got "
347
+ f"{self.degree}."
348
+ )
349
+ elif self._max_degree == 0 and not self.include_bias:
350
+ raise ValueError(
351
+ "Setting both min_degree and max_degree to zero and include_bias to"
352
+ " False would result in an empty output array."
353
+ )
354
+ else:
355
+ raise ValueError(
356
+ "degree must be a non-negative int or tuple "
357
+ "(min_degree, max_degree), got "
358
+ f"{self.degree}."
359
+ )
360
+
361
+ self.n_output_features_ = self._num_combinations(
362
+ n_features=n_features,
363
+ min_degree=self._min_degree,
364
+ max_degree=self._max_degree,
365
+ interaction_only=self.interaction_only,
366
+ include_bias=self.include_bias,
367
+ )
368
+ if self.n_output_features_ > np.iinfo(np.intp).max:
369
+ msg = (
370
+ "The output that would result from the current configuration would"
371
+ f" have {self.n_output_features_} features which is too large to be"
372
+ f" indexed by {np.intp().dtype.name}. Please change some or all of the"
373
+ " following:\n- The number of features in the input, currently"
374
+ f" {n_features=}\n- The range of degrees to calculate, currently"
375
+ f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only"
376
+ f" interaction terms, currently {self.interaction_only}\n- Whether to"
377
+ f" include a bias term, currently {self.include_bias}."
378
+ )
379
+ if (
380
+ np.intp == np.int32
381
+ and self.n_output_features_ <= np.iinfo(np.int64).max
382
+ ): # pragma: nocover
383
+ msg += (
384
+ "\nNote that the current Python runtime has a limited 32 bit "
385
+ "address space and that this configuration would have been "
386
+ "admissible if run on a 64 bit Python runtime."
387
+ )
388
+ raise ValueError(msg)
389
+ # We also record the number of output features for
390
+ # _max_degree = 0
391
+ self._n_out_full = self._num_combinations(
392
+ n_features=n_features,
393
+ min_degree=0,
394
+ max_degree=self._max_degree,
395
+ interaction_only=self.interaction_only,
396
+ include_bias=self.include_bias,
397
+ )
398
+
399
+ return self
400
+
401
+ def transform(self, X):
402
+ """Transform data to polynomial features.
403
+
404
+ Parameters
405
+ ----------
406
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
407
+ The data to transform, row by row.
408
+
409
+ Prefer CSR over CSC for sparse input (for speed), but CSC is
410
+ required if the degree is 4 or higher. If the degree is less than
411
+ 4 and the input format is CSC, it will be converted to CSR, have
412
+ its polynomial features generated, then converted back to CSC.
413
+
414
+ If the degree is 2 or 3, the method described in "Leveraging
415
+ Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices
416
+ Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is
417
+ used, which is much faster than the method used on CSC input. For
418
+ this reason, a CSC input will be converted to CSR, and the output
419
+ will be converted back to CSC prior to being returned, hence the
420
+ preference of CSR.
421
+
422
+ Returns
423
+ -------
424
+ XP : {ndarray, sparse matrix} of shape (n_samples, NP)
425
+ The matrix of features, where `NP` is the number of polynomial
426
+ features generated from the combination of inputs. If a sparse
427
+ matrix is provided, it will be converted into a sparse
428
+ `csr_matrix`.
429
+ """
430
+ check_is_fitted(self)
431
+
432
+ X = self._validate_data(
433
+ X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc")
434
+ )
435
+
436
+ n_samples, n_features = X.shape
437
+ max_int32 = np.iinfo(np.int32).max
438
+ if sparse.issparse(X) and X.format == "csr":
439
+ if self._max_degree > 3:
440
+ return self.transform(X.tocsc()).tocsr()
441
+ to_stack = []
442
+ if self.include_bias:
443
+ to_stack.append(
444
+ sparse.csr_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype))
445
+ )
446
+ if self._min_degree <= 1 and self._max_degree > 0:
447
+ to_stack.append(X)
448
+
449
+ cumulative_size = sum(mat.shape[1] for mat in to_stack)
450
+ for deg in range(max(2, self._min_degree), self._max_degree + 1):
451
+ expanded = _create_expansion(
452
+ X=X,
453
+ interaction_only=self.interaction_only,
454
+ deg=deg,
455
+ n_features=n_features,
456
+ cumulative_size=cumulative_size,
457
+ )
458
+ if expanded is not None:
459
+ to_stack.append(expanded)
460
+ cumulative_size += expanded.shape[1]
461
+ if len(to_stack) == 0:
462
+ # edge case: deal with empty matrix
463
+ XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype)
464
+ else:
465
+ # `scipy.sparse.hstack` breaks in scipy<1.9.2
466
+ # when `n_output_features_ > max_int32`
467
+ all_int32 = all(mat.indices.dtype == np.int32 for mat in to_stack)
468
+ if (
469
+ sp_version < parse_version("1.9.2")
470
+ and self.n_output_features_ > max_int32
471
+ and all_int32
472
+ ):
473
+ raise ValueError( # pragma: no cover
474
+ "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
475
+ " produces negative columns when:\n1. The output shape contains"
476
+ " `n_cols` too large to be represented by a 32bit signed"
477
+ " integer.\n2. All sub-matrices to be stacked have indices of"
478
+ " dtype `np.int32`.\nTo avoid this error, either use a version"
479
+ " of scipy `>=1.9.2` or alter the `PolynomialFeatures`"
480
+ " transformer to produce fewer than 2^31 output features"
481
+ )
482
+ XP = sparse.hstack(to_stack, dtype=X.dtype, format="csr")
483
+ elif sparse.issparse(X) and X.format == "csc" and self._max_degree < 4:
484
+ return self.transform(X.tocsr()).tocsc()
485
+ elif sparse.issparse(X):
486
+ combinations = self._combinations(
487
+ n_features=n_features,
488
+ min_degree=self._min_degree,
489
+ max_degree=self._max_degree,
490
+ interaction_only=self.interaction_only,
491
+ include_bias=self.include_bias,
492
+ )
493
+ columns = []
494
+ for combi in combinations:
495
+ if combi:
496
+ out_col = 1
497
+ for col_idx in combi:
498
+ out_col = X[:, [col_idx]].multiply(out_col)
499
+ columns.append(out_col)
500
+ else:
501
+ bias = sparse.csc_matrix(np.ones((X.shape[0], 1)))
502
+ columns.append(bias)
503
+ XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
504
+ else:
505
+ # Do as if _min_degree = 0 and cut down array after the
506
+ # computation, i.e. use _n_out_full instead of n_output_features_.
507
+ XP = np.empty(
508
+ shape=(n_samples, self._n_out_full), dtype=X.dtype, order=self.order
509
+ )
510
+
511
+ # What follows is a faster implementation of:
512
+ # for i, comb in enumerate(combinations):
513
+ # XP[:, i] = X[:, comb].prod(1)
514
+ # This implementation uses two optimisations.
515
+ # First one is broadcasting,
516
+ # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1]
517
+ # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2]
518
+ # ...
519
+ # multiply ([X[:, start:end], X[:, start]) -> ...
520
+ # Second optimisation happens for degrees >= 3.
521
+ # Xi^3 is computed reusing previous computation:
522
+ # Xi^3 = Xi^2 * Xi.
523
+
524
+ # degree 0 term
525
+ if self.include_bias:
526
+ XP[:, 0] = 1
527
+ current_col = 1
528
+ else:
529
+ current_col = 0
530
+
531
+ if self._max_degree == 0:
532
+ return XP
533
+
534
+ # degree 1 term
535
+ XP[:, current_col : current_col + n_features] = X
536
+ index = list(range(current_col, current_col + n_features))
537
+ current_col += n_features
538
+ index.append(current_col)
539
+
540
+ # loop over degree >= 2 terms
541
+ for _ in range(2, self._max_degree + 1):
542
+ new_index = []
543
+ end = index[-1]
544
+ for feature_idx in range(n_features):
545
+ start = index[feature_idx]
546
+ new_index.append(current_col)
547
+ if self.interaction_only:
548
+ start += index[feature_idx + 1] - index[feature_idx]
549
+ next_col = current_col + end - start
550
+ if next_col <= current_col:
551
+ break
552
+ # XP[:, start:end] are terms of degree d - 1
553
+ # that exclude feature #feature_idx.
554
+ np.multiply(
555
+ XP[:, start:end],
556
+ X[:, feature_idx : feature_idx + 1],
557
+ out=XP[:, current_col:next_col],
558
+ casting="no",
559
+ )
560
+ current_col = next_col
561
+
562
+ new_index.append(current_col)
563
+ index = new_index
564
+
565
+ if self._min_degree > 1:
566
+ n_XP, n_Xout = self._n_out_full, self.n_output_features_
567
+ if self.include_bias:
568
+ Xout = np.empty(
569
+ shape=(n_samples, n_Xout), dtype=XP.dtype, order=self.order
570
+ )
571
+ Xout[:, 0] = 1
572
+ Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :]
573
+ else:
574
+ Xout = XP[:, n_XP - n_Xout :].copy()
575
+ XP = Xout
576
+ return XP
577
+
578
+
579
+ class SplineTransformer(TransformerMixin, BaseEstimator):
580
+ """Generate univariate B-spline bases for features.
581
+
582
+ Generate a new feature matrix consisting of
583
+ `n_splines=n_knots + degree - 1` (`n_knots - 1` for
584
+ `extrapolation="periodic"`) spline basis functions
585
+ (B-splines) of polynomial order=`degree` for each feature.
586
+
587
+ In order to learn more about the SplineTransformer class go to:
588
+ :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py`
589
+
590
+ Read more in the :ref:`User Guide <spline_transformer>`.
591
+
592
+ .. versionadded:: 1.0
593
+
594
+ Parameters
595
+ ----------
596
+ n_knots : int, default=5
597
+ Number of knots of the splines if `knots` equals one of
598
+ {'uniform', 'quantile'}. Must be larger or equal 2. Ignored if `knots`
599
+ is array-like.
600
+
601
+ degree : int, default=3
602
+ The polynomial degree of the spline basis. Must be a non-negative
603
+ integer.
604
+
605
+ knots : {'uniform', 'quantile'} or array-like of shape \
606
+ (n_knots, n_features), default='uniform'
607
+ Set knot positions such that first knot <= features <= last knot.
608
+
609
+ - If 'uniform', `n_knots` number of knots are distributed uniformly
610
+ from min to max values of the features.
611
+ - If 'quantile', they are distributed uniformly along the quantiles of
612
+ the features.
613
+ - If an array-like is given, it directly specifies the sorted knot
614
+ positions including the boundary knots. Note that, internally,
615
+ `degree` number of knots are added before the first knot, the same
616
+ after the last knot.
617
+
618
+ extrapolation : {'error', 'constant', 'linear', 'continue', 'periodic'}, \
619
+ default='constant'
620
+ If 'error', values outside the min and max values of the training
621
+ features raises a `ValueError`. If 'constant', the value of the
622
+ splines at minimum and maximum value of the features is used as
623
+ constant extrapolation. If 'linear', a linear extrapolation is used.
624
+ If 'continue', the splines are extrapolated as is, i.e. option
625
+ `extrapolate=True` in :class:`scipy.interpolate.BSpline`. If
626
+ 'periodic', periodic splines with a periodicity equal to the distance
627
+ between the first and last knot are used. Periodic splines enforce
628
+ equal function values and derivatives at the first and last knot.
629
+ For example, this makes it possible to avoid introducing an arbitrary
630
+ jump between Dec 31st and Jan 1st in spline features derived from a
631
+ naturally periodic "day-of-year" input feature. In this case it is
632
+ recommended to manually set the knot values to control the period.
633
+
634
+ include_bias : bool, default=True
635
+ If False, then the last spline element inside the data range
636
+ of a feature is dropped. As B-splines sum to one over the spline basis
637
+ functions for each data point, they implicitly include a bias term,
638
+ i.e. a column of ones. It acts as an intercept term in a linear models.
639
+
640
+ order : {'C', 'F'}, default='C'
641
+ Order of output array in the dense case. `'F'` order is faster to compute, but
642
+ may slow down subsequent estimators.
643
+
644
+ sparse_output : bool, default=False
645
+ Will return sparse CSR matrix if set True else will return an array. This
646
+ option is only available with `scipy>=1.8`.
647
+
648
+ .. versionadded:: 1.2
649
+
650
+ Attributes
651
+ ----------
652
+ bsplines_ : list of shape (n_features,)
653
+ List of BSplines objects, one for each feature.
654
+
655
+ n_features_in_ : int
656
+ The total number of input features.
657
+
658
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
659
+ Names of features seen during :term:`fit`. Defined only when `X`
660
+ has feature names that are all strings.
661
+
662
+ .. versionadded:: 1.0
663
+
664
+ n_features_out_ : int
665
+ The total number of output features, which is computed as
666
+ `n_features * n_splines`, where `n_splines` is
667
+ the number of bases elements of the B-splines,
668
+ `n_knots + degree - 1` for non-periodic splines and
669
+ `n_knots - 1` for periodic ones.
670
+ If `include_bias=False`, then it is only
671
+ `n_features * (n_splines - 1)`.
672
+
673
+ See Also
674
+ --------
675
+ KBinsDiscretizer : Transformer that bins continuous data into intervals.
676
+
677
+ PolynomialFeatures : Transformer that generates polynomial and interaction
678
+ features.
679
+
680
+ Notes
681
+ -----
682
+ High degrees and a high number of knots can cause overfitting.
683
+
684
+ See :ref:`examples/linear_model/plot_polynomial_interpolation.py
685
+ <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`.
686
+
687
+ Examples
688
+ --------
689
+ >>> import numpy as np
690
+ >>> from sklearn.preprocessing import SplineTransformer
691
+ >>> X = np.arange(6).reshape(6, 1)
692
+ >>> spline = SplineTransformer(degree=2, n_knots=3)
693
+ >>> spline.fit_transform(X)
694
+ array([[0.5 , 0.5 , 0. , 0. ],
695
+ [0.18, 0.74, 0.08, 0. ],
696
+ [0.02, 0.66, 0.32, 0. ],
697
+ [0. , 0.32, 0.66, 0.02],
698
+ [0. , 0.08, 0.74, 0.18],
699
+ [0. , 0. , 0.5 , 0.5 ]])
700
+ """
701
+
702
+ _parameter_constraints: dict = {
703
+ "n_knots": [Interval(Integral, 2, None, closed="left")],
704
+ "degree": [Interval(Integral, 0, None, closed="left")],
705
+ "knots": [StrOptions({"uniform", "quantile"}), "array-like"],
706
+ "extrapolation": [
707
+ StrOptions({"error", "constant", "linear", "continue", "periodic"})
708
+ ],
709
+ "include_bias": ["boolean"],
710
+ "order": [StrOptions({"C", "F"})],
711
+ "sparse_output": ["boolean"],
712
+ }
713
+
714
+ def __init__(
715
+ self,
716
+ n_knots=5,
717
+ degree=3,
718
+ *,
719
+ knots="uniform",
720
+ extrapolation="constant",
721
+ include_bias=True,
722
+ order="C",
723
+ sparse_output=False,
724
+ ):
725
+ self.n_knots = n_knots
726
+ self.degree = degree
727
+ self.knots = knots
728
+ self.extrapolation = extrapolation
729
+ self.include_bias = include_bias
730
+ self.order = order
731
+ self.sparse_output = sparse_output
732
+
733
+ @staticmethod
734
+ def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None):
735
+ """Calculate base knot positions.
736
+
737
+ Base knots such that first knot <= feature <= last knot. For the
738
+ B-spline construction with scipy.interpolate.BSpline, 2*degree knots
739
+ beyond the base interval are added.
740
+
741
+ Returns
742
+ -------
743
+ knots : ndarray of shape (n_knots, n_features), dtype=np.float64
744
+ Knot positions (points) of base interval.
745
+ """
746
+ if knots == "quantile":
747
+ percentiles = 100 * np.linspace(
748
+ start=0, stop=1, num=n_knots, dtype=np.float64
749
+ )
750
+
751
+ if sample_weight is None:
752
+ knots = np.percentile(X, percentiles, axis=0)
753
+ else:
754
+ knots = np.array(
755
+ [
756
+ _weighted_percentile(X, sample_weight, percentile)
757
+ for percentile in percentiles
758
+ ]
759
+ )
760
+
761
+ else:
762
+ # knots == 'uniform':
763
+ # Note that the variable `knots` has already been validated and
764
+ # `else` is therefore safe.
765
+ # Disregard observations with zero weight.
766
+ mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
767
+ x_min = np.amin(X[mask], axis=0)
768
+ x_max = np.amax(X[mask], axis=0)
769
+
770
+ knots = np.linspace(
771
+ start=x_min,
772
+ stop=x_max,
773
+ num=n_knots,
774
+ endpoint=True,
775
+ dtype=np.float64,
776
+ )
777
+
778
+ return knots
779
+
780
+ def get_feature_names_out(self, input_features=None):
781
+ """Get output feature names for transformation.
782
+
783
+ Parameters
784
+ ----------
785
+ input_features : array-like of str or None, default=None
786
+ Input features.
787
+
788
+ - If `input_features` is `None`, then `feature_names_in_` is
789
+ used as feature names in. If `feature_names_in_` is not defined,
790
+ then the following input feature names are generated:
791
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
792
+ - If `input_features` is an array-like, then `input_features` must
793
+ match `feature_names_in_` if `feature_names_in_` is defined.
794
+
795
+ Returns
796
+ -------
797
+ feature_names_out : ndarray of str objects
798
+ Transformed feature names.
799
+ """
800
+ check_is_fitted(self, "n_features_in_")
801
+ n_splines = self.bsplines_[0].c.shape[1]
802
+
803
+ input_features = _check_feature_names_in(self, input_features)
804
+ feature_names = []
805
+ for i in range(self.n_features_in_):
806
+ for j in range(n_splines - 1 + self.include_bias):
807
+ feature_names.append(f"{input_features[i]}_sp_{j}")
808
+ return np.asarray(feature_names, dtype=object)
809
+
810
+ @_fit_context(prefer_skip_nested_validation=True)
811
+ def fit(self, X, y=None, sample_weight=None):
812
+ """Compute knot positions of splines.
813
+
814
+ Parameters
815
+ ----------
816
+ X : array-like of shape (n_samples, n_features)
817
+ The data.
818
+
819
+ y : None
820
+ Ignored.
821
+
822
+ sample_weight : array-like of shape (n_samples,), default = None
823
+ Individual weights for each sample. Used to calculate quantiles if
824
+ `knots="quantile"`. For `knots="uniform"`, zero weighted
825
+ observations are ignored for finding the min and max of `X`.
826
+
827
+ Returns
828
+ -------
829
+ self : object
830
+ Fitted transformer.
831
+ """
832
+ X = self._validate_data(
833
+ X,
834
+ reset=True,
835
+ accept_sparse=False,
836
+ ensure_min_samples=2,
837
+ ensure_2d=True,
838
+ )
839
+ if sample_weight is not None:
840
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
841
+
842
+ _, n_features = X.shape
843
+
844
+ if isinstance(self.knots, str):
845
+ base_knots = self._get_base_knot_positions(
846
+ X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight
847
+ )
848
+ else:
849
+ base_knots = check_array(self.knots, dtype=np.float64)
850
+ if base_knots.shape[0] < 2:
851
+ raise ValueError("Number of knots, knots.shape[0], must be >= 2.")
852
+ elif base_knots.shape[1] != n_features:
853
+ raise ValueError("knots.shape[1] == n_features is violated.")
854
+ elif not np.all(np.diff(base_knots, axis=0) > 0):
855
+ raise ValueError("knots must be sorted without duplicates.")
856
+
857
+ if self.sparse_output and sp_version < parse_version("1.8.0"):
858
+ raise ValueError(
859
+ "Option sparse_output=True is only available with scipy>=1.8.0, "
860
+ f"but here scipy=={sp_version} is used."
861
+ )
862
+
863
+ # number of knots for base interval
864
+ n_knots = base_knots.shape[0]
865
+
866
+ if self.extrapolation == "periodic" and n_knots <= self.degree:
867
+ raise ValueError(
868
+ "Periodic splines require degree < n_knots. Got n_knots="
869
+ f"{n_knots} and degree={self.degree}."
870
+ )
871
+
872
+ # number of splines basis functions
873
+ if self.extrapolation != "periodic":
874
+ n_splines = n_knots + self.degree - 1
875
+ else:
876
+ # periodic splines have self.degree less degrees of freedom
877
+ n_splines = n_knots - 1
878
+
879
+ degree = self.degree
880
+ n_out = n_features * n_splines
881
+ # We have to add degree number of knots below, and degree number knots
882
+ # above the base knots in order to make the spline basis complete.
883
+ if self.extrapolation == "periodic":
884
+ # For periodic splines the spacing of the first / last degree knots
885
+ # needs to be a continuation of the spacing of the last / first
886
+ # base knots.
887
+ period = base_knots[-1] - base_knots[0]
888
+ knots = np.r_[
889
+ base_knots[-(degree + 1) : -1] - period,
890
+ base_knots,
891
+ base_knots[1 : (degree + 1)] + period,
892
+ ]
893
+
894
+ else:
895
+ # Eilers & Marx in "Flexible smoothing with B-splines and
896
+ # penalties" https://doi.org/10.1214/ss/1038425655 advice
897
+ # against repeating first and last knot several times, which
898
+ # would have inferior behaviour at boundaries if combined with
899
+ # a penalty (hence P-Spline). We follow this advice even if our
900
+ # splines are unpenalized. Meaning we do not:
901
+ # knots = np.r_[
902
+ # np.tile(base_knots.min(axis=0), reps=[degree, 1]),
903
+ # base_knots,
904
+ # np.tile(base_knots.max(axis=0), reps=[degree, 1])
905
+ # ]
906
+ # Instead, we reuse the distance of the 2 fist/last knots.
907
+ dist_min = base_knots[1] - base_knots[0]
908
+ dist_max = base_knots[-1] - base_knots[-2]
909
+
910
+ knots = np.r_[
911
+ np.linspace(
912
+ base_knots[0] - degree * dist_min,
913
+ base_knots[0] - dist_min,
914
+ num=degree,
915
+ ),
916
+ base_knots,
917
+ np.linspace(
918
+ base_knots[-1] + dist_max,
919
+ base_knots[-1] + degree * dist_max,
920
+ num=degree,
921
+ ),
922
+ ]
923
+
924
+ # With a diagonal coefficient matrix, we get back the spline basis
925
+ # elements, i.e. the design matrix of the spline.
926
+ # Note, BSpline appreciates C-contiguous float64 arrays as c=coef.
927
+ coef = np.eye(n_splines, dtype=np.float64)
928
+ if self.extrapolation == "periodic":
929
+ coef = np.concatenate((coef, coef[:degree, :]))
930
+
931
+ extrapolate = self.extrapolation in ["periodic", "continue"]
932
+
933
+ bsplines = [
934
+ BSpline.construct_fast(
935
+ knots[:, i], coef, self.degree, extrapolate=extrapolate
936
+ )
937
+ for i in range(n_features)
938
+ ]
939
+ self.bsplines_ = bsplines
940
+
941
+ self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
942
+ return self
943
+
944
+ def transform(self, X):
945
+ """Transform each feature data to B-splines.
946
+
947
+ Parameters
948
+ ----------
949
+ X : array-like of shape (n_samples, n_features)
950
+ The data to transform.
951
+
952
+ Returns
953
+ -------
954
+ XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines)
955
+ The matrix of features, where n_splines is the number of bases
956
+ elements of the B-splines, n_knots + degree - 1.
957
+ """
958
+ check_is_fitted(self)
959
+
960
+ X = self._validate_data(X, reset=False, accept_sparse=False, ensure_2d=True)
961
+
962
+ n_samples, n_features = X.shape
963
+ n_splines = self.bsplines_[0].c.shape[1]
964
+ degree = self.degree
965
+
966
+ # TODO: Remove this condition, once scipy 1.10 is the minimum version.
967
+ # Only scipy => 1.10 supports design_matrix(.., extrapolate=..).
968
+ # The default (implicit in scipy < 1.10) is extrapolate=False.
969
+ scipy_1_10 = sp_version >= parse_version("1.10.0")
970
+ # Note: self.bsplines_[0].extrapolate is True for extrapolation in
971
+ # ["periodic", "continue"]
972
+ if scipy_1_10:
973
+ use_sparse = self.sparse_output
974
+ kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate}
975
+ else:
976
+ use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate
977
+ kwargs_extrapolate = dict()
978
+
979
+ # Note that scipy BSpline returns float64 arrays and converts input
980
+ # x=X[:, i] to c-contiguous float64.
981
+ n_out = self.n_features_out_ + n_features * (1 - self.include_bias)
982
+ if X.dtype in FLOAT_DTYPES:
983
+ dtype = X.dtype
984
+ else:
985
+ dtype = np.float64
986
+ if use_sparse:
987
+ output_list = []
988
+ else:
989
+ XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order)
990
+
991
+ for i in range(n_features):
992
+ spl = self.bsplines_[i]
993
+
994
+ if self.extrapolation in ("continue", "error", "periodic"):
995
+ if self.extrapolation == "periodic":
996
+ # With periodic extrapolation we map x to the segment
997
+ # [spl.t[k], spl.t[n]].
998
+ # This is equivalent to BSpline(.., extrapolate="periodic")
999
+ # for scipy>=1.0.0.
1000
+ n = spl.t.size - spl.k - 1
1001
+ # Assign to new array to avoid inplace operation
1002
+ x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % (
1003
+ spl.t[n] - spl.t[spl.k]
1004
+ )
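+                    # Example (illustrative): if the base interval
+                    # [spl.t[spl.k], spl.t[n]] is [0, 10], an input value of 12
+                    # is mapped to 0 + (12 - 0) % 10 = 2 before evaluation.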
+                else:
+                    x = X[:, i]
+
+                if use_sparse:
+                    XBS_sparse = BSpline.design_matrix(
+                        x, spl.t, spl.k, **kwargs_extrapolate
+                    )
+                    if self.extrapolation == "periodic":
+                        # See the construction of coef in fit. We need to add the last
+                        # degree spline basis function to the first degree ones and
+                        # then drop the last ones.
+                        # Note: See comment about SparseEfficiencyWarning below.
+                        XBS_sparse = XBS_sparse.tolil()
+                        XBS_sparse[:, :degree] += XBS_sparse[:, -degree:]
+                        XBS_sparse = XBS_sparse[:, :-degree]
+                else:
+                    XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x)
+            else:  # extrapolation in ("constant", "linear")
+                xmin, xmax = spl.t[degree], spl.t[-degree - 1]
+                # spline values at boundaries
+                f_min, f_max = spl(xmin), spl(xmax)
+                mask = (xmin <= X[:, i]) & (X[:, i] <= xmax)
+                if use_sparse:
+                    mask_inv = ~mask
+                    x = X[:, i].copy()
+                    # Set some arbitrary values outside boundary that will be reassigned
+                    # later.
+                    x[mask_inv] = spl.t[self.degree]
+                    XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k)
+                    # Note: Without converting to lil_matrix we would get:
+                    # scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity
+                    # structure of a csr_matrix is expensive. lil_matrix is more
+                    # efficient.
+                    if np.any(mask_inv):
+                        XBS_sparse = XBS_sparse.tolil()
+                        XBS_sparse[mask_inv, :] = 0
+                else:
+                    XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i])
+
+            # Note for extrapolation:
+            # 'continue' is already returned as is by scipy BSplines
+            if self.extrapolation == "error":
+                # BSpline with extrapolate=False does not raise an error, but
+                # outputs np.nan.
+                if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or (
+                    not use_sparse
+                    and np.any(
+                        np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)])
+                    )
+                ):
+                    raise ValueError(
+                        "X contains values beyond the limits of the knots."
+                    )
+            elif self.extrapolation == "constant":
+                # Set all values beyond xmin and xmax to the value of the
+                # spline basis functions at those two positions.
+                # Only the first `degree` and last `degree` splines have
+                # non-zero values at the boundaries.
+
+                mask = X[:, i] < xmin
+                if np.any(mask):
+                    if use_sparse:
+                        # Note: See comment about SparseEfficiencyWarning above.
+                        XBS_sparse = XBS_sparse.tolil()
+                        XBS_sparse[mask, :degree] = f_min[:degree]
+
+                    else:
+                        XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[
+                            :degree
+                        ]
+
+                mask = X[:, i] > xmax
+                if np.any(mask):
+                    if use_sparse:
+                        # Note: See comment about SparseEfficiencyWarning above.
+                        XBS_sparse = XBS_sparse.tolil()
+                        XBS_sparse[mask, -degree:] = f_max[-degree:]
+                    else:
+                        XBS[
+                            mask,
+                            ((i + 1) * n_splines - degree) : ((i + 1) * n_splines),
+                        ] = f_max[-degree:]
+
+            elif self.extrapolation == "linear":
+                # Continue the first `degree` and last `degree` spline basis
+                # elements linearly beyond the boundaries, with slope = derivative
+                # at the boundary.
+                # Note that all others have derivative = value = 0 at the
+                # boundaries.
+
+                # spline derivatives = slopes at boundaries
+                fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1)
+                # Compute the linear continuation.
+                if degree <= 1:
+                    # For degree=1, the derivative of 2nd spline is not zero at
+                    # boundary. For degree=0 it is the same as 'constant'.
+                    degree += 1
+                for j in range(degree):
+                    mask = X[:, i] < xmin
+                    if np.any(mask):
+                        linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j]
+                        if use_sparse:
+                            # Note: See comment about SparseEfficiencyWarning above.
+                            XBS_sparse = XBS_sparse.tolil()
+                            XBS_sparse[mask, j] = linear_extr
+                        else:
+                            XBS[mask, i * n_splines + j] = linear_extr
+
+                    mask = X[:, i] > xmax
+                    if np.any(mask):
+                        k = n_splines - 1 - j
+                        linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k]
+                        if use_sparse:
+                            # Note: See comment about SparseEfficiencyWarning above.
+                            XBS_sparse = XBS_sparse.tolil()
+                            XBS_sparse[mask, k : k + 1] = linear_extr[:, None]
+                        else:
+                            XBS[mask, i * n_splines + k] = linear_extr
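+                # Example (illustrative): with xmax = 10, f_max[k] = 0.5 and
+                # fp_max[k] = -0.2, an input of 12 gives 0.5 + (12 - 10) * (-0.2)
+                # = 0.1 for basis element k, i.e. a straight-line continuation
+                # beyond the boundary.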
+
+            if use_sparse:
+                XBS_sparse = XBS_sparse.tocsr()
+                output_list.append(XBS_sparse)
+
+        if use_sparse:
+            # TODO: Remove this conditional error when the minimum supported version of
+            # SciPy is 1.9.2
+            # `scipy.sparse.hstack` breaks in scipy<1.9.2
+            # when `n_features_out_ > max_int32`
+            max_int32 = np.iinfo(np.int32).max
+            all_int32 = True
+            for mat in output_list:
+                all_int32 &= mat.indices.dtype == np.int32
+            if (
+                sp_version < parse_version("1.9.2")
+                and self.n_features_out_ > max_int32
+                and all_int32
+            ):
+                raise ValueError(
+                    "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
+                    " produces negative columns when:\n1. The output shape contains"
+                    " `n_cols` too large to be represented by a 32bit signed"
+                    " integer.\n2. All sub-matrices to be stacked have indices of"
+                    " dtype `np.int32`.\nTo avoid this error, either use a version"
+                    " of scipy `>=1.9.2` or alter the `SplineTransformer`"
+                    " transformer to produce fewer than 2^31 output features"
+                )
+            XBS = sparse.hstack(output_list, format="csr")
+        elif self.sparse_output:
+            # TODO: Remove once scipy 1.10 is the minimum version. See comments above.
+            XBS = sparse.csr_matrix(XBS)
+
+        if self.include_bias:
+            return XBS
+        else:
+            # We throw away one spline basis per feature.
+            # We choose the last one.
+            indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0]
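+            # Example (illustrative): with n_splines=6 and two input features,
+            # the columns with indices 5 and 11 (0-based) are dropped, one per
+            # feature.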
+            return XBS[:, indices]
+
+    def _more_tags(self):
+        return {
+            "_xfail_checks": {
+                "check_estimators_pickle": (
+                    "Current Scipy implementation of _bsplines does not "
+                    "support const memory views."
+                ),
+            }
+        }
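A minimal usage sketch of the behaviour handled in transform above, assuming only the public SplineTransformer API from sklearn.preprocessing: it fits on a bounded training range and transforms query points that fall outside of it, so the different extrapolation modes can be compared (the "error" mode is skipped because it would raise on such points).

    import numpy as np
    from sklearn.preprocessing import SplineTransformer

    # Training data covers [0, 10]; the query points partly fall outside that range.
    X_train = np.linspace(0, 10, 50).reshape(-1, 1)
    X_query = np.array([[-1.0], [5.0], [11.0]])

    for extrapolation in ("constant", "linear", "continue", "periodic"):
        spt = SplineTransformer(
            n_knots=4, degree=3, extrapolation=extrapolation
        ).fit(X_train)
        XBS = spt.transform(X_query)
        # Each row holds the values of the spline basis elements at one query point.
        print(extrapolation, XBS.shape)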