applied-ai-018 committed on
Commit bd32e04 · verified · 1 Parent(s): 2cd4d2d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
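For context on how a commit like this is created: the "upload-large-folder tool" named in the commit message corresponds to the `HfApi.upload_large_folder` helper available in recent releases of `huggingface_hub`. The sketch below is a minimal, hypothetical example of the kind of call that produces such an upload; the repository id and local folder path are placeholders, not values recorded in this commit view.

```python
from huggingface_hub import HfApi

api = HfApi()  # authentication is picked up from `huggingface-cli login` by default

# Hypothetical repository id and local folder; the real values are not shown in
# this commit view. The folder name here simply mirrors the `llmeval-env/`
# prefix of the uploaded files.
api.upload_large_folder(
    repo_id="your-username/your-repo",
    repo_type="model",
    folder_path="llmeval-env",
)
```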
Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__init__.py +44 -0
  3. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py +267 -0
  4. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py +1110 -0
  5. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py +868 -0
  6. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py +816 -0
  7. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py +857 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py +0 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py +15 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py +525 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py +904 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py +1 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py +1112 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py +2306 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py +671 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py +2190 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py +623 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py +2612 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py +0 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -83,3 +83,4 @@ llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs dif
83
  llmeval-env/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
84
  llmeval-env/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
85
  llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
86
+ llmeval-env/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
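The line added above follows the standard Git LFS attribute pattern (`filter=lfs diff=lfs merge=lfs -text`), which routes the large shared object through LFS instead of storing it directly in git history. As a rough illustration (not part of this commit), an entry like it can be appended from Python as follows; the snippet assumes it is run from the repository root:

```python
from pathlib import Path

# The binary to route through Git LFS (taken from the diff hunk above).
lfs_path = (
    "llmeval-env/lib/python3.10/site-packages/tokenizers/"
    "tokenizers.cpython-310-x86_64-linux-gnu.so"
)
entry = f"{lfs_path} filter=lfs diff=lfs merge=lfs -text\n"

attributes = Path(".gitattributes")
existing = attributes.read_text() if attributes.exists() else ""

# Append the LFS filter entry only if it is not already present.
if entry not in existing:
    with attributes.open("a") as fh:
        fh.write(entry)
```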
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__init__.py ADDED
@@ -0,0 +1,44 @@
1
+ """
2
+ The :mod:`sklearn.covariance` module includes methods and algorithms to
3
+ robustly estimate the covariance of features given a set of points. The
4
+ precision matrix defined as the inverse of the covariance is also estimated.
5
+ Covariance estimation is closely related to the theory of Gaussian Graphical
6
+ Models.
7
+ """
8
+
9
+ from ._elliptic_envelope import EllipticEnvelope
10
+ from ._empirical_covariance import (
11
+ EmpiricalCovariance,
12
+ empirical_covariance,
13
+ log_likelihood,
14
+ )
15
+ from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso
16
+ from ._robust_covariance import MinCovDet, fast_mcd
17
+ from ._shrunk_covariance import (
18
+ OAS,
19
+ LedoitWolf,
20
+ ShrunkCovariance,
21
+ ledoit_wolf,
22
+ ledoit_wolf_shrinkage,
23
+ oas,
24
+ shrunk_covariance,
25
+ )
26
+
27
+ __all__ = [
28
+ "EllipticEnvelope",
29
+ "EmpiricalCovariance",
30
+ "GraphicalLasso",
31
+ "GraphicalLassoCV",
32
+ "LedoitWolf",
33
+ "MinCovDet",
34
+ "OAS",
35
+ "ShrunkCovariance",
36
+ "empirical_covariance",
37
+ "fast_mcd",
38
+ "graphical_lasso",
39
+ "ledoit_wolf",
40
+ "ledoit_wolf_shrinkage",
41
+ "log_likelihood",
42
+ "oas",
43
+ "shrunk_covariance",
44
+ ]
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py ADDED
@@ -0,0 +1,267 @@
1
+ # Author: Virgile Fritsch <[email protected]>
2
+ #
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Real
6
+
7
+ import numpy as np
8
+
9
+ from ..base import OutlierMixin, _fit_context
10
+ from ..metrics import accuracy_score
11
+ from ..utils._param_validation import Interval
12
+ from ..utils.validation import check_is_fitted
13
+ from ._robust_covariance import MinCovDet
14
+
15
+
16
+ class EllipticEnvelope(OutlierMixin, MinCovDet):
17
+ """An object for detecting outliers in a Gaussian distributed dataset.
18
+
19
+ Read more in the :ref:`User Guide <outlier_detection>`.
20
+
21
+ Parameters
22
+ ----------
23
+ store_precision : bool, default=True
24
+ Specify if the estimated precision is stored.
25
+
26
+ assume_centered : bool, default=False
27
+ If True, the support of robust location and covariance estimates
28
+ is computed, and a covariance estimate is recomputed from it,
29
+ without centering the data.
30
+ Useful to work with data whose mean is significantly equal to
31
+ zero but is not exactly zero.
32
+ If False, the robust location and covariance are directly computed
33
+ with the FastMCD algorithm without additional treatment.
34
+
35
+ support_fraction : float, default=None
36
+ The proportion of points to be included in the support of the raw
37
+ MCD estimate. If None, the minimum value of support_fraction will
38
+ be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`.
39
+ Range is (0, 1).
40
+
41
+ contamination : float, default=0.1
42
+ The amount of contamination of the data set, i.e. the proportion
43
+ of outliers in the data set. Range is (0, 0.5].
44
+
45
+ random_state : int, RandomState instance or None, default=None
46
+ Determines the pseudo random number generator for shuffling
47
+ the data. Pass an int for reproducible results across multiple function
48
+ calls. See :term:`Glossary <random_state>`.
49
+
50
+ Attributes
51
+ ----------
52
+ location_ : ndarray of shape (n_features,)
53
+ Estimated robust location.
54
+
55
+ covariance_ : ndarray of shape (n_features, n_features)
56
+ Estimated robust covariance matrix.
57
+
58
+ precision_ : ndarray of shape (n_features, n_features)
59
+ Estimated pseudo inverse matrix.
60
+ (stored only if store_precision is True)
61
+
62
+ support_ : ndarray of shape (n_samples,)
63
+ A mask of the observations that have been used to compute the
64
+ robust estimates of location and shape.
65
+
66
+ offset_ : float
67
+ Offset used to define the decision function from the raw scores.
68
+ We have the relation: ``decision_function = score_samples - offset_``.
69
+ The offset depends on the contamination parameter and is defined in
70
+ such a way we obtain the expected number of outliers (samples with
71
+ decision function < 0) in training.
72
+
73
+ .. versionadded:: 0.20
74
+
75
+ raw_location_ : ndarray of shape (n_features,)
76
+ The raw robust estimated location before correction and re-weighting.
77
+
78
+ raw_covariance_ : ndarray of shape (n_features, n_features)
79
+ The raw robust estimated covariance before correction and re-weighting.
80
+
81
+ raw_support_ : ndarray of shape (n_samples,)
82
+ A mask of the observations that have been used to compute
83
+ the raw robust estimates of location and shape, before correction
84
+ and re-weighting.
85
+
86
+ dist_ : ndarray of shape (n_samples,)
87
+ Mahalanobis distances of the training set (on which :meth:`fit` is
88
+ called) observations.
89
+
90
+ n_features_in_ : int
91
+ Number of features seen during :term:`fit`.
92
+
93
+ .. versionadded:: 0.24
94
+
95
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
96
+ Names of features seen during :term:`fit`. Defined only when `X`
97
+ has feature names that are all strings.
98
+
99
+ .. versionadded:: 1.0
100
+
101
+ See Also
102
+ --------
103
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
104
+ GraphicalLasso : Sparse inverse covariance estimation
105
+ with an l1-penalized estimator.
106
+ LedoitWolf : LedoitWolf Estimator.
107
+ MinCovDet : Minimum Covariance Determinant
108
+ (robust estimator of covariance).
109
+ OAS : Oracle Approximating Shrinkage Estimator.
110
+ ShrunkCovariance : Covariance estimator with shrinkage.
111
+
112
+ Notes
113
+ -----
114
+ Outlier detection from covariance estimation may break or not
115
+ perform well in high-dimensional settings. In particular, one will
116
+ always take care to work with ``n_samples > n_features ** 2``.
117
+
118
+ References
119
+ ----------
120
+ .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
121
+ minimum covariance determinant estimator" Technometrics 41(3), 212
122
+ (1999)
123
+
124
+ Examples
125
+ --------
126
+ >>> import numpy as np
127
+ >>> from sklearn.covariance import EllipticEnvelope
128
+ >>> true_cov = np.array([[.8, .3],
129
+ ... [.3, .4]])
130
+ >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
131
+ ... cov=true_cov,
132
+ ... size=500)
133
+ >>> cov = EllipticEnvelope(random_state=0).fit(X)
134
+ >>> # predict returns 1 for an inlier and -1 for an outlier
135
+ >>> cov.predict([[0, 0],
136
+ ... [3, 3]])
137
+ array([ 1, -1])
138
+ >>> cov.covariance_
139
+ array([[0.7411..., 0.2535...],
140
+ [0.2535..., 0.3053...]])
141
+ >>> cov.location_
142
+ array([0.0813... , 0.0427...])
143
+ """
144
+
145
+ _parameter_constraints: dict = {
146
+ **MinCovDet._parameter_constraints,
147
+ "contamination": [Interval(Real, 0, 0.5, closed="right")],
148
+ }
149
+
150
+ def __init__(
151
+ self,
152
+ *,
153
+ store_precision=True,
154
+ assume_centered=False,
155
+ support_fraction=None,
156
+ contamination=0.1,
157
+ random_state=None,
158
+ ):
159
+ super().__init__(
160
+ store_precision=store_precision,
161
+ assume_centered=assume_centered,
162
+ support_fraction=support_fraction,
163
+ random_state=random_state,
164
+ )
165
+ self.contamination = contamination
166
+
167
+ @_fit_context(prefer_skip_nested_validation=True)
168
+ def fit(self, X, y=None):
169
+ """Fit the EllipticEnvelope model.
170
+
171
+ Parameters
172
+ ----------
173
+ X : array-like of shape (n_samples, n_features)
174
+ Training data.
175
+
176
+ y : Ignored
177
+ Not used, present for API consistency by convention.
178
+
179
+ Returns
180
+ -------
181
+ self : object
182
+ Returns the instance itself.
183
+ """
184
+ super().fit(X)
185
+ self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)
186
+ return self
187
+
188
+ def decision_function(self, X):
189
+ """Compute the decision function of the given observations.
190
+
191
+ Parameters
192
+ ----------
193
+ X : array-like of shape (n_samples, n_features)
194
+ The data matrix.
195
+
196
+ Returns
197
+ -------
198
+ decision : ndarray of shape (n_samples,)
199
+ Decision function of the samples.
200
+ It is equal to the shifted Mahalanobis distances.
201
+ The threshold for being an outlier is 0, which ensures a
202
+ compatibility with other outlier detection algorithms.
203
+ """
204
+ check_is_fitted(self)
205
+ negative_mahal_dist = self.score_samples(X)
206
+ return negative_mahal_dist - self.offset_
207
+
208
+ def score_samples(self, X):
209
+ """Compute the negative Mahalanobis distances.
210
+
211
+ Parameters
212
+ ----------
213
+ X : array-like of shape (n_samples, n_features)
214
+ The data matrix.
215
+
216
+ Returns
217
+ -------
218
+ negative_mahal_distances : array-like of shape (n_samples,)
219
+ Opposite of the Mahalanobis distances.
220
+ """
221
+ check_is_fitted(self)
222
+ return -self.mahalanobis(X)
223
+
224
+ def predict(self, X):
225
+ """
226
+ Predict labels (1 inlier, -1 outlier) of X according to fitted model.
227
+
228
+ Parameters
229
+ ----------
230
+ X : array-like of shape (n_samples, n_features)
231
+ The data matrix.
232
+
233
+ Returns
234
+ -------
235
+ is_inlier : ndarray of shape (n_samples,)
236
+ Returns -1 for anomalies/outliers and +1 for inliers.
237
+ """
238
+ values = self.decision_function(X)
239
+ is_inlier = np.full(values.shape[0], -1, dtype=int)
240
+ is_inlier[values >= 0] = 1
241
+
242
+ return is_inlier
243
+
244
+ def score(self, X, y, sample_weight=None):
245
+ """Return the mean accuracy on the given test data and labels.
246
+
247
+ In multi-label classification, this is the subset accuracy
248
+ which is a harsh metric since you require for each sample that
249
+ each label set be correctly predicted.
250
+
251
+ Parameters
252
+ ----------
253
+ X : array-like of shape (n_samples, n_features)
254
+ Test samples.
255
+
256
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
257
+ True labels for X.
258
+
259
+ sample_weight : array-like of shape (n_samples,), default=None
260
+ Sample weights.
261
+
262
+ Returns
263
+ -------
264
+ score : float
265
+ Mean accuracy of self.predict(X) w.r.t. y.
266
+ """
267
+ return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py ADDED
@@ -0,0 +1,1110 @@
1
+ """GraphicalLasso: sparse inverse covariance estimation with an l1-penalized
2
+ estimator.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <[email protected]>
6
+ # License: BSD 3 clause
7
+ # Copyright: INRIA
8
+ import operator
9
+ import sys
10
+ import time
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import linalg
16
+
17
+ from ..base import _fit_context
18
+ from ..exceptions import ConvergenceWarning
19
+
20
+ # mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'
21
+ from ..linear_model import _cd_fast as cd_fast # type: ignore
22
+ from ..linear_model import lars_path_gram
23
+ from ..model_selection import check_cv, cross_val_score
24
+ from ..utils._param_validation import Interval, StrOptions, validate_params
25
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
26
+ from ..utils.parallel import Parallel, delayed
27
+ from ..utils.validation import (
28
+ _is_arraylike_not_scalar,
29
+ check_random_state,
30
+ check_scalar,
31
+ )
32
+ from . import EmpiricalCovariance, empirical_covariance, log_likelihood
33
+
34
+
35
+ # Helper functions to compute the objective and dual objective functions
36
+ # of the l1-penalized estimator
37
+ def _objective(mle, precision_, alpha):
38
+ """Evaluation of the graphical-lasso objective function
39
+
40
+ the objective function is made of a shifted scaled version of the
41
+ normalized log-likelihood (i.e. its empirical mean over the samples) and a
42
+ penalisation term to promote sparsity
43
+ """
44
+ p = precision_.shape[0]
45
+ cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
46
+ cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
47
+ return cost
48
+
49
+
50
+ def _dual_gap(emp_cov, precision_, alpha):
51
+ """Expression of the dual gap convergence criterion
52
+
53
+ The specific definition is given in Duchi "Projected Subgradient Methods
54
+ for Learning Sparse Gaussians".
55
+ """
56
+ gap = np.sum(emp_cov * precision_)
57
+ gap -= precision_.shape[0]
58
+ gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
59
+ return gap
60
+
61
+
62
+ # The g-lasso algorithm
63
+ def _graphical_lasso(
64
+ emp_cov,
65
+ alpha,
66
+ *,
67
+ cov_init=None,
68
+ mode="cd",
69
+ tol=1e-4,
70
+ enet_tol=1e-4,
71
+ max_iter=100,
72
+ verbose=False,
73
+ eps=np.finfo(np.float64).eps,
74
+ ):
75
+ _, n_features = emp_cov.shape
76
+ if alpha == 0:
77
+ # Early return without regularization
78
+ precision_ = linalg.inv(emp_cov)
79
+ cost = -2.0 * log_likelihood(emp_cov, precision_)
80
+ cost += n_features * np.log(2 * np.pi)
81
+ d_gap = np.sum(emp_cov * precision_) - n_features
82
+ return emp_cov, precision_, (cost, d_gap), 0
83
+
84
+ if cov_init is None:
85
+ covariance_ = emp_cov.copy()
86
+ else:
87
+ covariance_ = cov_init.copy()
88
+ # As a trivial regularization (Tikhonov like), we scale down the
89
+ # off-diagonal coefficients of our starting point: This is needed, as
90
+ # in the cross-validation the cov_init can easily be
91
+ # ill-conditioned, and the CV loop blows. Beside, this takes
92
+ # conservative stand-point on the initial conditions, and it tends to
93
+ # make the convergence go faster.
94
+ covariance_ *= 0.95
95
+ diagonal = emp_cov.flat[:: n_features + 1]
96
+ covariance_.flat[:: n_features + 1] = diagonal
97
+ precision_ = linalg.pinvh(covariance_)
98
+
99
+ indices = np.arange(n_features)
100
+ i = 0 # initialize the counter to be robust to `max_iter=0`
101
+ costs = list()
102
+ # The different l1 regression solver have different numerical errors
103
+ if mode == "cd":
104
+ errors = dict(over="raise", invalid="ignore")
105
+ else:
106
+ errors = dict(invalid="raise")
107
+ try:
108
+ # be robust to the max_iter=0 edge case, see:
109
+ # https://github.com/scikit-learn/scikit-learn/issues/4134
110
+ d_gap = np.inf
111
+ # set a sub_covariance buffer
112
+ sub_covariance = np.copy(covariance_[1:, 1:], order="C")
113
+ for i in range(max_iter):
114
+ for idx in range(n_features):
115
+ # To keep the contiguous matrix `sub_covariance` equal to
116
+ # covariance_[indices != idx].T[indices != idx]
117
+ # we only need to update 1 column and 1 line when idx changes
118
+ if idx > 0:
119
+ di = idx - 1
120
+ sub_covariance[di] = covariance_[di][indices != idx]
121
+ sub_covariance[:, di] = covariance_[:, di][indices != idx]
122
+ else:
123
+ sub_covariance[:] = covariance_[1:, 1:]
124
+ row = emp_cov[idx, indices != idx]
125
+ with np.errstate(**errors):
126
+ if mode == "cd":
127
+ # Use coordinate descent
128
+ coefs = -(
129
+ precision_[indices != idx, idx]
130
+ / (precision_[idx, idx] + 1000 * eps)
131
+ )
132
+ coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
133
+ coefs,
134
+ alpha,
135
+ 0,
136
+ sub_covariance,
137
+ row,
138
+ row,
139
+ max_iter,
140
+ enet_tol,
141
+ check_random_state(None),
142
+ False,
143
+ )
144
+ else: # mode == "lars"
145
+ _, _, coefs = lars_path_gram(
146
+ Xy=row,
147
+ Gram=sub_covariance,
148
+ n_samples=row.size,
149
+ alpha_min=alpha / (n_features - 1),
150
+ copy_Gram=True,
151
+ eps=eps,
152
+ method="lars",
153
+ return_path=False,
154
+ )
155
+ # Update the precision matrix
156
+ precision_[idx, idx] = 1.0 / (
157
+ covariance_[idx, idx]
158
+ - np.dot(covariance_[indices != idx, idx], coefs)
159
+ )
160
+ precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
161
+ precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
162
+ coefs = np.dot(sub_covariance, coefs)
163
+ covariance_[idx, indices != idx] = coefs
164
+ covariance_[indices != idx, idx] = coefs
165
+ if not np.isfinite(precision_.sum()):
166
+ raise FloatingPointError(
167
+ "The system is too ill-conditioned for this solver"
168
+ )
169
+ d_gap = _dual_gap(emp_cov, precision_, alpha)
170
+ cost = _objective(emp_cov, precision_, alpha)
171
+ if verbose:
172
+ print(
173
+ "[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e"
174
+ % (i, cost, d_gap)
175
+ )
176
+ costs.append((cost, d_gap))
177
+ if np.abs(d_gap) < tol:
178
+ break
179
+ if not np.isfinite(cost) and i > 0:
180
+ raise FloatingPointError(
181
+ "Non SPD result: the system is too ill-conditioned for this solver"
182
+ )
183
+ else:
184
+ warnings.warn(
185
+ "graphical_lasso: did not converge after %i iteration: dual gap: %.3e"
186
+ % (max_iter, d_gap),
187
+ ConvergenceWarning,
188
+ )
189
+ except FloatingPointError as e:
190
+ e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",)
191
+ raise e
192
+
193
+ return covariance_, precision_, costs, i + 1
194
+
195
+
196
+ def alpha_max(emp_cov):
197
+ """Find the maximum alpha for which there are some non-zeros off-diagonal.
198
+
199
+ Parameters
200
+ ----------
201
+ emp_cov : ndarray of shape (n_features, n_features)
202
+ The sample covariance matrix.
203
+
204
+ Notes
205
+ -----
206
+ This results from the bound for the all the Lasso that are solved
207
+ in GraphicalLasso: each time, the row of cov corresponds to Xy. As the
208
+ bound for alpha is given by `max(abs(Xy))`, the result follows.
209
+ """
210
+ A = np.copy(emp_cov)
211
+ A.flat[:: A.shape[0] + 1] = 0
212
+ return np.max(np.abs(A))
213
+
214
+
215
+ @validate_params(
216
+ {
217
+ "emp_cov": ["array-like"],
218
+ "cov_init": ["array-like", None],
219
+ "return_costs": ["boolean"],
220
+ "return_n_iter": ["boolean"],
221
+ },
222
+ prefer_skip_nested_validation=False,
223
+ )
224
+ def graphical_lasso(
225
+ emp_cov,
226
+ alpha,
227
+ *,
228
+ cov_init=None,
229
+ mode="cd",
230
+ tol=1e-4,
231
+ enet_tol=1e-4,
232
+ max_iter=100,
233
+ verbose=False,
234
+ return_costs=False,
235
+ eps=np.finfo(np.float64).eps,
236
+ return_n_iter=False,
237
+ ):
238
+ """L1-penalized covariance estimator.
239
+
240
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
241
+
242
+ .. versionchanged:: v0.20
243
+ graph_lasso has been renamed to graphical_lasso
244
+
245
+ Parameters
246
+ ----------
247
+ emp_cov : array-like of shape (n_features, n_features)
248
+ Empirical covariance from which to compute the covariance estimate.
249
+
250
+ alpha : float
251
+ The regularization parameter: the higher alpha, the more
252
+ regularization, the sparser the inverse covariance.
253
+ Range is (0, inf].
254
+
255
+ cov_init : array of shape (n_features, n_features), default=None
256
+ The initial guess for the covariance. If None, then the empirical
257
+ covariance is used.
258
+
259
+ .. deprecated:: 1.3
260
+ `cov_init` is deprecated in 1.3 and will be removed in 1.5.
261
+ It currently has no effect.
262
+
263
+ mode : {'cd', 'lars'}, default='cd'
264
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
265
+ very sparse underlying graphs, where p > n. Elsewhere prefer cd
266
+ which is more numerically stable.
267
+
268
+ tol : float, default=1e-4
269
+ The tolerance to declare convergence: if the dual gap goes below
270
+ this value, iterations are stopped. Range is (0, inf].
271
+
272
+ enet_tol : float, default=1e-4
273
+ The tolerance for the elastic net solver used to calculate the descent
274
+ direction. This parameter controls the accuracy of the search direction
275
+ for a given column update, not of the overall parameter estimate. Only
276
+ used for mode='cd'. Range is (0, inf].
277
+
278
+ max_iter : int, default=100
279
+ The maximum number of iterations.
280
+
281
+ verbose : bool, default=False
282
+ If verbose is True, the objective function and dual gap are
283
+ printed at each iteration.
284
+
285
+ return_costs : bool, default=False
286
+ If return_costs is True, the objective function and dual gap
287
+ at each iteration are returned.
288
+
289
+ eps : float, default=eps
290
+ The machine-precision regularization in the computation of the
291
+ Cholesky diagonal factors. Increase this for very ill-conditioned
292
+ systems. Default is `np.finfo(np.float64).eps`.
293
+
294
+ return_n_iter : bool, default=False
295
+ Whether or not to return the number of iterations.
296
+
297
+ Returns
298
+ -------
299
+ covariance : ndarray of shape (n_features, n_features)
300
+ The estimated covariance matrix.
301
+
302
+ precision : ndarray of shape (n_features, n_features)
303
+ The estimated (sparse) precision matrix.
304
+
305
+ costs : list of (objective, dual_gap) pairs
306
+ The list of values of the objective function and the dual gap at
307
+ each iteration. Returned only if return_costs is True.
308
+
309
+ n_iter : int
310
+ Number of iterations. Returned only if `return_n_iter` is set to True.
311
+
312
+ See Also
313
+ --------
314
+ GraphicalLasso : Sparse inverse covariance estimation
315
+ with an l1-penalized estimator.
316
+ GraphicalLassoCV : Sparse inverse covariance with
317
+ cross-validated choice of the l1 penalty.
318
+
319
+ Notes
320
+ -----
321
+ The algorithm employed to solve this problem is the GLasso algorithm,
322
+ from the Friedman 2008 Biostatistics paper. It is the same algorithm
323
+ as in the R `glasso` package.
324
+
325
+ One possible difference with the `glasso` R package is that the
326
+ diagonal coefficients are not penalized.
327
+
328
+ Examples
329
+ --------
330
+ >>> import numpy as np
331
+ >>> from sklearn.datasets import make_sparse_spd_matrix
332
+ >>> from sklearn.covariance import empirical_covariance, graphical_lasso
333
+ >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42)
334
+ >>> rng = np.random.RandomState(42)
335
+ >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3)
336
+ >>> emp_cov = empirical_covariance(X, assume_centered=True)
337
+ >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05)
338
+ >>> emp_cov
339
+ array([[ 1.68..., 0.21..., -0.20...],
340
+ [ 0.21..., 0.22..., -0.08...],
341
+ [-0.20..., -0.08..., 0.23...]])
342
+ """
343
+
344
+ if cov_init is not None:
345
+ warnings.warn(
346
+ (
347
+ "The cov_init parameter is deprecated in 1.3 and will be removed in "
348
+ "1.5. It does not have any effect."
349
+ ),
350
+ FutureWarning,
351
+ )
352
+
353
+ model = GraphicalLasso(
354
+ alpha=alpha,
355
+ mode=mode,
356
+ covariance="precomputed",
357
+ tol=tol,
358
+ enet_tol=enet_tol,
359
+ max_iter=max_iter,
360
+ verbose=verbose,
361
+ eps=eps,
362
+ assume_centered=True,
363
+ ).fit(emp_cov)
364
+
365
+ output = [model.covariance_, model.precision_]
366
+ if return_costs:
367
+ output.append(model.costs_)
368
+ if return_n_iter:
369
+ output.append(model.n_iter_)
370
+ return tuple(output)
371
+
372
+
373
+ class BaseGraphicalLasso(EmpiricalCovariance):
374
+ _parameter_constraints: dict = {
375
+ **EmpiricalCovariance._parameter_constraints,
376
+ "tol": [Interval(Real, 0, None, closed="right")],
377
+ "enet_tol": [Interval(Real, 0, None, closed="right")],
378
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
379
+ "mode": [StrOptions({"cd", "lars"})],
380
+ "verbose": ["verbose"],
381
+ "eps": [Interval(Real, 0, None, closed="both")],
382
+ }
383
+ _parameter_constraints.pop("store_precision")
384
+
385
+ def __init__(
386
+ self,
387
+ tol=1e-4,
388
+ enet_tol=1e-4,
389
+ max_iter=100,
390
+ mode="cd",
391
+ verbose=False,
392
+ eps=np.finfo(np.float64).eps,
393
+ assume_centered=False,
394
+ ):
395
+ super().__init__(assume_centered=assume_centered)
396
+ self.tol = tol
397
+ self.enet_tol = enet_tol
398
+ self.max_iter = max_iter
399
+ self.mode = mode
400
+ self.verbose = verbose
401
+ self.eps = eps
402
+
403
+
404
+ class GraphicalLasso(BaseGraphicalLasso):
405
+ """Sparse inverse covariance estimation with an l1-penalized estimator.
406
+
407
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
408
+
409
+ .. versionchanged:: v0.20
410
+ GraphLasso has been renamed to GraphicalLasso
411
+
412
+ Parameters
413
+ ----------
414
+ alpha : float, default=0.01
415
+ The regularization parameter: the higher alpha, the more
416
+ regularization, the sparser the inverse covariance.
417
+ Range is (0, inf].
418
+
419
+ mode : {'cd', 'lars'}, default='cd'
420
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
421
+ very sparse underlying graphs, where p > n. Elsewhere prefer cd
422
+ which is more numerically stable.
423
+
424
+ covariance : "precomputed", default=None
425
+ If covariance is "precomputed", the input data in `fit` is assumed
426
+ to be the covariance matrix. If `None`, the empirical covariance
427
+ is estimated from the data `X`.
428
+
429
+ .. versionadded:: 1.3
430
+
431
+ tol : float, default=1e-4
432
+ The tolerance to declare convergence: if the dual gap goes below
433
+ this value, iterations are stopped. Range is (0, inf].
434
+
435
+ enet_tol : float, default=1e-4
436
+ The tolerance for the elastic net solver used to calculate the descent
437
+ direction. This parameter controls the accuracy of the search direction
438
+ for a given column update, not of the overall parameter estimate. Only
439
+ used for mode='cd'. Range is (0, inf].
440
+
441
+ max_iter : int, default=100
442
+ The maximum number of iterations.
443
+
444
+ verbose : bool, default=False
445
+ If verbose is True, the objective function and dual gap are
446
+ plotted at each iteration.
447
+
448
+ eps : float, default=eps
449
+ The machine-precision regularization in the computation of the
450
+ Cholesky diagonal factors. Increase this for very ill-conditioned
451
+ systems. Default is `np.finfo(np.float64).eps`.
452
+
453
+ .. versionadded:: 1.3
454
+
455
+ assume_centered : bool, default=False
456
+ If True, data are not centered before computation.
457
+ Useful when working with data whose mean is almost, but not exactly
458
+ zero.
459
+ If False, data are centered before computation.
460
+
461
+ Attributes
462
+ ----------
463
+ location_ : ndarray of shape (n_features,)
464
+ Estimated location, i.e. the estimated mean.
465
+
466
+ covariance_ : ndarray of shape (n_features, n_features)
467
+ Estimated covariance matrix
468
+
469
+ precision_ : ndarray of shape (n_features, n_features)
470
+ Estimated pseudo inverse matrix.
471
+
472
+ n_iter_ : int
473
+ Number of iterations run.
474
+
475
+ costs_ : list of (objective, dual_gap) pairs
476
+ The list of values of the objective function and the dual gap at
477
+ each iteration. Returned only if return_costs is True.
478
+
479
+ .. versionadded:: 1.3
480
+
481
+ n_features_in_ : int
482
+ Number of features seen during :term:`fit`.
483
+
484
+ .. versionadded:: 0.24
485
+
486
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
487
+ Names of features seen during :term:`fit`. Defined only when `X`
488
+ has feature names that are all strings.
489
+
490
+ .. versionadded:: 1.0
491
+
492
+ See Also
493
+ --------
494
+ graphical_lasso : L1-penalized covariance estimator.
495
+ GraphicalLassoCV : Sparse inverse covariance with
496
+ cross-validated choice of the l1 penalty.
497
+
498
+ Examples
499
+ --------
500
+ >>> import numpy as np
501
+ >>> from sklearn.covariance import GraphicalLasso
502
+ >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
503
+ ... [0.0, 0.4, 0.0, 0.0],
504
+ ... [0.2, 0.0, 0.3, 0.1],
505
+ ... [0.0, 0.0, 0.1, 0.7]])
506
+ >>> np.random.seed(0)
507
+ >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
508
+ ... cov=true_cov,
509
+ ... size=200)
510
+ >>> cov = GraphicalLasso().fit(X)
511
+ >>> np.around(cov.covariance_, decimals=3)
512
+ array([[0.816, 0.049, 0.218, 0.019],
513
+ [0.049, 0.364, 0.017, 0.034],
514
+ [0.218, 0.017, 0.322, 0.093],
515
+ [0.019, 0.034, 0.093, 0.69 ]])
516
+ >>> np.around(cov.location_, decimals=3)
517
+ array([0.073, 0.04 , 0.038, 0.143])
518
+ """
519
+
520
+ _parameter_constraints: dict = {
521
+ **BaseGraphicalLasso._parameter_constraints,
522
+ "alpha": [Interval(Real, 0, None, closed="both")],
523
+ "covariance": [StrOptions({"precomputed"}), None],
524
+ }
525
+
526
+ def __init__(
527
+ self,
528
+ alpha=0.01,
529
+ *,
530
+ mode="cd",
531
+ covariance=None,
532
+ tol=1e-4,
533
+ enet_tol=1e-4,
534
+ max_iter=100,
535
+ verbose=False,
536
+ eps=np.finfo(np.float64).eps,
537
+ assume_centered=False,
538
+ ):
539
+ super().__init__(
540
+ tol=tol,
541
+ enet_tol=enet_tol,
542
+ max_iter=max_iter,
543
+ mode=mode,
544
+ verbose=verbose,
545
+ eps=eps,
546
+ assume_centered=assume_centered,
547
+ )
548
+ self.alpha = alpha
549
+ self.covariance = covariance
550
+
551
+ @_fit_context(prefer_skip_nested_validation=True)
552
+ def fit(self, X, y=None):
553
+ """Fit the GraphicalLasso model to X.
554
+
555
+ Parameters
556
+ ----------
557
+ X : array-like of shape (n_samples, n_features)
558
+ Data from which to compute the covariance estimate.
559
+
560
+ y : Ignored
561
+ Not used, present for API consistency by convention.
562
+
563
+ Returns
564
+ -------
565
+ self : object
566
+ Returns the instance itself.
567
+ """
568
+ # Covariance does not make sense for a single feature
569
+ X = self._validate_data(X, ensure_min_features=2, ensure_min_samples=2)
570
+
571
+ if self.covariance == "precomputed":
572
+ emp_cov = X.copy()
573
+ self.location_ = np.zeros(X.shape[1])
574
+ else:
575
+ emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
576
+ if self.assume_centered:
577
+ self.location_ = np.zeros(X.shape[1])
578
+ else:
579
+ self.location_ = X.mean(0)
580
+
581
+ self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
582
+ emp_cov,
583
+ alpha=self.alpha,
584
+ cov_init=None,
585
+ mode=self.mode,
586
+ tol=self.tol,
587
+ enet_tol=self.enet_tol,
588
+ max_iter=self.max_iter,
589
+ verbose=self.verbose,
590
+ eps=self.eps,
591
+ )
592
+ return self
593
+
594
+
595
+ # Cross-validation with GraphicalLasso
596
+ def graphical_lasso_path(
597
+ X,
598
+ alphas,
599
+ cov_init=None,
600
+ X_test=None,
601
+ mode="cd",
602
+ tol=1e-4,
603
+ enet_tol=1e-4,
604
+ max_iter=100,
605
+ verbose=False,
606
+ eps=np.finfo(np.float64).eps,
607
+ ):
608
+ """l1-penalized covariance estimator along a path of decreasing alphas
609
+
610
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
611
+
612
+ Parameters
613
+ ----------
614
+ X : ndarray of shape (n_samples, n_features)
615
+ Data from which to compute the covariance estimate.
616
+
617
+ alphas : array-like of shape (n_alphas,)
618
+ The list of regularization parameters, decreasing order.
619
+
620
+ cov_init : array of shape (n_features, n_features), default=None
621
+ The initial guess for the covariance.
622
+
623
+ X_test : array of shape (n_test_samples, n_features), default=None
624
+ Optional test matrix to measure generalisation error.
625
+
626
+ mode : {'cd', 'lars'}, default='cd'
627
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
628
+ very sparse underlying graphs, where p > n. Elsewhere prefer cd
629
+ which is more numerically stable.
630
+
631
+ tol : float, default=1e-4
632
+ The tolerance to declare convergence: if the dual gap goes below
633
+ this value, iterations are stopped. The tolerance must be a positive
634
+ number.
635
+
636
+ enet_tol : float, default=1e-4
637
+ The tolerance for the elastic net solver used to calculate the descent
638
+ direction. This parameter controls the accuracy of the search direction
639
+ for a given column update, not of the overall parameter estimate. Only
640
+ used for mode='cd'. The tolerance must be a positive number.
641
+
642
+ max_iter : int, default=100
643
+ The maximum number of iterations. This parameter should be a strictly
644
+ positive integer.
645
+
646
+ verbose : int or bool, default=False
647
+ The higher the verbosity flag, the more information is printed
648
+ during the fitting.
649
+
650
+ eps : float, default=eps
651
+ The machine-precision regularization in the computation of the
652
+ Cholesky diagonal factors. Increase this for very ill-conditioned
653
+ systems. Default is `np.finfo(np.float64).eps`.
654
+
655
+ .. versionadded:: 1.3
656
+
657
+ Returns
658
+ -------
659
+ covariances_ : list of shape (n_alphas,) of ndarray of shape \
660
+ (n_features, n_features)
661
+ The estimated covariance matrices.
662
+
663
+ precisions_ : list of shape (n_alphas,) of ndarray of shape \
664
+ (n_features, n_features)
665
+ The estimated (sparse) precision matrices.
666
+
667
+ scores_ : list of shape (n_alphas,), dtype=float
668
+ The generalisation error (log-likelihood) on the test data.
669
+ Returned only if test data is passed.
670
+ """
671
+ inner_verbose = max(0, verbose - 1)
672
+ emp_cov = empirical_covariance(X)
673
+ if cov_init is None:
674
+ covariance_ = emp_cov.copy()
675
+ else:
676
+ covariance_ = cov_init
677
+ covariances_ = list()
678
+ precisions_ = list()
679
+ scores_ = list()
680
+ if X_test is not None:
681
+ test_emp_cov = empirical_covariance(X_test)
682
+
683
+ for alpha in alphas:
684
+ try:
685
+ # Capture the errors, and move on
686
+ covariance_, precision_, _, _ = _graphical_lasso(
687
+ emp_cov,
688
+ alpha=alpha,
689
+ cov_init=covariance_,
690
+ mode=mode,
691
+ tol=tol,
692
+ enet_tol=enet_tol,
693
+ max_iter=max_iter,
694
+ verbose=inner_verbose,
695
+ eps=eps,
696
+ )
697
+ covariances_.append(covariance_)
698
+ precisions_.append(precision_)
699
+ if X_test is not None:
700
+ this_score = log_likelihood(test_emp_cov, precision_)
701
+ except FloatingPointError:
702
+ this_score = -np.inf
703
+ covariances_.append(np.nan)
704
+ precisions_.append(np.nan)
705
+ if X_test is not None:
706
+ if not np.isfinite(this_score):
707
+ this_score = -np.inf
708
+ scores_.append(this_score)
709
+ if verbose == 1:
710
+ sys.stderr.write(".")
711
+ elif verbose > 1:
712
+ if X_test is not None:
713
+ print(
714
+ "[graphical_lasso_path] alpha: %.2e, score: %.2e"
715
+ % (alpha, this_score)
716
+ )
717
+ else:
718
+ print("[graphical_lasso_path] alpha: %.2e" % alpha)
719
+ if X_test is not None:
720
+ return covariances_, precisions_, scores_
721
+ return covariances_, precisions_
722
+
723
+
724
+ class GraphicalLassoCV(_RoutingNotSupportedMixin, BaseGraphicalLasso):
725
+ """Sparse inverse covariance w/ cross-validated choice of the l1 penalty.
726
+
727
+ See glossary entry for :term:`cross-validation estimator`.
728
+
729
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
730
+
731
+ .. versionchanged:: v0.20
732
+ GraphLassoCV has been renamed to GraphicalLassoCV
733
+
734
+ Parameters
735
+ ----------
736
+ alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
737
+ If an integer is given, it fixes the number of points on the
738
+ grids of alpha to be used. If a list is given, it gives the
739
+ grid to be used. See the notes in the class docstring for
740
+ more details. Range is [1, inf) for an integer.
741
+ Range is (0, inf] for an array-like of floats.
742
+
743
+ n_refinements : int, default=4
744
+ The number of times the grid is refined. Not used if explicit
745
+ values of alphas are passed. Range is [1, inf).
746
+
747
+ cv : int, cross-validation generator or iterable, default=None
748
+ Determines the cross-validation splitting strategy.
749
+ Possible inputs for cv are:
750
+
751
+ - None, to use the default 5-fold cross-validation,
752
+ - integer, to specify the number of folds.
753
+ - :term:`CV splitter`,
754
+ - An iterable yielding (train, test) splits as arrays of indices.
755
+
756
+ For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.
757
+
758
+ Refer :ref:`User Guide <cross_validation>` for the various
759
+ cross-validation strategies that can be used here.
760
+
761
+ .. versionchanged:: 0.20
762
+ ``cv`` default value if None changed from 3-fold to 5-fold.
763
+
764
+ tol : float, default=1e-4
765
+ The tolerance to declare convergence: if the dual gap goes below
766
+ this value, iterations are stopped. Range is (0, inf].
767
+
768
+ enet_tol : float, default=1e-4
769
+ The tolerance for the elastic net solver used to calculate the descent
770
+ direction. This parameter controls the accuracy of the search direction
771
+ for a given column update, not of the overall parameter estimate. Only
772
+ used for mode='cd'. Range is (0, inf].
773
+
774
+ max_iter : int, default=100
775
+ Maximum number of iterations.
776
+
777
+ mode : {'cd', 'lars'}, default='cd'
778
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
779
+ very sparse underlying graphs, where number of features is greater
780
+ than number of samples. Elsewhere prefer cd which is more numerically
781
+ stable.
782
+
783
+ n_jobs : int, default=None
784
+ Number of jobs to run in parallel.
785
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
786
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
787
+ for more details.
788
+
789
+ .. versionchanged:: v0.20
790
+ `n_jobs` default changed from 1 to None
791
+
792
+ verbose : bool, default=False
793
+ If verbose is True, the objective function and duality gap are
794
+ printed at each iteration.
795
+
796
+ eps : float, default=eps
797
+ The machine-precision regularization in the computation of the
798
+ Cholesky diagonal factors. Increase this for very ill-conditioned
799
+ systems. Default is `np.finfo(np.float64).eps`.
800
+
801
+ .. versionadded:: 1.3
802
+
803
+ assume_centered : bool, default=False
804
+ If True, data are not centered before computation.
805
+ Useful when working with data whose mean is almost, but not exactly
806
+ zero.
807
+ If False, data are centered before computation.
808
+
809
+ Attributes
810
+ ----------
811
+ location_ : ndarray of shape (n_features,)
812
+ Estimated location, i.e. the estimated mean.
813
+
814
+ covariance_ : ndarray of shape (n_features, n_features)
815
+ Estimated covariance matrix.
816
+
817
+ precision_ : ndarray of shape (n_features, n_features)
818
+ Estimated precision matrix (inverse covariance).
819
+
820
+ costs_ : list of (objective, dual_gap) pairs
821
+ The list of values of the objective function and the dual gap at
822
+ each iteration. Returned only if return_costs is True.
823
+
824
+ .. versionadded:: 1.3
825
+
826
+ alpha_ : float
827
+ Penalization parameter selected.
828
+
829
+ cv_results_ : dict of ndarrays
830
+ A dict with keys:
831
+
832
+ alphas : ndarray of shape (n_alphas,)
833
+ All penalization parameters explored.
834
+
835
+ split(k)_test_score : ndarray of shape (n_alphas,)
836
+ Log-likelihood score on left-out data across (k)th fold.
837
+
838
+ .. versionadded:: 1.0
839
+
840
+ mean_test_score : ndarray of shape (n_alphas,)
841
+ Mean of scores over the folds.
842
+
843
+ .. versionadded:: 1.0
844
+
845
+ std_test_score : ndarray of shape (n_alphas,)
846
+ Standard deviation of scores over the folds.
847
+
848
+ .. versionadded:: 1.0
849
+
850
+ n_iter_ : int
851
+ Number of iterations run for the optimal alpha.
852
+
853
+ n_features_in_ : int
854
+ Number of features seen during :term:`fit`.
855
+
856
+ .. versionadded:: 0.24
857
+
858
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
859
+ Names of features seen during :term:`fit`. Defined only when `X`
860
+ has feature names that are all strings.
861
+
862
+ .. versionadded:: 1.0
863
+
864
+ See Also
865
+ --------
866
+ graphical_lasso : L1-penalized covariance estimator.
867
+ GraphicalLasso : Sparse inverse covariance estimation
868
+ with an l1-penalized estimator.
869
+
870
+ Notes
871
+ -----
872
+ The search for the optimal penalization parameter (`alpha`) is done on an
873
+ iteratively refined grid: first the cross-validated scores on a grid are
874
+ computed, then a new refined grid is centered around the maximum, and so
875
+ on.
876
+
877
+ One of the challenges which is faced here is that the solvers can
878
+ fail to converge to a well-conditioned estimate. The corresponding
879
+ values of `alpha` then come out as missing values, but the optimum may
880
+ be close to these missing values.
881
+
882
+ In `fit`, once the best parameter `alpha` is found through
883
+ cross-validation, the model is fit again using the entire training set.
884
+
885
+ Examples
886
+ --------
887
+ >>> import numpy as np
888
+ >>> from sklearn.covariance import GraphicalLassoCV
889
+ >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
890
+ ... [0.0, 0.4, 0.0, 0.0],
891
+ ... [0.2, 0.0, 0.3, 0.1],
892
+ ... [0.0, 0.0, 0.1, 0.7]])
893
+ >>> np.random.seed(0)
894
+ >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
895
+ ... cov=true_cov,
896
+ ... size=200)
897
+ >>> cov = GraphicalLassoCV().fit(X)
898
+ >>> np.around(cov.covariance_, decimals=3)
899
+ array([[0.816, 0.051, 0.22 , 0.017],
900
+ [0.051, 0.364, 0.018, 0.036],
901
+ [0.22 , 0.018, 0.322, 0.094],
902
+ [0.017, 0.036, 0.094, 0.69 ]])
903
+ >>> np.around(cov.location_, decimals=3)
904
+ array([0.073, 0.04 , 0.038, 0.143])
905
+ """
906
+
907
+ _parameter_constraints: dict = {
908
+ **BaseGraphicalLasso._parameter_constraints,
909
+ "alphas": [Interval(Integral, 0, None, closed="left"), "array-like"],
910
+ "n_refinements": [Interval(Integral, 1, None, closed="left")],
911
+ "cv": ["cv_object"],
912
+ "n_jobs": [Integral, None],
913
+ }
914
+
915
+ def __init__(
916
+ self,
917
+ *,
918
+ alphas=4,
919
+ n_refinements=4,
920
+ cv=None,
921
+ tol=1e-4,
922
+ enet_tol=1e-4,
923
+ max_iter=100,
924
+ mode="cd",
925
+ n_jobs=None,
926
+ verbose=False,
927
+ eps=np.finfo(np.float64).eps,
928
+ assume_centered=False,
929
+ ):
930
+ super().__init__(
931
+ tol=tol,
932
+ enet_tol=enet_tol,
933
+ max_iter=max_iter,
934
+ mode=mode,
935
+ verbose=verbose,
936
+ eps=eps,
937
+ assume_centered=assume_centered,
938
+ )
939
+ self.alphas = alphas
940
+ self.n_refinements = n_refinements
941
+ self.cv = cv
942
+ self.n_jobs = n_jobs
943
+
944
+ @_fit_context(prefer_skip_nested_validation=True)
945
+ def fit(self, X, y=None):
946
+ """Fit the GraphicalLasso covariance model to X.
947
+
948
+ Parameters
949
+ ----------
950
+ X : array-like of shape (n_samples, n_features)
951
+ Data from which to compute the covariance estimate.
952
+
953
+ y : Ignored
954
+ Not used, present for API consistency by convention.
955
+
956
+ Returns
957
+ -------
958
+ self : object
959
+ Returns the instance itself.
960
+ """
961
+ # Covariance does not make sense for a single feature
962
+ X = self._validate_data(X, ensure_min_features=2)
963
+ if self.assume_centered:
964
+ self.location_ = np.zeros(X.shape[1])
965
+ else:
966
+ self.location_ = X.mean(0)
967
+ emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
968
+
969
+ cv = check_cv(self.cv, y, classifier=False)
970
+
971
+ # List of (alpha, scores, covs)
972
+ path = list()
973
+ n_alphas = self.alphas
974
+ inner_verbose = max(0, self.verbose - 1)
975
+
976
+ if _is_arraylike_not_scalar(n_alphas):
977
+ for alpha in self.alphas:
978
+ check_scalar(
979
+ alpha,
980
+ "alpha",
981
+ Real,
982
+ min_val=0,
983
+ max_val=np.inf,
984
+ include_boundaries="right",
985
+ )
986
+ alphas = self.alphas
987
+ n_refinements = 1
988
+ else:
989
+ n_refinements = self.n_refinements
990
+ alpha_1 = alpha_max(emp_cov)
991
+ alpha_0 = 1e-2 * alpha_1
992
+ alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]
993
+
994
+ t0 = time.time()
995
+ for i in range(n_refinements):
996
+ with warnings.catch_warnings():
997
+ # No need to see the convergence warnings on this grid:
998
+ # they will always be points that will not converge
999
+ # during the cross-validation
1000
+ warnings.simplefilter("ignore", ConvergenceWarning)
1001
+ # Compute the cross-validated loss on the current grid
1002
+
1003
+ # NOTE: Warm-restarting graphical_lasso_path has been tried,
1004
+ # and this did not allow to gain anything
1005
+ # (same execution time with or without).
1006
+ this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
1007
+ delayed(graphical_lasso_path)(
1008
+ X[train],
1009
+ alphas=alphas,
1010
+ X_test=X[test],
1011
+ mode=self.mode,
1012
+ tol=self.tol,
1013
+ enet_tol=self.enet_tol,
1014
+ max_iter=int(0.1 * self.max_iter),
1015
+ verbose=inner_verbose,
1016
+ eps=self.eps,
1017
+ )
1018
+ for train, test in cv.split(X, y)
1019
+ )
1020
+
1021
+ # Little danse to transform the list in what we need
1022
+ covs, _, scores = zip(*this_path)
1023
+ covs = zip(*covs)
1024
+ scores = zip(*scores)
1025
+ path.extend(zip(alphas, scores, covs))
1026
+ path = sorted(path, key=operator.itemgetter(0), reverse=True)
1027
+
1028
+ # Find the maximum (avoid using built in 'max' function to
1029
+ # have a fully-reproducible selection of the smallest alpha
1030
+ # in case of equality)
1031
+ best_score = -np.inf
1032
+ last_finite_idx = 0
1033
+ for index, (alpha, scores, _) in enumerate(path):
1034
+ this_score = np.mean(scores)
1035
+ if this_score >= 0.1 / np.finfo(np.float64).eps:
1036
+ this_score = np.nan
1037
+ if np.isfinite(this_score):
1038
+ last_finite_idx = index
1039
+ if this_score >= best_score:
1040
+ best_score = this_score
1041
+ best_index = index
1042
+
1043
+ # Refine the grid
1044
+ if best_index == 0:
1045
+ # We do not need to go back: we have chosen
1046
+ # the highest value of alpha for which there are
1047
+ # non-zero coefficients
1048
+ alpha_1 = path[0][0]
1049
+ alpha_0 = path[1][0]
1050
+ elif best_index == last_finite_idx and not best_index == len(path) - 1:
1051
+ # We have non-converged models on the upper bound of the
1052
+ # grid, we need to refine the grid there
1053
+ alpha_1 = path[best_index][0]
1054
+ alpha_0 = path[best_index + 1][0]
1055
+ elif best_index == len(path) - 1:
1056
+ alpha_1 = path[best_index][0]
1057
+ alpha_0 = 0.01 * path[best_index][0]
1058
+ else:
1059
+ alpha_1 = path[best_index - 1][0]
1060
+ alpha_0 = path[best_index + 1][0]
1061
+
1062
+ if not _is_arraylike_not_scalar(n_alphas):
1063
+ alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2)
1064
+ alphas = alphas[1:-1]
1065
+
1066
+ if self.verbose and n_refinements > 1:
1067
+ print(
1068
+ "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is"
1069
+ % (i + 1, n_refinements, time.time() - t0)
1070
+ )
1071
+
1072
+ path = list(zip(*path))
1073
+ grid_scores = list(path[1])
1074
+ alphas = list(path[0])
1075
+ # Finally, compute the score with alpha = 0
1076
+ alphas.append(0)
1077
+ grid_scores.append(
1078
+ cross_val_score(
1079
+ EmpiricalCovariance(),
1080
+ X,
1081
+ cv=cv,
1082
+ n_jobs=self.n_jobs,
1083
+ verbose=inner_verbose,
1084
+ )
1085
+ )
1086
+ grid_scores = np.array(grid_scores)
1087
+
1088
+ self.cv_results_ = {"alphas": np.array(alphas)}
1089
+
1090
+ for i in range(grid_scores.shape[1]):
1091
+ self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i]
1092
+
1093
+ self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1)
1094
+ self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1)
1095
+
1096
+ best_alpha = alphas[best_index]
1097
+ self.alpha_ = best_alpha
1098
+
1099
+ # Finally fit the model with the selected alpha
1100
+ self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
1101
+ emp_cov,
1102
+ alpha=best_alpha,
1103
+ mode=self.mode,
1104
+ tol=self.tol,
1105
+ enet_tol=self.enet_tol,
1106
+ max_iter=self.max_iter,
1107
+ verbose=inner_verbose,
1108
+ eps=self.eps,
1109
+ )
1110
+ return self
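
For context on the attributes populated by the `fit` method above (`alpha_`, `cv_results_`, `covariance_`, `precision_`), here is a minimal usage sketch; the toy covariance matrix, sample size and `cv=5` are illustrative assumptions, not part of this commit.

# Hedged usage sketch for GraphicalLassoCV; data and parameters are assumptions.
import numpy as np
from sklearn.covariance import GraphicalLassoCV

rng = np.random.RandomState(0)
true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
                     [0.0, 0.4, 0.0, 0.0],
                     [0.2, 0.0, 0.3, 0.1],
                     [0.0, 0.0, 0.1, 0.7]])
X = rng.multivariate_normal(mean=np.zeros(4), cov=true_cov, size=200)

model = GraphicalLassoCV(cv=5).fit(X)
print(model.alpha_)                          # penalty selected on the refined grid
print(model.cv_results_["alphas"])           # grid explored, including alpha=0
print(model.cv_results_["mean_test_score"])  # cross-validated scores per alpha
print(model.covariance_.shape, model.precision_.shape)
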
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py ADDED
@@ -0,0 +1,868 @@
1
+ """
2
+ Robust location and covariance estimators.
3
+
4
+ This module implements estimators that are resistant to outliers.
5
+
6
+ """
7
+ # Author: Virgile Fritsch <[email protected]>
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import linalg
16
+ from scipy.stats import chi2
17
+
18
+ from ..base import _fit_context
19
+ from ..utils import check_array, check_random_state
20
+ from ..utils._param_validation import Interval
21
+ from ..utils.extmath import fast_logdet
22
+ from ._empirical_covariance import EmpiricalCovariance, empirical_covariance
23
+
24
+
25
+ # Minimum Covariance Determinant
26
+ # Implementation of an algorithm by Rousseeuw & Van Driessen described in
27
+ # (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
28
+ # 1999, American Statistical Association and the American Society
29
+ # for Quality, TECHNOMETRICS)
30
+ # XXX Is this really a public function? It's not listed in the docs or
31
+ # exported by sklearn.covariance. Deprecate?
32
+ def c_step(
33
+ X,
34
+ n_support,
35
+ remaining_iterations=30,
36
+ initial_estimates=None,
37
+ verbose=False,
38
+ cov_computation_method=empirical_covariance,
39
+ random_state=None,
40
+ ):
41
+ """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
42
+
43
+ Parameters
44
+ ----------
45
+ X : array-like of shape (n_samples, n_features)
46
+ Data set in which we look for the n_support observations whose
47
+ scatter matrix has minimum determinant.
48
+
49
+ n_support : int
50
+ Number of observations to compute the robust estimates of location
51
+ and covariance from. This parameter must be greater than
52
+ `n_samples / 2`.
53
+
54
+ remaining_iterations : int, default=30
55
+ Number of iterations to perform.
56
+ According to [Rouseeuw1999]_, two iterations are sufficient to get
57
+ close to the minimum, and we never need more than 30 to reach
58
+ convergence.
59
+
60
+ initial_estimates : tuple of shape (2,), default=None
61
+ Initial estimates of location and shape from which to run the c_step
62
+ procedure:
63
+ - initial_estimates[0]: an initial location estimate
64
+ - initial_estimates[1]: an initial covariance estimate
65
+
66
+ verbose : bool, default=False
67
+ Verbose mode.
68
+
69
+ cov_computation_method : callable, \
70
+ default=:func:`sklearn.covariance.empirical_covariance`
71
+ The function which will be used to compute the covariance.
72
+ Must return array of shape (n_features, n_features).
73
+
74
+ random_state : int, RandomState instance or None, default=None
75
+ Determines the pseudo random number generator for shuffling the data.
76
+ Pass an int for reproducible results across multiple function calls.
77
+ See :term:`Glossary <random_state>`.
78
+
79
+ Returns
80
+ -------
81
+ location : ndarray of shape (n_features,)
82
+ Robust location estimates.
83
+
84
+ covariance : ndarray of shape (n_features, n_features)
85
+ Robust covariance estimates.
86
+
87
+ support : ndarray of shape (n_samples,)
88
+ A mask for the `n_support` observations whose scatter matrix has
89
+ minimum determinant.
90
+
91
+ References
92
+ ----------
93
+ .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
94
+ Estimator, 1999, American Statistical Association and the American
95
+ Society for Quality, TECHNOMETRICS
96
+ """
97
+ X = np.asarray(X)
98
+ random_state = check_random_state(random_state)
99
+ return _c_step(
100
+ X,
101
+ n_support,
102
+ remaining_iterations=remaining_iterations,
103
+ initial_estimates=initial_estimates,
104
+ verbose=verbose,
105
+ cov_computation_method=cov_computation_method,
106
+ random_state=random_state,
107
+ )
108
+
109
+
110
+ def _c_step(
111
+ X,
112
+ n_support,
113
+ random_state,
114
+ remaining_iterations=30,
115
+ initial_estimates=None,
116
+ verbose=False,
117
+ cov_computation_method=empirical_covariance,
118
+ ):
119
+ n_samples, n_features = X.shape
120
+ dist = np.inf
121
+
122
+ # Initialisation
123
+ support = np.zeros(n_samples, dtype=bool)
124
+ if initial_estimates is None:
125
+ # compute initial robust estimates from a random subset
126
+ support[random_state.permutation(n_samples)[:n_support]] = True
127
+ else:
128
+ # get initial robust estimates from the function parameters
129
+ location = initial_estimates[0]
130
+ covariance = initial_estimates[1]
131
+ # run a special iteration for that case (to get an initial support)
132
+ precision = linalg.pinvh(covariance)
133
+ X_centered = X - location
134
+ dist = (np.dot(X_centered, precision) * X_centered).sum(1)
135
+ # compute new estimates
136
+ support[np.argsort(dist)[:n_support]] = True
137
+
138
+ X_support = X[support]
139
+ location = X_support.mean(0)
140
+ covariance = cov_computation_method(X_support)
141
+
142
+ # Iterative procedure for Minimum Covariance Determinant computation
143
+ det = fast_logdet(covariance)
144
+ # If the data already has singular covariance, calculate the precision,
145
+ # as the loop below will not be entered.
146
+ if np.isinf(det):
147
+ precision = linalg.pinvh(covariance)
148
+
149
+ previous_det = np.inf
150
+ while det < previous_det and remaining_iterations > 0 and not np.isinf(det):
151
+ # save old estimates values
152
+ previous_location = location
153
+ previous_covariance = covariance
154
+ previous_det = det
155
+ previous_support = support
156
+ # compute a new support from the full data set mahalanobis distances
157
+ precision = linalg.pinvh(covariance)
158
+ X_centered = X - location
159
+ dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
160
+ # compute new estimates
161
+ support = np.zeros(n_samples, dtype=bool)
162
+ support[np.argsort(dist)[:n_support]] = True
163
+ X_support = X[support]
164
+ location = X_support.mean(axis=0)
165
+ covariance = cov_computation_method(X_support)
166
+ det = fast_logdet(covariance)
167
+ # update remaining iterations for early stopping
168
+ remaining_iterations -= 1
169
+
170
+ previous_dist = dist
171
+ dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
172
+ # Check if best fit already found (det -> 0, logdet -> -inf)
173
+ if np.isinf(det):
174
+ results = location, covariance, det, support, dist
175
+ # Check convergence
176
+ if np.allclose(det, previous_det):
177
+ # c_step procedure converged
178
+ if verbose:
179
+ print(
180
+ "Optimal couple (location, covariance) found before"
181
+ " ending iterations (%d left)" % (remaining_iterations)
182
+ )
183
+ results = location, covariance, det, support, dist
184
+ elif det > previous_det:
185
+ # determinant has increased (should not happen)
186
+ warnings.warn(
187
+ "Determinant has increased; this should not happen: "
188
+ "log(det) > log(previous_det) (%.15f > %.15f). "
189
+ "You may want to try with a higher value of "
190
+ "support_fraction (current value: %.3f)."
191
+ % (det, previous_det, n_support / n_samples),
192
+ RuntimeWarning,
193
+ )
194
+ results = (
195
+ previous_location,
196
+ previous_covariance,
197
+ previous_det,
198
+ previous_support,
199
+ previous_dist,
200
+ )
201
+
202
+ # Check early stopping
203
+ if remaining_iterations == 0:
204
+ if verbose:
205
+ print("Maximum number of iterations reached")
206
+ results = location, covariance, det, support, dist
207
+
208
+ return results
209
+
210
+
211
+ def select_candidates(
212
+ X,
213
+ n_support,
214
+ n_trials,
215
+ select=1,
216
+ n_iter=30,
217
+ verbose=False,
218
+ cov_computation_method=empirical_covariance,
219
+ random_state=None,
220
+ ):
221
+ """Finds the best pure subset of observations to compute MCD from it.
222
+
223
+ The purpose of this function is to find the best sets of n_support
224
+ observations with respect to a minimization of their covariance
225
+ matrix determinant. Equivalently, it removes n_samples-n_support
226
+ observations to construct what we call a pure data set (i.e. not
227
+ containing outliers). The list of the observations of the pure
228
+ data set is referred to as the `support`.
229
+
230
+ Starting from a random support, the pure data set is found by the
231
+ c_step procedure introduced by Rousseeuw and Van Driessen in
232
+ [RV]_.
233
+
234
+ Parameters
235
+ ----------
236
+ X : array-like of shape (n_samples, n_features)
237
+ Data (sub)set in which we look for the n_support purest observations.
238
+
239
+ n_support : int
240
+ The number of samples the pure data set must contain.
241
+ This parameter must be in the range `[(n + p + 1)/2] < n_support < n`.
242
+
243
+ n_trials : int or tuple of shape (2,)
244
+ Number of different initial sets of observations from which to
245
+ run the algorithm. This parameter should be a strictly positive
246
+ integer.
247
+ Instead of giving a number of trials to perform, one can provide a
248
+ list of initial estimates that will be used to iteratively run
249
+ c_step procedures. In this case:
250
+ - n_trials[0]: array-like, shape (n_trials, n_features)
251
+ is the list of `n_trials` initial location estimates
252
+ - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
253
+ is the list of `n_trials` initial covariances estimates
254
+
255
+ select : int, default=1
256
+ Number of best candidates results to return. This parameter must be
257
+ a strictly positive integer.
258
+
259
+ n_iter : int, default=30
260
+ Maximum number of iterations for the c_step procedure.
261
+ (2 is enough to get close to the final solution; it "never" exceeds 20).
262
+ This parameter must be a strictly positive integer.
263
+
264
+ verbose : bool, default=False
265
+ Control the output verbosity.
266
+
267
+ cov_computation_method : callable, \
268
+ default=:func:`sklearn.covariance.empirical_covariance`
269
+ The function which will be used to compute the covariance.
270
+ Must return an array of shape (n_features, n_features).
271
+
272
+ random_state : int, RandomState instance or None, default=None
273
+ Determines the pseudo random number generator for shuffling the data.
274
+ Pass an int for reproducible results across multiple function calls.
275
+ See :term:`Glossary <random_state>`.
276
+
277
+ See Also
278
+ ---------
279
+ c_step
280
+
281
+ Returns
282
+ -------
283
+ best_locations : ndarray of shape (select, n_features)
284
+ The `select` location estimates computed from the `select` best
285
+ supports found in the data set (`X`).
286
+
287
+ best_covariances : ndarray of shape (select, n_features, n_features)
288
+ The `select` covariance estimates computed from the `select`
289
+ best supports found in the data set (`X`).
290
+
291
+ best_supports : ndarray of shape (select, n_samples)
292
+ The `select` best supports found in the data set (`X`).
293
+
294
+ References
295
+ ----------
296
+ .. [RV] A Fast Algorithm for the Minimum Covariance Determinant
297
+ Estimator, 1999, American Statistical Association and the American
298
+ Society for Quality, TECHNOMETRICS
299
+ """
300
+ random_state = check_random_state(random_state)
301
+
302
+ if isinstance(n_trials, Integral):
303
+ run_from_estimates = False
304
+ elif isinstance(n_trials, tuple):
305
+ run_from_estimates = True
306
+ estimates_list = n_trials
307
+ n_trials = estimates_list[0].shape[0]
308
+ else:
309
+ raise TypeError(
310
+ "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)"
311
+ % (n_trials, type(n_trials))
312
+ )
313
+
314
+ # compute `n_trials` location and shape estimates candidates in the subset
315
+ all_estimates = []
316
+ if not run_from_estimates:
317
+ # perform `n_trials` computations from random initial supports
318
+ for j in range(n_trials):
319
+ all_estimates.append(
320
+ _c_step(
321
+ X,
322
+ n_support,
323
+ remaining_iterations=n_iter,
324
+ verbose=verbose,
325
+ cov_computation_method=cov_computation_method,
326
+ random_state=random_state,
327
+ )
328
+ )
329
+ else:
330
+ # perform computations from every given initial estimates
331
+ for j in range(n_trials):
332
+ initial_estimates = (estimates_list[0][j], estimates_list[1][j])
333
+ all_estimates.append(
334
+ _c_step(
335
+ X,
336
+ n_support,
337
+ remaining_iterations=n_iter,
338
+ initial_estimates=initial_estimates,
339
+ verbose=verbose,
340
+ cov_computation_method=cov_computation_method,
341
+ random_state=random_state,
342
+ )
343
+ )
344
+ all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip(
345
+ *all_estimates
346
+ )
347
+ # find the `n_best` best results among the `n_trials` ones
348
+ index_best = np.argsort(all_dets_sub)[:select]
349
+ best_locations = np.asarray(all_locs_sub)[index_best]
350
+ best_covariances = np.asarray(all_covs_sub)[index_best]
351
+ best_supports = np.asarray(all_supports_sub)[index_best]
352
+ best_ds = np.asarray(all_ds_sub)[index_best]
353
+
354
+ return best_locations, best_covariances, best_supports, best_ds
355
+
356
+
357
+ def fast_mcd(
358
+ X,
359
+ support_fraction=None,
360
+ cov_computation_method=empirical_covariance,
361
+ random_state=None,
362
+ ):
363
+ """Estimate the Minimum Covariance Determinant matrix.
364
+
365
+ Read more in the :ref:`User Guide <robust_covariance>`.
366
+
367
+ Parameters
368
+ ----------
369
+ X : array-like of shape (n_samples, n_features)
370
+ The data matrix, with p features and n samples.
371
+
372
+ support_fraction : float, default=None
373
+ The proportion of points to be included in the support of the raw
374
+ MCD estimate. Default is `None`, which implies that the minimum
375
+ value of `support_fraction` will be used within the algorithm:
376
+ `(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must be
377
+ in the range (0, 1).
378
+
379
+ cov_computation_method : callable, \
380
+ default=:func:`sklearn.covariance.empirical_covariance`
381
+ The function which will be used to compute the covariance.
382
+ Must return an array of shape (n_features, n_features).
383
+
384
+ random_state : int, RandomState instance or None, default=None
385
+ Determines the pseudo random number generator for shuffling the data.
386
+ Pass an int for reproducible results across multiple function calls.
387
+ See :term:`Glossary <random_state>`.
388
+
389
+ Returns
390
+ -------
391
+ location : ndarray of shape (n_features,)
392
+ Robust location of the data.
393
+
394
+ covariance : ndarray of shape (n_features, n_features)
395
+ Robust covariance of the features.
396
+
397
+ support : ndarray of shape (n_samples,), dtype=bool
398
+ A mask of the observations that have been used to compute
399
+ the robust location and covariance estimates of the data set.
400
+
401
+ Notes
402
+ -----
403
+ The FastMCD algorithm was introduced by Rousseeuw and Van Driessen
404
+ in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
405
+ 1999, American Statistical Association and the American Society
406
+ for Quality, TECHNOMETRICS".
407
+ The principle is to compute robust estimates on random subsets before
408
+ pooling them into larger subsets, and finally into the full data set.
409
+ Depending on the size of the initial sample, we have one, two or three
410
+ such computation levels.
411
+
412
+ Note that only raw estimates are returned. If one is interested in
413
+ the correction and reweighting steps described in [RouseeuwVan]_,
414
+ see the MinCovDet object.
415
+
416
+ References
417
+ ----------
418
+
419
+ .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
420
+ Determinant Estimator, 1999, American Statistical Association
421
+ and the American Society for Quality, TECHNOMETRICS
422
+
423
+ .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
424
+ Asymptotics For The Minimum Covariance Determinant Estimator,
425
+ The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
426
+ """
427
+ random_state = check_random_state(random_state)
428
+
429
+ X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
430
+ n_samples, n_features = X.shape
431
+
432
+ # minimum breakdown value
433
+ if support_fraction is None:
434
+ n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
435
+ else:
436
+ n_support = int(support_fraction * n_samples)
437
+
438
+ # 1-dimensional case quick computation
439
+ # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
440
+ # Regression and Outlier Detection, John Wiley & Sons, chapter 4)
441
+ if n_features == 1:
442
+ if n_support < n_samples:
443
+ # find the shortest halves of the sample
444
+ X_sorted = np.sort(np.ravel(X))
445
+ diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
446
+ halves_start = np.where(diff == np.min(diff))[0]
447
+ # take the middle points' mean to get the robust location estimate
448
+ location = (
449
+ 0.5
450
+ * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
451
+ )
452
+ support = np.zeros(n_samples, dtype=bool)
453
+ X_centered = X - location
454
+ support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
455
+ covariance = np.asarray([[np.var(X[support])]])
456
+ location = np.array([location])
457
+ # get precision matrix in an optimized way
458
+ precision = linalg.pinvh(covariance)
459
+ dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
460
+ else:
461
+ support = np.ones(n_samples, dtype=bool)
462
+ covariance = np.asarray([[np.var(X)]])
463
+ location = np.asarray([np.mean(X)])
464
+ X_centered = X - location
465
+ # get precision matrix in an optimized way
466
+ precision = linalg.pinvh(covariance)
467
+ dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
468
+ # Starting FastMCD algorithm for p-dimensional case
469
+ if (n_samples > 500) and (n_features > 1):
470
+ # 1. Find candidate supports on subsets
471
+ # a. split the set in subsets of size ~ 300
472
+ n_subsets = n_samples // 300
473
+ n_samples_subsets = n_samples // n_subsets
474
+ samples_shuffle = random_state.permutation(n_samples)
475
+ h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
476
+ # b. perform a total of 500 trials
477
+ n_trials_tot = 500
478
+ # c. select 10 best (location, covariance) for each subset
479
+ n_best_sub = 10
480
+ n_trials = max(10, n_trials_tot // n_subsets)
481
+ n_best_tot = n_subsets * n_best_sub
482
+ all_best_locations = np.zeros((n_best_tot, n_features))
483
+ try:
484
+ all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
485
+ except MemoryError:
486
+ # The above is too big. Let's try with something much smaller
487
+ # (and less optimal)
488
+ n_best_tot = 10
489
+ all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
490
+ n_best_sub = 2
491
+ for i in range(n_subsets):
492
+ low_bound = i * n_samples_subsets
493
+ high_bound = low_bound + n_samples_subsets
494
+ current_subset = X[samples_shuffle[low_bound:high_bound]]
495
+ best_locations_sub, best_covariances_sub, _, _ = select_candidates(
496
+ current_subset,
497
+ h_subset,
498
+ n_trials,
499
+ select=n_best_sub,
500
+ n_iter=2,
501
+ cov_computation_method=cov_computation_method,
502
+ random_state=random_state,
503
+ )
504
+ subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
505
+ all_best_locations[subset_slice] = best_locations_sub
506
+ all_best_covariances[subset_slice] = best_covariances_sub
507
+ # 2. Pool the candidate supports into a merged set
508
+ # (possibly the full dataset)
509
+ n_samples_merged = min(1500, n_samples)
510
+ h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
511
+ if n_samples > 1500:
512
+ n_best_merged = 10
513
+ else:
514
+ n_best_merged = 1
515
+ # find the best couples (location, covariance) on the merged set
516
+ selection = random_state.permutation(n_samples)[:n_samples_merged]
517
+ locations_merged, covariances_merged, supports_merged, d = select_candidates(
518
+ X[selection],
519
+ h_merged,
520
+ n_trials=(all_best_locations, all_best_covariances),
521
+ select=n_best_merged,
522
+ cov_computation_method=cov_computation_method,
523
+ random_state=random_state,
524
+ )
525
+ # 3. Finally get the overall best (locations, covariance) couple
526
+ if n_samples < 1500:
527
+ # directly get the best couple (location, covariance)
528
+ location = locations_merged[0]
529
+ covariance = covariances_merged[0]
530
+ support = np.zeros(n_samples, dtype=bool)
531
+ dist = np.zeros(n_samples)
532
+ support[selection] = supports_merged[0]
533
+ dist[selection] = d[0]
534
+ else:
535
+ # select the best couple on the full dataset
536
+ locations_full, covariances_full, supports_full, d = select_candidates(
537
+ X,
538
+ n_support,
539
+ n_trials=(locations_merged, covariances_merged),
540
+ select=1,
541
+ cov_computation_method=cov_computation_method,
542
+ random_state=random_state,
543
+ )
544
+ location = locations_full[0]
545
+ covariance = covariances_full[0]
546
+ support = supports_full[0]
547
+ dist = d[0]
548
+ elif n_features > 1:
549
+ # 1. Find the 10 best couples (location, covariance)
550
+ # considering two iterations
551
+ n_trials = 30
552
+ n_best = 10
553
+ locations_best, covariances_best, _, _ = select_candidates(
554
+ X,
555
+ n_support,
556
+ n_trials=n_trials,
557
+ select=n_best,
558
+ n_iter=2,
559
+ cov_computation_method=cov_computation_method,
560
+ random_state=random_state,
561
+ )
562
+ # 2. Select the best couple on the full dataset amongst the 10
563
+ locations_full, covariances_full, supports_full, d = select_candidates(
564
+ X,
565
+ n_support,
566
+ n_trials=(locations_best, covariances_best),
567
+ select=1,
568
+ cov_computation_method=cov_computation_method,
569
+ random_state=random_state,
570
+ )
571
+ location = locations_full[0]
572
+ covariance = covariances_full[0]
573
+ support = supports_full[0]
574
+ dist = d[0]
575
+
576
+ return location, covariance, support, dist
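
As a quick illustration of the raw estimates returned above (no correction or reweighting), here is a hedged sketch calling `fast_mcd` directly; the contaminated toy data set is an assumption.

# Sketch: raw FastMCD estimates on made-up contaminated data.
import numpy as np
from sklearn.covariance import fast_mcd

rng = np.random.RandomState(42)
X = rng.randn(100, 3)
X[:10] += 10.0  # plant a few gross outliers

location, covariance, support, dist = fast_mcd(X, random_state=0)
print(location)        # raw robust location, before correction/reweighting
print(support.sum())   # number of observations in the selected support
print(dist[:5])        # squared Mahalanobis distances to the raw fit
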
577
+
578
+
579
+ class MinCovDet(EmpiricalCovariance):
580
+ """Minimum Covariance Determinant (MCD): robust estimator of covariance.
581
+
582
+ The Minimum Covariance Determinant covariance estimator is to be applied
583
+ on Gaussian-distributed data, but could still be relevant on data
584
+ drawn from a unimodal, symmetric distribution. It is not meant to be used
585
+ with multi-modal data (the algorithm used to fit a MinCovDet object is
586
+ likely to fail in such a case).
587
+ One should consider projection pursuit methods to deal with multi-modal
588
+ datasets.
589
+
590
+ Read more in the :ref:`User Guide <robust_covariance>`.
591
+
592
+ Parameters
593
+ ----------
594
+ store_precision : bool, default=True
595
+ Specify if the estimated precision is stored.
596
+
597
+ assume_centered : bool, default=False
598
+ If True, the support of the robust location and the covariance
599
+ estimates is computed, and a covariance estimate is recomputed from
600
+ it, without centering the data.
601
+ Useful when working with data whose mean is almost, but not
602
+ exactly zero.
603
+ If False, the robust location and covariance are directly computed
604
+ with the FastMCD algorithm without additional treatment.
605
+
606
+ support_fraction : float, default=None
607
+ The proportion of points to be included in the support of the raw
608
+ MCD estimate. Default is None, which implies that the minimum
609
+ value of support_fraction will be used within the algorithm:
610
+ `(n_samples + n_features + 1) / (2 * n_samples)`. The parameter must be
611
+ in the range (0, 1].
612
+
613
+ random_state : int, RandomState instance or None, default=None
614
+ Determines the pseudo random number generator for shuffling the data.
615
+ Pass an int for reproducible results across multiple function calls.
616
+ See :term:`Glossary <random_state>`.
617
+
618
+ Attributes
619
+ ----------
620
+ raw_location_ : ndarray of shape (n_features,)
621
+ The raw robust estimated location before correction and re-weighting.
622
+
623
+ raw_covariance_ : ndarray of shape (n_features, n_features)
624
+ The raw robust estimated covariance before correction and re-weighting.
625
+
626
+ raw_support_ : ndarray of shape (n_samples,)
627
+ A mask of the observations that have been used to compute
628
+ the raw robust estimates of location and shape, before correction
629
+ and re-weighting.
630
+
631
+ location_ : ndarray of shape (n_features,)
632
+ Estimated robust location.
633
+
634
+ covariance_ : ndarray of shape (n_features, n_features)
635
+ Estimated robust covariance matrix.
636
+
637
+ precision_ : ndarray of shape (n_features, n_features)
638
+ Estimated pseudo inverse matrix.
639
+ (stored only if store_precision is True)
640
+
641
+ support_ : ndarray of shape (n_samples,)
642
+ A mask of the observations that have been used to compute
643
+ the robust estimates of location and shape.
644
+
645
+ dist_ : ndarray of shape (n_samples,)
646
+ Mahalanobis distances of the training set (on which :meth:`fit` is
647
+ called) observations.
648
+
649
+ n_features_in_ : int
650
+ Number of features seen during :term:`fit`.
651
+
652
+ .. versionadded:: 0.24
653
+
654
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
655
+ Names of features seen during :term:`fit`. Defined only when `X`
656
+ has feature names that are all strings.
657
+
658
+ .. versionadded:: 1.0
659
+
660
+ See Also
661
+ --------
662
+ EllipticEnvelope : An object for detecting outliers in
663
+ a Gaussian distributed dataset.
664
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
665
+ GraphicalLasso : Sparse inverse covariance estimation
666
+ with an l1-penalized estimator.
667
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
668
+ choice of the l1 penalty.
669
+ LedoitWolf : LedoitWolf Estimator.
670
+ OAS : Oracle Approximating Shrinkage Estimator.
671
+ ShrunkCovariance : Covariance estimator with shrinkage.
672
+
673
+ References
674
+ ----------
675
+
676
+ .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression.
677
+ J. Am Stat Ass, 79:871, 1984.
678
+ .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant
679
+ Estimator, 1999, American Statistical Association and the American
680
+ Society for Quality, TECHNOMETRICS
681
+ .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun,
682
+ Asymptotics For The Minimum Covariance Determinant Estimator,
683
+ The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
684
+
685
+ Examples
686
+ --------
687
+ >>> import numpy as np
688
+ >>> from sklearn.covariance import MinCovDet
689
+ >>> from sklearn.datasets import make_gaussian_quantiles
690
+ >>> real_cov = np.array([[.8, .3],
691
+ ... [.3, .4]])
692
+ >>> rng = np.random.RandomState(0)
693
+ >>> X = rng.multivariate_normal(mean=[0, 0],
694
+ ... cov=real_cov,
695
+ ... size=500)
696
+ >>> cov = MinCovDet(random_state=0).fit(X)
697
+ >>> cov.covariance_
698
+ array([[0.7411..., 0.2535...],
699
+ [0.2535..., 0.3053...]])
700
+ >>> cov.location_
701
+ array([0.0813... , 0.0427...])
702
+ """
703
+
704
+ _parameter_constraints: dict = {
705
+ **EmpiricalCovariance._parameter_constraints,
706
+ "support_fraction": [Interval(Real, 0, 1, closed="right"), None],
707
+ "random_state": ["random_state"],
708
+ }
709
+ _nonrobust_covariance = staticmethod(empirical_covariance)
710
+
711
+ def __init__(
712
+ self,
713
+ *,
714
+ store_precision=True,
715
+ assume_centered=False,
716
+ support_fraction=None,
717
+ random_state=None,
718
+ ):
719
+ self.store_precision = store_precision
720
+ self.assume_centered = assume_centered
721
+ self.support_fraction = support_fraction
722
+ self.random_state = random_state
723
+
724
+ @_fit_context(prefer_skip_nested_validation=True)
725
+ def fit(self, X, y=None):
726
+ """Fit a Minimum Covariance Determinant with the FastMCD algorithm.
727
+
728
+ Parameters
729
+ ----------
730
+ X : array-like of shape (n_samples, n_features)
731
+ Training data, where `n_samples` is the number of samples
732
+ and `n_features` is the number of features.
733
+
734
+ y : Ignored
735
+ Not used, present for API consistency by convention.
736
+
737
+ Returns
738
+ -------
739
+ self : object
740
+ Returns the instance itself.
741
+ """
742
+ X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet")
743
+ random_state = check_random_state(self.random_state)
744
+ n_samples, n_features = X.shape
745
+ # check that the empirical covariance is full rank
746
+ if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
747
+ warnings.warn(
748
+ "The covariance matrix associated to your dataset is not full rank"
749
+ )
750
+ # compute and store raw estimates
751
+ raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
752
+ X,
753
+ support_fraction=self.support_fraction,
754
+ cov_computation_method=self._nonrobust_covariance,
755
+ random_state=random_state,
756
+ )
757
+ if self.assume_centered:
758
+ raw_location = np.zeros(n_features)
759
+ raw_covariance = self._nonrobust_covariance(
760
+ X[raw_support], assume_centered=True
761
+ )
762
+ # get precision matrix in an optimized way
763
+ precision = linalg.pinvh(raw_covariance)
764
+ raw_dist = np.sum(np.dot(X, precision) * X, 1)
765
+ self.raw_location_ = raw_location
766
+ self.raw_covariance_ = raw_covariance
767
+ self.raw_support_ = raw_support
768
+ self.location_ = raw_location
769
+ self.support_ = raw_support
770
+ self.dist_ = raw_dist
771
+ # obtain consistency at normal models
772
+ self.correct_covariance(X)
773
+ # re-weight estimator
774
+ self.reweight_covariance(X)
775
+
776
+ return self
777
+
778
+ def correct_covariance(self, data):
779
+ """Apply a correction to raw Minimum Covariance Determinant estimates.
780
+
781
+ Correction using the empirical correction factor suggested
782
+ by Rousseeuw and Van Driessen in [RVD]_.
783
+
784
+ Parameters
785
+ ----------
786
+ data : array-like of shape (n_samples, n_features)
787
+ The data matrix, with p features and n samples.
788
+ The data set must be the one which was used to compute
789
+ the raw estimates.
790
+
791
+ Returns
792
+ -------
793
+ covariance_corrected : ndarray of shape (n_features, n_features)
794
+ Corrected robust covariance estimate.
795
+
796
+ References
797
+ ----------
798
+
799
+ .. [RVD] A Fast Algorithm for the Minimum Covariance
800
+ Determinant Estimator, 1999, American Statistical Association
801
+ and the American Society for Quality, TECHNOMETRICS
802
+ """
803
+
804
+ # Check that the covariance of the support data is not equal to 0.
805
+ # Otherwise self.dist_ = 0 and thus correction = 0.
806
+ n_samples = len(self.dist_)
807
+ n_support = np.sum(self.support_)
808
+ if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
809
+ raise ValueError(
810
+ "The covariance matrix of the support data "
811
+ "is equal to 0, try to increase support_fraction"
812
+ )
813
+ correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
814
+ covariance_corrected = self.raw_covariance_ * correction
815
+ self.dist_ /= correction
816
+ return covariance_corrected
817
+
818
+ def reweight_covariance(self, data):
819
+ """Re-weight raw Minimum Covariance Determinant estimates.
820
+
821
+ Re-weight observations using Rousseeuw's method (equivalent to
822
+ deleting outlying observations from the data set before
823
+ computing location and covariance estimates) described
824
+ in [RVDriessen]_.
825
+
826
+ Parameters
827
+ ----------
828
+ data : array-like of shape (n_samples, n_features)
829
+ The data matrix, with p features and n samples.
830
+ The data set must be the one which was used to compute
831
+ the raw estimates.
832
+
833
+ Returns
834
+ -------
835
+ location_reweighted : ndarray of shape (n_features,)
836
+ Re-weighted robust location estimate.
837
+
838
+ covariance_reweighted : ndarray of shape (n_features, n_features)
839
+ Re-weighted robust covariance estimate.
840
+
841
+ support_reweighted : ndarray of shape (n_samples,), dtype=bool
842
+ A mask of the observations that have been used to compute
843
+ the re-weighted robust location and covariance estimates.
844
+
845
+ References
846
+ ----------
847
+
848
+ .. [RVDriessen] A Fast Algorithm for the Minimum Covariance
849
+ Determinant Estimator, 1999, American Statistical Association
850
+ and the American Society for Quality, TECHNOMETRICS
851
+ """
852
+ n_samples, n_features = data.shape
853
+ mask = self.dist_ < chi2(n_features).isf(0.025)
854
+ if self.assume_centered:
855
+ location_reweighted = np.zeros(n_features)
856
+ else:
857
+ location_reweighted = data[mask].mean(0)
858
+ covariance_reweighted = self._nonrobust_covariance(
859
+ data[mask], assume_centered=self.assume_centered
860
+ )
861
+ support_reweighted = np.zeros(n_samples, dtype=bool)
862
+ support_reweighted[mask] = True
863
+ self._set_covariance(covariance_reweighted)
864
+ self.location_ = location_reweighted
865
+ self.support_ = support_reweighted
866
+ X_centered = data - self.location_
867
+ self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1)
868
+ return location_reweighted, covariance_reweighted, support_reweighted
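
To see how the raw, corrected and reweighted estimates computed by the methods above differ in practice, here is a hedged sketch; the contaminated toy data set is an assumption.

# Sketch contrasting raw and reweighted MCD estimates.
import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=300)
X[:15] += 6.0  # contaminate a small fraction of the samples

mcd = MinCovDet(random_state=0).fit(X)
print(mcd.raw_covariance_)  # before consistency correction and reweighting
print(mcd.covariance_)      # after correct_covariance / reweight_covariance
print(mcd.support_.sum())   # observations kept by the reweighting step
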
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py ADDED
@@ -0,0 +1,816 @@
1
+ """
2
+ Covariance estimators using shrinkage.
3
+
4
+ Shrinkage corresponds to regularising `cov` using a convex combination:
5
+ shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
6
+
7
+ """
8
+
9
+ # Author: Alexandre Gramfort <[email protected]>
10
+ # Gael Varoquaux <[email protected]>
11
+ # Virgile Fritsch <[email protected]>
12
+ #
13
+ # License: BSD 3 clause
14
+
15
+ # avoid division truncation
16
+ import warnings
17
+ from numbers import Integral, Real
18
+
19
+ import numpy as np
20
+
21
+ from ..base import _fit_context
22
+ from ..utils import check_array
23
+ from ..utils._param_validation import Interval, validate_params
24
+ from . import EmpiricalCovariance, empirical_covariance
25
+
26
+
27
+ def _ledoit_wolf(X, *, assume_centered, block_size):
28
+ """Estimate the shrunk Ledoit-Wolf covariance matrix."""
29
+ # for only one feature, the result is the same whatever the shrinkage
30
+ if len(X.shape) == 2 and X.shape[1] == 1:
31
+ if not assume_centered:
32
+ X = X - X.mean()
33
+ return np.atleast_2d((X**2).mean()), 0.0
34
+ n_features = X.shape[1]
35
+
36
+ # get Ledoit-Wolf shrinkage
37
+ shrinkage = ledoit_wolf_shrinkage(
38
+ X, assume_centered=assume_centered, block_size=block_size
39
+ )
40
+ emp_cov = empirical_covariance(X, assume_centered=assume_centered)
41
+ mu = np.sum(np.trace(emp_cov)) / n_features
42
+ shrunk_cov = (1.0 - shrinkage) * emp_cov
43
+ shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
44
+
45
+ return shrunk_cov, shrinkage
46
+
47
+
48
+ def _oas(X, *, assume_centered=False):
49
+ """Estimate covariance with the Oracle Approximating Shrinkage algorithm.
50
+
51
+ The formulation is based on [1]_.
52
+ [1] "Shrinkage algorithms for MMSE covariance estimation.",
53
+ Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
54
+ IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
55
+ https://arxiv.org/pdf/0907.4698.pdf
56
+ """
57
+ if len(X.shape) == 2 and X.shape[1] == 1:
58
+ # for only one feature, the result is the same whatever the shrinkage
59
+ if not assume_centered:
60
+ X = X - X.mean()
61
+ return np.atleast_2d((X**2).mean()), 0.0
62
+
63
+ n_samples, n_features = X.shape
64
+
65
+ emp_cov = empirical_covariance(X, assume_centered=assume_centered)
66
+
67
+ # The shrinkage is defined as:
68
+ # shrinkage = min(
69
+ # (trace(S @ S.T) + trace(S)**2) / ((n + 1) * (trace(S @ S.T) - trace(S)**2 / p)), 1
70
+ # )
71
+ # where n and p are n_samples and n_features, respectively (cf. Eq. 23 in [1]).
72
+ # The factor 2 / p is omitted since it does not impact the value of the estimator
73
+ # for large p.
74
+
75
+ # Instead of computing trace(S @ S.T), we can compute the mean of the squared
77
+ # elements of S, which is equal to trace(S @ S.T) / p**2.
77
+ # See the definition of the Frobenius norm:
78
+ # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm
79
+ alpha = np.mean(emp_cov**2)
80
+ mu = np.trace(emp_cov) / n_features
81
+ mu_squared = mu**2
82
+
83
+ # The factor 1 / p**2 will cancel out since it is in both the numerator and
84
+ # denominator
85
+ num = alpha + mu_squared
86
+ den = (n_samples + 1) * (alpha - mu_squared / n_features)
87
+ shrinkage = 1.0 if den == 0 else min(num / den, 1.0)
88
+
89
+ # The shrunk covariance is defined as:
90
+ # (1 - shrinkage) * S + shrinkage * F (cf. Eq. 4 in [1])
91
+ # where S is the empirical covariance and F is the shrinkage target defined as
92
+ # F = trace(S) / n_features * np.identity(n_features) (cf. Eq. 3 in [1])
93
+ shrunk_cov = (1.0 - shrinkage) * emp_cov
94
+ shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
95
+
96
+ return shrunk_cov, shrinkage
97
+
98
+
99
+ ###############################################################################
100
+ # Public API
101
+ # ShrunkCovariance estimator
102
+
103
+
104
+ @validate_params(
105
+ {
106
+ "emp_cov": ["array-like"],
107
+ "shrinkage": [Interval(Real, 0, 1, closed="both")],
108
+ },
109
+ prefer_skip_nested_validation=True,
110
+ )
111
+ def shrunk_covariance(emp_cov, shrinkage=0.1):
112
+ """Calculate covariance matrices shrunk on the diagonal.
113
+
114
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
115
+
116
+ Parameters
117
+ ----------
118
+ emp_cov : array-like of shape (..., n_features, n_features)
119
+ Covariance matrices to be shrunk, at least 2D ndarray.
120
+
121
+ shrinkage : float, default=0.1
122
+ Coefficient in the convex combination used for the computation
123
+ of the shrunk estimate. Range is [0, 1].
124
+
125
+ Returns
126
+ -------
127
+ shrunk_cov : ndarray of shape (..., n_features, n_features)
128
+ Shrunk covariance matrices.
129
+
130
+ Notes
131
+ -----
132
+ The regularized (shrunk) covariance is given by::
133
+
134
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
135
+
136
+ where `mu = trace(cov) / n_features`.
137
+
138
+ Examples
139
+ --------
140
+ >>> import numpy as np
141
+ >>> from sklearn.datasets import make_gaussian_quantiles
142
+ >>> from sklearn.covariance import empirical_covariance, shrunk_covariance
143
+ >>> real_cov = np.array([[.8, .3], [.3, .4]])
144
+ >>> rng = np.random.RandomState(0)
145
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
146
+ >>> shrunk_covariance(empirical_covariance(X))
147
+ array([[0.73..., 0.25...],
148
+ [0.25..., 0.41...]])
149
+ """
150
+ emp_cov = check_array(emp_cov, allow_nd=True)
151
+ n_features = emp_cov.shape[-1]
152
+
153
+ shrunk_cov = (1.0 - shrinkage) * emp_cov
154
+ mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features
155
+ mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim)))
156
+ shrunk_cov += shrinkage * mu * np.eye(n_features)
157
+
158
+ return shrunk_cov
159
+
160
+
161
+ class ShrunkCovariance(EmpiricalCovariance):
162
+ """Covariance estimator with shrinkage.
163
+
164
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
165
+
166
+ Parameters
167
+ ----------
168
+ store_precision : bool, default=True
169
+ Specify if the estimated precision is stored.
170
+
171
+ assume_centered : bool, default=False
172
+ If True, data will not be centered before computation.
173
+ Useful when working with data whose mean is almost, but not exactly
174
+ zero.
175
+ If False, data will be centered before computation.
176
+
177
+ shrinkage : float, default=0.1
178
+ Coefficient in the convex combination used for the computation
179
+ of the shrunk estimate. Range is [0, 1].
180
+
181
+ Attributes
182
+ ----------
183
+ covariance_ : ndarray of shape (n_features, n_features)
184
+ Estimated covariance matrix
185
+
186
+ location_ : ndarray of shape (n_features,)
187
+ Estimated location, i.e. the estimated mean.
188
+
189
+ precision_ : ndarray of shape (n_features, n_features)
190
+ Estimated pseudo inverse matrix.
191
+ (stored only if store_precision is True)
192
+
193
+ n_features_in_ : int
194
+ Number of features seen during :term:`fit`.
195
+
196
+ .. versionadded:: 0.24
197
+
198
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
199
+ Names of features seen during :term:`fit`. Defined only when `X`
200
+ has feature names that are all strings.
201
+
202
+ .. versionadded:: 1.0
203
+
204
+ See Also
205
+ --------
206
+ EllipticEnvelope : An object for detecting outliers in
207
+ a Gaussian distributed dataset.
208
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
209
+ GraphicalLasso : Sparse inverse covariance estimation
210
+ with an l1-penalized estimator.
211
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
212
+ choice of the l1 penalty.
213
+ LedoitWolf : LedoitWolf Estimator.
214
+ MinCovDet : Minimum Covariance Determinant
215
+ (robust estimator of covariance).
216
+ OAS : Oracle Approximating Shrinkage Estimator.
217
+
218
+ Notes
219
+ -----
220
+ The regularized covariance is given by:
221
+
222
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
223
+
224
+ where mu = trace(cov) / n_features
225
+
226
+ Examples
227
+ --------
228
+ >>> import numpy as np
229
+ >>> from sklearn.covariance import ShrunkCovariance
230
+ >>> from sklearn.datasets import make_gaussian_quantiles
231
+ >>> real_cov = np.array([[.8, .3],
232
+ ... [.3, .4]])
233
+ >>> rng = np.random.RandomState(0)
234
+ >>> X = rng.multivariate_normal(mean=[0, 0],
235
+ ... cov=real_cov,
236
+ ... size=500)
237
+ >>> cov = ShrunkCovariance().fit(X)
238
+ >>> cov.covariance_
239
+ array([[0.7387..., 0.2536...],
240
+ [0.2536..., 0.4110...]])
241
+ >>> cov.location_
242
+ array([0.0622..., 0.0193...])
243
+ """
244
+
245
+ _parameter_constraints: dict = {
246
+ **EmpiricalCovariance._parameter_constraints,
247
+ "shrinkage": [Interval(Real, 0, 1, closed="both")],
248
+ }
249
+
250
+ def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1):
251
+ super().__init__(
252
+ store_precision=store_precision, assume_centered=assume_centered
253
+ )
254
+ self.shrinkage = shrinkage
255
+
256
+ @_fit_context(prefer_skip_nested_validation=True)
257
+ def fit(self, X, y=None):
258
+ """Fit the shrunk covariance model to X.
259
+
260
+ Parameters
261
+ ----------
262
+ X : array-like of shape (n_samples, n_features)
263
+ Training data, where `n_samples` is the number of samples
264
+ and `n_features` is the number of features.
265
+
266
+ y : Ignored
267
+ Not used, present for API consistency by convention.
268
+
269
+ Returns
270
+ -------
271
+ self : object
272
+ Returns the instance itself.
273
+ """
274
+ X = self._validate_data(X)
275
+ # Not calling the parent object to fit, to avoid a potential
276
+ # matrix inversion when setting the precision
277
+ if self.assume_centered:
278
+ self.location_ = np.zeros(X.shape[1])
279
+ else:
280
+ self.location_ = X.mean(0)
281
+ covariance = empirical_covariance(X, assume_centered=self.assume_centered)
282
+ covariance = shrunk_covariance(covariance, self.shrinkage)
283
+ self._set_covariance(covariance)
284
+
285
+ return self
286
+
287
+
288
+ # Ledoit-Wolf estimator
289
+
290
+
291
+ @validate_params(
292
+ {
293
+ "X": ["array-like"],
294
+ "assume_centered": ["boolean"],
295
+ "block_size": [Interval(Integral, 1, None, closed="left")],
296
+ },
297
+ prefer_skip_nested_validation=True,
298
+ )
299
+ def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
300
+ """Estimate the shrunk Ledoit-Wolf covariance matrix.
301
+
302
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
303
+
304
+ Parameters
305
+ ----------
306
+ X : array-like of shape (n_samples, n_features)
307
+ Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
308
+
309
+ assume_centered : bool, default=False
310
+ If True, data will not be centered before computation.
311
+ Useful to work with data whose mean is significantly equal to
312
+ zero but is not exactly zero.
313
+ If False, data will be centered before computation.
314
+
315
+ block_size : int, default=1000
316
+ Size of blocks into which the covariance matrix will be split.
317
+
318
+ Returns
319
+ -------
320
+ shrinkage : float
321
+ Coefficient in the convex combination used for the computation
322
+ of the shrunk estimate.
323
+
324
+ Notes
325
+ -----
326
+ The regularized (shrunk) covariance is:
327
+
328
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
329
+
330
+ where mu = trace(cov) / n_features
331
+
332
+ Examples
333
+ --------
334
+ >>> import numpy as np
335
+ >>> from sklearn.covariance import ledoit_wolf_shrinkage
336
+ >>> real_cov = np.array([[.4, .2], [.2, .8]])
337
+ >>> rng = np.random.RandomState(0)
338
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50)
339
+ >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X)
340
+ >>> shrinkage_coefficient
341
+ 0.23...
342
+ """
343
+ X = check_array(X)
344
+ # for only one feature, the result is the same whatever the shrinkage
345
+ if len(X.shape) == 2 and X.shape[1] == 1:
346
+ return 0.0
347
+ if X.ndim == 1:
348
+ X = np.reshape(X, (1, -1))
349
+
350
+ if X.shape[0] == 1:
351
+ warnings.warn(
352
+ "Only one sample available. You may want to reshape your data array"
353
+ )
354
+ n_samples, n_features = X.shape
355
+
356
+ # optionally center data
357
+ if not assume_centered:
358
+ X = X - X.mean(0)
359
+
360
+ # A non-blocked version of the computation is present in the tests
361
+ # in tests/test_covariance.py
362
+
363
+ # number of blocks to split the covariance matrix into
364
+ n_splits = int(n_features / block_size)
365
+ X2 = X**2
366
+ emp_cov_trace = np.sum(X2, axis=0) / n_samples
367
+ mu = np.sum(emp_cov_trace) / n_features
368
+ beta_ = 0.0 # sum of the coefficients of <X2.T, X2>
369
+ delta_ = 0.0 # sum of the *squared* coefficients of <X.T, X>
370
+ # starting block computation
371
+ for i in range(n_splits):
372
+ for j in range(n_splits):
373
+ rows = slice(block_size * i, block_size * (i + 1))
374
+ cols = slice(block_size * j, block_size * (j + 1))
375
+ beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
376
+ delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
377
+ rows = slice(block_size * i, block_size * (i + 1))
378
+ beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :]))
379
+ delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2)
380
+ for j in range(n_splits):
381
+ cols = slice(block_size * j, block_size * (j + 1))
382
+ beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols]))
383
+ delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2)
384
+ delta_ += np.sum(
385
+ np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2
386
+ )
387
+ delta_ /= n_samples**2
388
+ beta_ += np.sum(
389
+ np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :])
390
+ )
391
+ # use delta_ to compute beta
392
+ beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_)
393
+ # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
394
+ delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2
395
+ delta /= n_features
396
+ # get final beta as the min between beta and delta
397
+ # We do this to prevent shrinking more than "1", which would invert
398
+ # the value of covariances
399
+ beta = min(beta, delta)
400
+ # finally get shrinkage
401
+ shrinkage = 0 if beta == 0 else beta / delta
402
+ return shrinkage
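
A non-blocked restatement of the blocked computation above (the comment in the code notes that such a version lives in the test suite) can be written in a few lines; this hedged sketch on made-up data should agree with `ledoit_wolf_shrinkage`.

# Non-blocked restatement of the blocked computation above.
import numpy as np
from sklearn.covariance import ledoit_wolf_shrinkage

rng = np.random.RandomState(0)
X = rng.randn(60, 5)

Xc = X - X.mean(axis=0)                     # same centering as assume_centered=False
n_samples, n_features = Xc.shape
X2 = Xc**2
emp_cov_trace = X2.sum(axis=0) / n_samples
mu = emp_cov_trace.sum() / n_features
beta_ = np.sum(np.dot(X2.T, X2))            # sum of the coefficients of <X2.T, X2>
delta_ = np.sum(np.dot(Xc.T, Xc) ** 2) / n_samples**2
beta = (beta_ / n_samples - delta_) / (n_features * n_samples)
delta = (delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2) / n_features
beta = min(beta, delta)
shrinkage_manual = 0 if beta == 0 else beta / delta

print(shrinkage_manual, ledoit_wolf_shrinkage(X))  # expected to agree
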
403
+
404
+
405
+ @validate_params(
406
+ {"X": ["array-like"]},
407
+ prefer_skip_nested_validation=False,
408
+ )
409
+ def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
410
+ """Estimate the shrunk Ledoit-Wolf covariance matrix.
411
+
412
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
413
+
414
+ Parameters
415
+ ----------
416
+ X : array-like of shape (n_samples, n_features)
417
+ Data from which to compute the covariance estimate.
418
+
419
+ assume_centered : bool, default=False
420
+ If True, data will not be centered before computation.
421
+ Useful to work with data whose mean is significantly equal to
422
+ zero but is not exactly zero.
423
+ If False, data will be centered before computation.
424
+
425
+ block_size : int, default=1000
426
+ Size of blocks into which the covariance matrix will be split.
427
+ This is purely a memory optimization and does not affect results.
428
+
429
+ Returns
430
+ -------
431
+ shrunk_cov : ndarray of shape (n_features, n_features)
432
+ Shrunk covariance.
433
+
434
+ shrinkage : float
435
+ Coefficient in the convex combination used for the computation
436
+ of the shrunk estimate.
437
+
438
+ Notes
439
+ -----
440
+ The regularized (shrunk) covariance is:
441
+
442
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
443
+
444
+ where mu = trace(cov) / n_features
445
+
446
+ Examples
447
+ --------
448
+ >>> import numpy as np
449
+ >>> from sklearn.covariance import empirical_covariance, ledoit_wolf
450
+ >>> real_cov = np.array([[.4, .2], [.2, .8]])
451
+ >>> rng = np.random.RandomState(0)
452
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50)
453
+ >>> covariance, shrinkage = ledoit_wolf(X)
454
+ >>> covariance
455
+ array([[0.44..., 0.16...],
456
+ [0.16..., 0.80...]])
457
+ >>> shrinkage
458
+ 0.23...
459
+ """
460
+ estimator = LedoitWolf(
461
+ assume_centered=assume_centered,
462
+ block_size=block_size,
463
+ store_precision=False,
464
+ ).fit(X)
465
+
466
+ return estimator.covariance_, estimator.shrinkage_
467
+
468
+
469
+ class LedoitWolf(EmpiricalCovariance):
470
+ """LedoitWolf Estimator.
471
+
472
+ Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
473
+ coefficient is computed using O. Ledoit and M. Wolf's formula as
474
+ described in "A Well-Conditioned Estimator for Large-Dimensional
475
+ Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
476
+ Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
477
+
478
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
479
+
480
+ Parameters
481
+ ----------
482
+ store_precision : bool, default=True
483
+ Specify if the estimated precision is stored.
484
+
485
+ assume_centered : bool, default=False
486
+ If True, data will not be centered before computation.
487
+ Useful when working with data whose mean is almost, but not exactly
488
+ zero.
489
+ If False (default), data will be centered before computation.
490
+
491
+ block_size : int, default=1000
492
+ Size of blocks into which the covariance matrix will be split
493
+ during its Ledoit-Wolf estimation. This is purely a memory
494
+ optimization and does not affect results.
495
+
496
+ Attributes
497
+ ----------
498
+ covariance_ : ndarray of shape (n_features, n_features)
499
+ Estimated covariance matrix.
500
+
501
+ location_ : ndarray of shape (n_features,)
502
+ Estimated location, i.e. the estimated mean.
503
+
504
+ precision_ : ndarray of shape (n_features, n_features)
505
+ Estimated pseudo inverse matrix.
506
+ (stored only if store_precision is True)
507
+
508
+ shrinkage_ : float
509
+ Coefficient in the convex combination used for the computation
510
+ of the shrunk estimate. Range is [0, 1].
511
+
512
+ n_features_in_ : int
513
+ Number of features seen during :term:`fit`.
514
+
515
+ .. versionadded:: 0.24
516
+
517
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
518
+ Names of features seen during :term:`fit`. Defined only when `X`
519
+ has feature names that are all strings.
520
+
521
+ .. versionadded:: 1.0
522
+
523
+ See Also
524
+ --------
525
+ EllipticEnvelope : An object for detecting outliers in
526
+ a Gaussian distributed dataset.
527
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
528
+ GraphicalLasso : Sparse inverse covariance estimation
529
+ with an l1-penalized estimator.
530
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
531
+ choice of the l1 penalty.
532
+ MinCovDet : Minimum Covariance Determinant
533
+ (robust estimator of covariance).
534
+ OAS : Oracle Approximating Shrinkage Estimator.
535
+ ShrunkCovariance : Covariance estimator with shrinkage.
536
+
537
+ Notes
538
+ -----
539
+ The regularised covariance is:
540
+
541
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
542
+
543
+ where mu = trace(cov) / n_features
544
+ and shrinkage is given by the Ledoit and Wolf formula (see References)
545
+
546
+ References
547
+ ----------
548
+ "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
549
+ Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
550
+ February 2004, pages 365-411.
551
+
552
+ Examples
553
+ --------
554
+ >>> import numpy as np
555
+ >>> from sklearn.covariance import LedoitWolf
556
+ >>> real_cov = np.array([[.4, .2],
557
+ ... [.2, .8]])
558
+ >>> np.random.seed(0)
559
+ >>> X = np.random.multivariate_normal(mean=[0, 0],
560
+ ... cov=real_cov,
561
+ ... size=50)
562
+ >>> cov = LedoitWolf().fit(X)
563
+ >>> cov.covariance_
564
+ array([[0.4406..., 0.1616...],
565
+ [0.1616..., 0.8022...]])
566
+ >>> cov.location_
567
+ array([ 0.0595... , -0.0075...])
568
+ """
569
+
570
+ _parameter_constraints: dict = {
571
+ **EmpiricalCovariance._parameter_constraints,
572
+ "block_size": [Interval(Integral, 1, None, closed="left")],
573
+ }
574
+
575
+ def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000):
576
+ super().__init__(
577
+ store_precision=store_precision, assume_centered=assume_centered
578
+ )
579
+ self.block_size = block_size
580
+
581
+ @_fit_context(prefer_skip_nested_validation=True)
582
+ def fit(self, X, y=None):
583
+ """Fit the Ledoit-Wolf shrunk covariance model to X.
584
+
585
+ Parameters
586
+ ----------
587
+ X : array-like of shape (n_samples, n_features)
588
+ Training data, where `n_samples` is the number of samples
589
+ and `n_features` is the number of features.
590
+ y : Ignored
591
+ Not used, present for API consistency by convention.
592
+
593
+ Returns
594
+ -------
595
+ self : object
596
+ Returns the instance itself.
597
+ """
598
+ # Not calling the parent object to fit, to avoid computing the
599
+ # covariance matrix (and potentially the precision)
600
+ X = self._validate_data(X)
601
+ if self.assume_centered:
602
+ self.location_ = np.zeros(X.shape[1])
603
+ else:
604
+ self.location_ = X.mean(0)
605
+ covariance, shrinkage = _ledoit_wolf(
606
+ X - self.location_, assume_centered=True, block_size=self.block_size
607
+ )
608
+ self.shrinkage_ = shrinkage
609
+ self._set_covariance(covariance)
610
+
611
+ return self
612
+
613
+
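The Notes above describe the regularised covariance as a convex combination of the empirical covariance and a scaled identity. A minimal sketch, using only the public sklearn.covariance and NumPy APIs, that checks this identity numerically (toy data, illustrative only):

import numpy as np
from sklearn.covariance import LedoitWolf, empirical_covariance

rng = np.random.RandomState(0)
real_cov = np.array([[0.4, 0.2], [0.2, 0.8]])
X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50)

lw = LedoitWolf().fit(X)
emp_cov = empirical_covariance(X)            # maximum likelihood estimate
mu = np.trace(emp_cov) / emp_cov.shape[0]    # mu = trace(cov) / n_features

# (1 - shrinkage) * cov + shrinkage * mu * I should reproduce covariance_
manual = (1 - lw.shrinkage_) * emp_cov + lw.shrinkage_ * mu * np.eye(emp_cov.shape[0])
print(np.allclose(manual, lw.covariance_))   # expected: True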
614
+ # OAS estimator
615
+ @validate_params(
616
+ {"X": ["array-like"]},
617
+ prefer_skip_nested_validation=False,
618
+ )
619
+ def oas(X, *, assume_centered=False):
620
+ """Estimate covariance with the Oracle Approximating Shrinkage as proposed in [1]_.
621
+
622
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
623
+
624
+ Parameters
625
+ ----------
626
+ X : array-like of shape (n_samples, n_features)
627
+ Data from which to compute the covariance estimate.
628
+
629
+ assume_centered : bool, default=False
630
+ If True, data will not be centered before computation.
631
+ Useful when working with data whose mean is almost, but not exactly,
632
+ zero.
633
+ If False, data will be centered before computation.
634
+
635
+ Returns
636
+ -------
637
+ shrunk_cov : array-like of shape (n_features, n_features)
638
+ Shrunk covariance.
639
+
640
+ shrinkage : float
641
+ Coefficient in the convex combination used for the computation
642
+ of the shrunk estimate.
643
+
644
+ Notes
645
+ -----
646
+ The regularised covariance is:
647
+
648
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),
649
+
650
+ where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
651
+ (see [1]_).
652
+
653
+ The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
654
+ the original article, formula (23) states that 2/p (p being the number of
655
+ features) is multiplied by Trace(cov*cov) in both the numerator and
656
+ denominator, but this operation is omitted because for a large p, the value
657
+ of 2/p is so small that it doesn't affect the value of the estimator.
658
+
659
+ References
660
+ ----------
661
+ .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
662
+ Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
663
+ IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
664
+ <0907.4698>`
665
+
666
+ Examples
667
+ --------
668
+ >>> import numpy as np
669
+ >>> from sklearn.covariance import oas
670
+ >>> rng = np.random.RandomState(0)
671
+ >>> real_cov = [[.8, .3], [.3, .4]]
672
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
673
+ >>> shrunk_cov, shrinkage = oas(X)
674
+ >>> shrunk_cov
675
+ array([[0.7533..., 0.2763...],
676
+ [0.2763..., 0.3964...]])
677
+ >>> shrinkage
678
+ 0.0195...
679
+ """
680
+ estimator = OAS(
681
+ assume_centered=assume_centered,
682
+ ).fit(X)
683
+ return estimator.covariance_, estimator.shrinkage_
684
+
685
+
686
+ class OAS(EmpiricalCovariance):
687
+ """Oracle Approximating Shrinkage Estimator as proposed in [1]_.
688
+
689
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
690
+
691
+ Parameters
692
+ ----------
693
+ store_precision : bool, default=True
694
+ Specify if the estimated precision is stored.
695
+
696
+ assume_centered : bool, default=False
697
+ If True, data will not be centered before computation.
698
+ Useful when working with data whose mean is almost, but not exactly
699
+ zero.
700
+ If False (default), data will be centered before computation.
701
+
702
+ Attributes
703
+ ----------
704
+ covariance_ : ndarray of shape (n_features, n_features)
705
+ Estimated covariance matrix.
706
+
707
+ location_ : ndarray of shape (n_features,)
708
+ Estimated location, i.e. the estimated mean.
709
+
710
+ precision_ : ndarray of shape (n_features, n_features)
711
+ Estimated pseudo inverse matrix.
712
+ (stored only if store_precision is True)
713
+
714
+ shrinkage_ : float
715
+ Coefficient in the convex combination used for the computation
716
+ of the shrunk estimate. Range is [0, 1].
717
+
718
+ n_features_in_ : int
719
+ Number of features seen during :term:`fit`.
720
+
721
+ .. versionadded:: 0.24
722
+
723
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
724
+ Names of features seen during :term:`fit`. Defined only when `X`
725
+ has feature names that are all strings.
726
+
727
+ .. versionadded:: 1.0
728
+
729
+ See Also
730
+ --------
731
+ EllipticEnvelope : An object for detecting outliers in
732
+ a Gaussian distributed dataset.
733
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
734
+ GraphicalLasso : Sparse inverse covariance estimation
735
+ with an l1-penalized estimator.
736
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
737
+ choice of the l1 penalty.
738
+ LedoitWolf : LedoitWolf Estimator.
739
+ MinCovDet : Minimum Covariance Determinant
740
+ (robust estimator of covariance).
741
+ ShrunkCovariance : Covariance estimator with shrinkage.
742
+
743
+ Notes
744
+ -----
745
+ The regularised covariance is:
746
+
747
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),
748
+
749
+ where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
750
+ (see [1]_).
751
+
752
+ The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
753
+ the original article, formula (23) states that 2/p (p being the number of
754
+ features) is multiplied by Trace(cov*cov) in both the numerator and
755
+ denominator, but this operation is omitted because for a large p, the value
756
+ of 2/p is so small that it doesn't affect the value of the estimator.
757
+
758
+ References
759
+ ----------
760
+ .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
761
+ Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
762
+ IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
763
+ <0907.4698>`
764
+
765
+ Examples
766
+ --------
767
+ >>> import numpy as np
768
+ >>> from sklearn.covariance import OAS
769
+ >>> from sklearn.datasets import make_gaussian_quantiles
770
+ >>> real_cov = np.array([[.8, .3],
771
+ ... [.3, .4]])
772
+ >>> rng = np.random.RandomState(0)
773
+ >>> X = rng.multivariate_normal(mean=[0, 0],
774
+ ... cov=real_cov,
775
+ ... size=500)
776
+ >>> oas = OAS().fit(X)
777
+ >>> oas.covariance_
778
+ array([[0.7533..., 0.2763...],
779
+ [0.2763..., 0.3964...]])
780
+ >>> oas.precision_
781
+ array([[ 1.7833..., -1.2431... ],
782
+ [-1.2431..., 3.3889...]])
783
+ >>> oas.shrinkage_
784
+ 0.0195...
785
+ """
786
+
787
+ @_fit_context(prefer_skip_nested_validation=True)
788
+ def fit(self, X, y=None):
789
+ """Fit the Oracle Approximating Shrinkage covariance model to X.
790
+
791
+ Parameters
792
+ ----------
793
+ X : array-like of shape (n_samples, n_features)
794
+ Training data, where `n_samples` is the number of samples
795
+ and `n_features` is the number of features.
796
+ y : Ignored
797
+ Not used, present for API consistency by convention.
798
+
799
+ Returns
800
+ -------
801
+ self : object
802
+ Returns the instance itself.
803
+ """
804
+ X = self._validate_data(X)
805
+ # Not calling the parent object to fit, to avoid computing the
806
+ # covariance matrix (and potentially the precision)
807
+ if self.assume_centered:
808
+ self.location_ = np.zeros(X.shape[1])
809
+ else:
810
+ self.location_ = X.mean(0)
811
+
812
+ covariance, shrinkage = _oas(X - self.location_, assume_centered=True)
813
+ self.shrinkage_ = shrinkage
814
+ self._set_covariance(covariance)
815
+
816
+ return self
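Because the `oas` function above simply delegates to `OAS().fit(X)`, the functional and estimator entry points should agree. A short sketch, assuming only the public sklearn.covariance API (toy data, illustrative only):

import numpy as np
from sklearn.covariance import OAS, oas

rng = np.random.RandomState(0)
real_cov = np.array([[0.8, 0.3], [0.3, 0.4]])
X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)

shrunk_cov, shrinkage = oas(X)     # function API
est = OAS().fit(X)                 # estimator API

print(np.allclose(shrunk_cov, est.covariance_))  # expected: True
print(np.isclose(shrinkage, est.shrinkage_))     # expected: True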
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc ADDED
Binary file (22.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc ADDED
Binary file (89.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc ADDED
Binary file (65.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc ADDED
Binary file (18.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc ADDED
Binary file (62.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc ADDED
Binary file (31.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc ADDED
Binary file (7.66 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc ADDED
Binary file (7.36 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc ADDED
Binary file (73.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc ADDED
Binary file (9.76 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc ADDED
Binary file (67.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py ADDED
@@ -0,0 +1,857 @@
1
+ """
2
+ Various bayesian regression
3
+ """
4
+
5
+ # Authors: V. Michel, F. Pedregosa, A. Gramfort
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from math import log
10
+ from numbers import Integral, Real
11
+
12
+ import numpy as np
13
+ from scipy import linalg
14
+ from scipy.linalg import pinvh
15
+
16
+ from ..base import RegressorMixin, _fit_context
17
+ from ..utils import _safe_indexing
18
+ from ..utils._param_validation import Hidden, Interval, StrOptions
19
+ from ..utils.extmath import fast_logdet
20
+ from ..utils.validation import _check_sample_weight
21
+ from ._base import LinearModel, _preprocess_data, _rescale_data
22
+
23
+
24
+ # TODO(1.5) Remove
25
+ def _deprecate_n_iter(n_iter, max_iter):
26
+ """Deprecates n_iter in favour of max_iter. Checks if the n_iter has been
27
+ used instead of max_iter and generates a deprecation warning if True.
28
+
29
+ Parameters
30
+ ----------
31
+ n_iter : int,
32
+ Value of n_iter attribute passed by the estimator.
33
+
34
+ max_iter : int, default=None
35
+ Value of max_iter attribute passed by the estimator.
36
+ If `None`, it corresponds to `max_iter=300`.
37
+
38
+ Returns
39
+ -------
40
+ max_iter : int,
41
+ Value of max_iter which shall further be used by the estimator.
42
+
43
+ Notes
44
+ -----
45
+ This function should be completely removed in 1.5.
46
+ """
47
+ if n_iter != "deprecated":
48
+ if max_iter is not None:
49
+ raise ValueError(
50
+ "Both `n_iter` and `max_iter` attributes were set. Attribute"
51
+ " `n_iter` was deprecated in version 1.3 and will be removed in"
52
+ " 1.5. To avoid this error, only set the `max_iter` attribute."
53
+ )
54
+ warnings.warn(
55
+ (
56
+ "'n_iter' was renamed to 'max_iter' in version 1.3 and "
57
+ "will be removed in 1.5"
58
+ ),
59
+ FutureWarning,
60
+ )
61
+ max_iter = n_iter
62
+ elif max_iter is None:
63
+ max_iter = 300
64
+ return max_iter
65
+
66
+
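For illustration only, a small sketch of how the helper above resolves `max_iter`. It imports the private module `sklearn.linear_model._bayes`, which is an implementation detail rather than a public API:

import warnings
from sklearn.linear_model._bayes import _deprecate_n_iter

# max_iter left as None and n_iter untouched -> the default of 300 is used
print(_deprecate_n_iter("deprecated", None))   # 300

# a user-set n_iter is honoured but triggers a FutureWarning
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(_deprecate_n_iter(50, None))         # 50
    print(caught[0].category.__name__)         # FutureWarning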
67
+ ###############################################################################
68
+ # BayesianRidge regression
69
+
70
+
71
+ class BayesianRidge(RegressorMixin, LinearModel):
72
+ """Bayesian ridge regression.
73
+
74
+ Fit a Bayesian ridge model. See the Notes section for details on this
75
+ implementation and the optimization of the regularization parameters
76
+ lambda (precision of the weights) and alpha (precision of the noise).
77
+
78
+ Read more in the :ref:`User Guide <bayesian_regression>`.
79
+
80
+ Parameters
81
+ ----------
82
+ max_iter : int, default=None
83
+ Maximum number of iterations over the complete dataset before
84
+ stopping independently of any early stopping criterion. If `None`, it
85
+ corresponds to `max_iter=300`.
86
+
87
+ .. versionchanged:: 1.3
88
+
89
+ tol : float, default=1e-3
90
+ Stop the algorithm if w has converged.
91
+
92
+ alpha_1 : float, default=1e-6
93
+ Hyper-parameter : shape parameter for the Gamma distribution prior
94
+ over the alpha parameter.
95
+
96
+ alpha_2 : float, default=1e-6
97
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
98
+ Gamma distribution prior over the alpha parameter.
99
+
100
+ lambda_1 : float, default=1e-6
101
+ Hyper-parameter : shape parameter for the Gamma distribution prior
102
+ over the lambda parameter.
103
+
104
+ lambda_2 : float, default=1e-6
105
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
106
+ Gamma distribution prior over the lambda parameter.
107
+
108
+ alpha_init : float, default=None
109
+ Initial value for alpha (precision of the noise).
110
+ If not set, alpha_init is 1/Var(y).
111
+
112
+ .. versionadded:: 0.22
113
+
114
+ lambda_init : float, default=None
115
+ Initial value for lambda (precision of the weights).
116
+ If not set, lambda_init is 1.
117
+
118
+ .. versionadded:: 0.22
119
+
120
+ compute_score : bool, default=False
121
+ If True, compute the log marginal likelihood at each iteration of the
122
+ optimization.
123
+
124
+ fit_intercept : bool, default=True
125
+ Whether to calculate the intercept for this model.
126
+ The intercept is not treated as a probabilistic parameter
127
+ and thus has no associated variance. If set
128
+ to False, no intercept will be used in calculations
129
+ (i.e. data is expected to be centered).
130
+
131
+ copy_X : bool, default=True
132
+ If True, X will be copied; else, it may be overwritten.
133
+
134
+ verbose : bool, default=False
135
+ Verbose mode when fitting the model.
136
+
137
+ n_iter : int
138
+ Maximum number of iterations. Should be greater than or equal to 1.
139
+
140
+ .. deprecated:: 1.3
141
+ `n_iter` is deprecated in 1.3 and will be removed in 1.5. Use
142
+ `max_iter` instead.
143
+
144
+ Attributes
145
+ ----------
146
+ coef_ : array-like of shape (n_features,)
147
+ Coefficients of the regression model (mean of distribution)
148
+
149
+ intercept_ : float
150
+ Independent term in decision function. Set to 0.0 if
151
+ `fit_intercept = False`.
152
+
153
+ alpha_ : float
154
+ Estimated precision of the noise.
155
+
156
+ lambda_ : float
157
+ Estimated precision of the weights.
158
+
159
+ sigma_ : array-like of shape (n_features, n_features)
160
+ Estimated variance-covariance matrix of the weights
161
+
162
+ scores_ : array-like of shape (n_iter_+1,)
163
+ If computed_score is True, value of the log marginal likelihood (to be
164
+ maximized) at each iteration of the optimization. The array starts
165
+ with the value of the log marginal likelihood obtained for the initial
166
+ values of alpha and lambda and ends with the value obtained for the
167
+ estimated alpha and lambda.
168
+
169
+ n_iter_ : int
170
+ The actual number of iterations to reach the stopping criterion.
171
+
172
+ X_offset_ : ndarray of shape (n_features,)
173
+ If `fit_intercept=True`, offset subtracted for centering data to a
174
+ zero mean. Set to np.zeros(n_features) otherwise.
175
+
176
+ X_scale_ : ndarray of shape (n_features,)
177
+ Set to np.ones(n_features).
178
+
179
+ n_features_in_ : int
180
+ Number of features seen during :term:`fit`.
181
+
182
+ .. versionadded:: 0.24
183
+
184
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
185
+ Names of features seen during :term:`fit`. Defined only when `X`
186
+ has feature names that are all strings.
187
+
188
+ .. versionadded:: 1.0
189
+
190
+ See Also
191
+ --------
192
+ ARDRegression : Bayesian ARD regression.
193
+
194
+ Notes
195
+ -----
196
+ There exist several strategies to perform Bayesian ridge regression. This
197
+ implementation is based on the algorithm described in Appendix A of
198
+ (Tipping, 2001) where updates of the regularization parameters are done as
199
+ suggested in (MacKay, 1992). Note that according to A New
200
+ View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
201
+ update rules do not guarantee that the marginal likelihood is increasing
202
+ between two consecutive iterations of the optimization.
203
+
204
+ References
205
+ ----------
206
+ D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
207
+ Vol. 4, No. 3, 1992.
208
+
209
+ M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
210
+ Journal of Machine Learning Research, Vol. 1, 2001.
211
+
212
+ Examples
213
+ --------
214
+ >>> from sklearn import linear_model
215
+ >>> clf = linear_model.BayesianRidge()
216
+ >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
217
+ BayesianRidge()
218
+ >>> clf.predict([[1, 1]])
219
+ array([1.])
220
+ """
221
+
222
+ _parameter_constraints: dict = {
223
+ "max_iter": [Interval(Integral, 1, None, closed="left"), None],
224
+ "tol": [Interval(Real, 0, None, closed="neither")],
225
+ "alpha_1": [Interval(Real, 0, None, closed="left")],
226
+ "alpha_2": [Interval(Real, 0, None, closed="left")],
227
+ "lambda_1": [Interval(Real, 0, None, closed="left")],
228
+ "lambda_2": [Interval(Real, 0, None, closed="left")],
229
+ "alpha_init": [None, Interval(Real, 0, None, closed="left")],
230
+ "lambda_init": [None, Interval(Real, 0, None, closed="left")],
231
+ "compute_score": ["boolean"],
232
+ "fit_intercept": ["boolean"],
233
+ "copy_X": ["boolean"],
234
+ "verbose": ["verbose"],
235
+ "n_iter": [
236
+ Interval(Integral, 1, None, closed="left"),
237
+ Hidden(StrOptions({"deprecated"})),
238
+ ],
239
+ }
240
+
241
+ def __init__(
242
+ self,
243
+ *,
244
+ max_iter=None, # TODO(1.5): Set to 300
245
+ tol=1.0e-3,
246
+ alpha_1=1.0e-6,
247
+ alpha_2=1.0e-6,
248
+ lambda_1=1.0e-6,
249
+ lambda_2=1.0e-6,
250
+ alpha_init=None,
251
+ lambda_init=None,
252
+ compute_score=False,
253
+ fit_intercept=True,
254
+ copy_X=True,
255
+ verbose=False,
256
+ n_iter="deprecated", # TODO(1.5): Remove
257
+ ):
258
+ self.max_iter = max_iter
259
+ self.tol = tol
260
+ self.alpha_1 = alpha_1
261
+ self.alpha_2 = alpha_2
262
+ self.lambda_1 = lambda_1
263
+ self.lambda_2 = lambda_2
264
+ self.alpha_init = alpha_init
265
+ self.lambda_init = lambda_init
266
+ self.compute_score = compute_score
267
+ self.fit_intercept = fit_intercept
268
+ self.copy_X = copy_X
269
+ self.verbose = verbose
270
+ self.n_iter = n_iter
271
+
272
+ @_fit_context(prefer_skip_nested_validation=True)
273
+ def fit(self, X, y, sample_weight=None):
274
+ """Fit the model.
275
+
276
+ Parameters
277
+ ----------
278
+ X : ndarray of shape (n_samples, n_features)
279
+ Training data.
280
+ y : ndarray of shape (n_samples,)
281
+ Target values. Will be cast to X's dtype if necessary.
282
+
283
+ sample_weight : ndarray of shape (n_samples,), default=None
284
+ Individual weights for each sample.
285
+
286
+ .. versionadded:: 0.20
287
+ parameter *sample_weight* support to BayesianRidge.
288
+
289
+ Returns
290
+ -------
291
+ self : object
292
+ Returns the instance itself.
293
+ """
294
+ max_iter = _deprecate_n_iter(self.n_iter, self.max_iter)
295
+
296
+ X, y = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True)
297
+ dtype = X.dtype
298
+
299
+ if sample_weight is not None:
300
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype)
301
+
302
+ X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data(
303
+ X,
304
+ y,
305
+ fit_intercept=self.fit_intercept,
306
+ copy=self.copy_X,
307
+ sample_weight=sample_weight,
308
+ )
309
+
310
+ if sample_weight is not None:
311
+ # Sample weight can be implemented via a simple rescaling.
312
+ X, y, _ = _rescale_data(X, y, sample_weight)
313
+
314
+ self.X_offset_ = X_offset_
315
+ self.X_scale_ = X_scale_
316
+ n_samples, n_features = X.shape
317
+
318
+ # Initialization of the values of the parameters
319
+ eps = np.finfo(np.float64).eps
320
+ # Add `eps` in the denominator to omit division by zero if `np.var(y)`
321
+ # is zero
322
+ alpha_ = self.alpha_init
323
+ lambda_ = self.lambda_init
324
+ if alpha_ is None:
325
+ alpha_ = 1.0 / (np.var(y) + eps)
326
+ if lambda_ is None:
327
+ lambda_ = 1.0
328
+
329
+ # Avoid unintended type promotion to float64 with numpy 2
330
+ alpha_ = np.asarray(alpha_, dtype=dtype)
331
+ lambda_ = np.asarray(lambda_, dtype=dtype)
332
+
333
+ verbose = self.verbose
334
+ lambda_1 = self.lambda_1
335
+ lambda_2 = self.lambda_2
336
+ alpha_1 = self.alpha_1
337
+ alpha_2 = self.alpha_2
338
+
339
+ self.scores_ = list()
340
+ coef_old_ = None
341
+
342
+ XT_y = np.dot(X.T, y)
343
+ U, S, Vh = linalg.svd(X, full_matrices=False)
344
+ eigen_vals_ = S**2
345
+
346
+ # Convergence loop of the bayesian ridge regression
347
+ for iter_ in range(max_iter):
348
+ # update posterior mean coef_ based on alpha_ and lambda_ and
349
+ # compute corresponding rmse
350
+ coef_, rmse_ = self._update_coef_(
351
+ X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
352
+ )
353
+ if self.compute_score:
354
+ # compute the log marginal likelihood
355
+ s = self._log_marginal_likelihood(
356
+ n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
357
+ )
358
+ self.scores_.append(s)
359
+
360
+ # Update alpha and lambda according to (MacKay, 1992)
361
+ gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))
362
+ lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2)
363
+ alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)
364
+
365
+ # Check for convergence
366
+ if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
367
+ if verbose:
368
+ print("Convergence after ", str(iter_), " iterations")
369
+ break
370
+ coef_old_ = np.copy(coef_)
371
+
372
+ self.n_iter_ = iter_ + 1
373
+
374
+ # return regularization parameters and corresponding posterior mean,
375
+ # log marginal likelihood and posterior covariance
376
+ self.alpha_ = alpha_
377
+ self.lambda_ = lambda_
378
+ self.coef_, rmse_ = self._update_coef_(
379
+ X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
380
+ )
381
+ if self.compute_score:
382
+ # compute the log marginal likelihood
383
+ s = self._log_marginal_likelihood(
384
+ n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
385
+ )
386
+ self.scores_.append(s)
387
+ self.scores_ = np.array(self.scores_)
388
+
389
+ # posterior covariance is given by 1/alpha_ * scaled_sigma_
390
+ scaled_sigma_ = np.dot(
391
+ Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis]
392
+ )
393
+ self.sigma_ = (1.0 / alpha_) * scaled_sigma_
394
+
395
+ self._set_intercept(X_offset_, y_offset_, X_scale_)
396
+
397
+ return self
398
+
399
+ def predict(self, X, return_std=False):
400
+ """Predict using the linear model.
401
+
402
+ In addition to the mean of the predictive distribution, its standard
403
+ deviation can also be returned.
404
+
405
+ Parameters
406
+ ----------
407
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
408
+ Samples.
409
+
410
+ return_std : bool, default=False
411
+ Whether to return the standard deviation of posterior prediction.
412
+
413
+ Returns
414
+ -------
415
+ y_mean : array-like of shape (n_samples,)
416
+ Mean of predictive distribution of query points.
417
+
418
+ y_std : array-like of shape (n_samples,)
419
+ Standard deviation of predictive distribution of query points.
420
+ """
421
+ y_mean = self._decision_function(X)
422
+ if not return_std:
423
+ return y_mean
424
+ else:
425
+ sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
426
+ y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
427
+ return y_mean, y_std
428
+
429
+ def _update_coef_(
430
+ self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
431
+ ):
432
+ """Update posterior mean and compute corresponding rmse.
433
+
434
+ Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
435
+ scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
436
+ + np.dot(X.T, X))^-1
437
+ """
438
+
439
+ if n_samples > n_features:
440
+ coef_ = np.linalg.multi_dot(
441
+ [Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y]
442
+ )
443
+ else:
444
+ coef_ = np.linalg.multi_dot(
445
+ [X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y]
446
+ )
447
+
448
+ rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
449
+
450
+ return coef_, rmse_
451
+
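The docstring of `_update_coef_` states that the posterior mean is `scaled_sigma_ @ X.T @ y` with `scaled_sigma_ = (lambda_/alpha_ * I + X.T @ X)^-1`. A minimal NumPy check of that relation against a fitted model, assuming `fit_intercept=False` so that no centering is involved (synthetic data, illustrative only):

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.randn(60, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(60)

reg = BayesianRidge(fit_intercept=False).fit(X, y)

# closed form from the docstring: (lambda_/alpha_ * I + X.T X)^-1 X.T y
ratio = reg.lambda_ / reg.alpha_
closed_form = np.linalg.solve(ratio * np.eye(3) + X.T @ X, X.T @ y)
print(np.allclose(closed_form, reg.coef_))   # expected: True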
452
+ def _log_marginal_likelihood(
453
+ self, n_samples, n_features, eigen_vals, alpha_, lambda_, coef, rmse
454
+ ):
455
+ """Log marginal likelihood."""
456
+ alpha_1 = self.alpha_1
457
+ alpha_2 = self.alpha_2
458
+ lambda_1 = self.lambda_1
459
+ lambda_2 = self.lambda_2
460
+
461
+ # compute the log of the determinant of the posterior covariance.
462
+ # posterior covariance is given by
463
+ # sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
464
+ if n_samples > n_features:
465
+ logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals))
466
+ else:
467
+ logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
468
+ logdet_sigma[:n_samples] += alpha_ * eigen_vals
469
+ logdet_sigma = -np.sum(np.log(logdet_sigma))
470
+
471
+ score = lambda_1 * log(lambda_) - lambda_2 * lambda_
472
+ score += alpha_1 * log(alpha_) - alpha_2 * alpha_
473
+ score += 0.5 * (
474
+ n_features * log(lambda_)
475
+ + n_samples * log(alpha_)
476
+ - alpha_ * rmse
477
+ - lambda_ * np.sum(coef**2)
478
+ + logdet_sigma
479
+ - n_samples * log(2 * np.pi)
480
+ )
481
+
482
+ return score
483
+
484
+
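A brief usage sketch of the estimator above with `compute_score=True`, exercising the attributes and the `return_std` prediction path described in the docstrings (synthetic data, illustrative only):

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(42)
X = rng.randn(100, 5)
y = X @ rng.randn(5) + 0.5 * rng.randn(100)

reg = BayesianRidge(compute_score=True).fit(X, y)

print(reg.n_iter_)                              # iterations until convergence
print(reg.alpha_, reg.lambda_)                  # estimated precisions (noise, weights)
print(reg.scores_.shape == (reg.n_iter_ + 1,))  # log marginal likelihood trace

y_mean, y_std = reg.predict(X[:3], return_std=True)
print(y_mean.shape, y_std.shape)                # (3,) (3,)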
485
+ ###############################################################################
486
+ # ARD (Automatic Relevance Determination) regression
487
+
488
+
489
+ class ARDRegression(RegressorMixin, LinearModel):
490
+ """Bayesian ARD regression.
491
+
492
+ Fit the weights of a regression model, using an ARD prior. The weights of
493
+ the regression model are assumed to be in Gaussian distributions.
494
+ Also estimate the parameters lambda (precisions of the distributions of the
495
+ weights) and alpha (precision of the distribution of the noise).
496
+ The estimation is done by an iterative procedures (Evidence Maximization)
497
+
498
+ Read more in the :ref:`User Guide <bayesian_regression>`.
499
+
500
+ Parameters
501
+ ----------
502
+ max_iter : int, default=None
503
+ Maximum number of iterations. If `None`, it corresponds to `max_iter=300`.
504
+
505
+ .. versionchanged:: 1.3
506
+
507
+ tol : float, default=1e-3
508
+ Stop the algorithm if w has converged.
509
+
510
+ alpha_1 : float, default=1e-6
511
+ Hyper-parameter : shape parameter for the Gamma distribution prior
512
+ over the alpha parameter.
513
+
514
+ alpha_2 : float, default=1e-6
515
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
516
+ Gamma distribution prior over the alpha parameter.
517
+
518
+ lambda_1 : float, default=1e-6
519
+ Hyper-parameter : shape parameter for the Gamma distribution prior
520
+ over the lambda parameter.
521
+
522
+ lambda_2 : float, default=1e-6
523
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
524
+ Gamma distribution prior over the lambda parameter.
525
+
526
+ compute_score : bool, default=False
527
+ If True, compute the objective function at each step of the model.
528
+
529
+ threshold_lambda : float, default=10000.0
530
+ Threshold for removing (pruning) weights with high precision from
531
+ the computation.
532
+
533
+ fit_intercept : bool, default=True
534
+ Whether to calculate the intercept for this model. If set
535
+ to false, no intercept will be used in calculations
536
+ (i.e. data is expected to be centered).
537
+
538
+ copy_X : bool, default=True
539
+ If True, X will be copied; else, it may be overwritten.
540
+
541
+ verbose : bool, default=False
542
+ Verbose mode when fitting the model.
543
+
544
+ n_iter : int
545
+ Maximum number of iterations.
546
+
547
+ .. deprecated:: 1.3
548
+ `n_iter` is deprecated in 1.3 and will be removed in 1.5. Use
549
+ `max_iter` instead.
550
+
551
+ Attributes
552
+ ----------
553
+ coef_ : array-like of shape (n_features,)
554
+ Coefficients of the regression model (mean of distribution)
555
+
556
+ alpha_ : float
557
+ estimated precision of the noise.
558
+
559
+ lambda_ : array-like of shape (n_features,)
560
+ estimated precisions of the weights.
561
+
562
+ sigma_ : array-like of shape (n_features, n_features)
563
+ estimated variance-covariance matrix of the weights
564
+
565
+ scores_ : float
566
+ if computed, value of the objective function (to be maximized)
567
+
568
+ n_iter_ : int
569
+ The actual number of iterations to reach the stopping criterion.
570
+
571
+ .. versionadded:: 1.3
572
+
573
+ intercept_ : float
574
+ Independent term in decision function. Set to 0.0 if
575
+ ``fit_intercept = False``.
576
+
577
+ X_offset_ : float
578
+ If `fit_intercept=True`, offset subtracted for centering data to a
579
+ zero mean. Set to np.zeros(n_features) otherwise.
580
+
581
+ X_scale_ : float
582
+ Set to np.ones(n_features).
583
+
584
+ n_features_in_ : int
585
+ Number of features seen during :term:`fit`.
586
+
587
+ .. versionadded:: 0.24
588
+
589
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
590
+ Names of features seen during :term:`fit`. Defined only when `X`
591
+ has feature names that are all strings.
592
+
593
+ .. versionadded:: 1.0
594
+
595
+ See Also
596
+ --------
597
+ BayesianRidge : Bayesian ridge regression.
598
+
599
+ Notes
600
+ -----
601
+ For an example, see :ref:`examples/linear_model/plot_ard.py
602
+ <sphx_glr_auto_examples_linear_model_plot_ard.py>`.
603
+
604
+ References
605
+ ----------
606
+ D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
607
+ competition, ASHRAE Transactions, 1994.
608
+
609
+ R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
610
+ http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
611
+ Their beta is our ``self.alpha_``
612
+ Their alpha is our ``self.lambda_``
613
+ ARD is a little different from the slides: only dimensions/features for
614
+ which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
615
+ discarded.
616
+
617
+ Examples
618
+ --------
619
+ >>> from sklearn import linear_model
620
+ >>> clf = linear_model.ARDRegression()
621
+ >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
622
+ ARDRegression()
623
+ >>> clf.predict([[1, 1]])
624
+ array([1.])
625
+ """
626
+
627
+ _parameter_constraints: dict = {
628
+ "max_iter": [Interval(Integral, 1, None, closed="left"), None],
629
+ "tol": [Interval(Real, 0, None, closed="left")],
630
+ "alpha_1": [Interval(Real, 0, None, closed="left")],
631
+ "alpha_2": [Interval(Real, 0, None, closed="left")],
632
+ "lambda_1": [Interval(Real, 0, None, closed="left")],
633
+ "lambda_2": [Interval(Real, 0, None, closed="left")],
634
+ "compute_score": ["boolean"],
635
+ "threshold_lambda": [Interval(Real, 0, None, closed="left")],
636
+ "fit_intercept": ["boolean"],
637
+ "copy_X": ["boolean"],
638
+ "verbose": ["verbose"],
639
+ "n_iter": [
640
+ Interval(Integral, 1, None, closed="left"),
641
+ Hidden(StrOptions({"deprecated"})),
642
+ ],
643
+ }
644
+
645
+ def __init__(
646
+ self,
647
+ *,
648
+ max_iter=None, # TODO(1.5): Set to 300
649
+ tol=1.0e-3,
650
+ alpha_1=1.0e-6,
651
+ alpha_2=1.0e-6,
652
+ lambda_1=1.0e-6,
653
+ lambda_2=1.0e-6,
654
+ compute_score=False,
655
+ threshold_lambda=1.0e4,
656
+ fit_intercept=True,
657
+ copy_X=True,
658
+ verbose=False,
659
+ n_iter="deprecated", # TODO(1.5): Remove
660
+ ):
661
+ self.max_iter = max_iter
662
+ self.tol = tol
663
+ self.fit_intercept = fit_intercept
664
+ self.alpha_1 = alpha_1
665
+ self.alpha_2 = alpha_2
666
+ self.lambda_1 = lambda_1
667
+ self.lambda_2 = lambda_2
668
+ self.compute_score = compute_score
669
+ self.threshold_lambda = threshold_lambda
670
+ self.copy_X = copy_X
671
+ self.verbose = verbose
672
+ self.n_iter = n_iter
673
+
674
+ @_fit_context(prefer_skip_nested_validation=True)
675
+ def fit(self, X, y):
676
+ """Fit the model according to the given training data and parameters.
677
+
678
+ Iterative procedure to maximize the evidence
679
+
680
+ Parameters
681
+ ----------
682
+ X : array-like of shape (n_samples, n_features)
683
+ Training vector, where `n_samples` is the number of samples and
684
+ `n_features` is the number of features.
685
+ y : array-like of shape (n_samples,)
686
+ Target values (integers). Will be cast to X's dtype if necessary.
687
+
688
+ Returns
689
+ -------
690
+ self : object
691
+ Fitted estimator.
692
+ """
693
+ max_iter = _deprecate_n_iter(self.n_iter, self.max_iter)
694
+
695
+ X, y = self._validate_data(
696
+ X, y, dtype=[np.float64, np.float32], y_numeric=True, ensure_min_samples=2
697
+ )
698
+ dtype = X.dtype
699
+
700
+ n_samples, n_features = X.shape
701
+ coef_ = np.zeros(n_features, dtype=dtype)
702
+
703
+ X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data(
704
+ X, y, fit_intercept=self.fit_intercept, copy=self.copy_X
705
+ )
706
+
707
+ self.X_offset_ = X_offset_
708
+ self.X_scale_ = X_scale_
709
+
710
+ # Launch the convergence loop
711
+ keep_lambda = np.ones(n_features, dtype=bool)
712
+
713
+ lambda_1 = self.lambda_1
714
+ lambda_2 = self.lambda_2
715
+ alpha_1 = self.alpha_1
716
+ alpha_2 = self.alpha_2
717
+ verbose = self.verbose
718
+
719
+ # Initialization of the values of the parameters
720
+ eps = np.finfo(np.float64).eps
721
+ # Add `eps` in the denominator to omit division by zero if `np.var(y)`
722
+ # is zero.
723
+ # Explicitly set dtype to avoid unintended type promotion with numpy 2.
724
+ alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype)
725
+ lambda_ = np.ones(n_features, dtype=dtype)
726
+
727
+ self.scores_ = list()
728
+ coef_old_ = None
729
+
730
+ def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
731
+ coef_[keep_lambda] = alpha_ * np.linalg.multi_dot(
732
+ [sigma_, X[:, keep_lambda].T, y]
733
+ )
734
+ return coef_
735
+
736
+ update_sigma = (
737
+ self._update_sigma
738
+ if n_samples >= n_features
739
+ else self._update_sigma_woodbury
740
+ )
741
+ # Iterative procedure of ARDRegression
742
+ for iter_ in range(max_iter):
743
+ sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
744
+ coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
745
+
746
+ # Update alpha and lambda
747
+ rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
748
+ gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)
749
+ lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (
750
+ (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2
751
+ )
752
+ alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (
753
+ rmse_ + 2.0 * alpha_2
754
+ )
755
+
756
+ # Prune the weights with a precision over a threshold
757
+ keep_lambda = lambda_ < self.threshold_lambda
758
+ coef_[~keep_lambda] = 0
759
+
760
+ # Compute the objective function
761
+ if self.compute_score:
762
+ s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
763
+ s += alpha_1 * log(alpha_) - alpha_2 * alpha_
764
+ s += 0.5 * (
765
+ fast_logdet(sigma_)
766
+ + n_samples * log(alpha_)
767
+ + np.sum(np.log(lambda_))
768
+ )
769
+ s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum())
770
+ self.scores_.append(s)
771
+
772
+ # Check for convergence
773
+ if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
774
+ if verbose:
775
+ print("Converged after %s iterations" % iter_)
776
+ break
777
+ coef_old_ = np.copy(coef_)
778
+
779
+ if not keep_lambda.any():
780
+ break
781
+
782
+ self.n_iter_ = iter_ + 1
783
+
784
+ if keep_lambda.any():
785
+ # update sigma and mu using updated params from the last iteration
786
+ sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
787
+ coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
788
+ else:
789
+ sigma_ = np.array([]).reshape(0, 0)
790
+
791
+ self.coef_ = coef_
792
+ self.alpha_ = alpha_
793
+ self.sigma_ = sigma_
794
+ self.lambda_ = lambda_
795
+ self._set_intercept(X_offset_, y_offset_, X_scale_)
796
+ return self
797
+
798
+ def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):
799
+ # See slides as referenced in the docstring note
800
+ # this function is used when n_samples < n_features and will invert
801
+ # a matrix of shape (n_samples, n_samples) making use of the
802
+ # woodbury formula:
803
+ # https://en.wikipedia.org/wiki/Woodbury_matrix_identity
804
+ n_samples = X.shape[0]
805
+ X_keep = X[:, keep_lambda]
806
+ inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1)
807
+ sigma_ = pinvh(
808
+ np.eye(n_samples, dtype=X.dtype) / alpha_
809
+ + np.dot(X_keep * inv_lambda, X_keep.T)
810
+ )
811
+ sigma_ = np.dot(sigma_, X_keep * inv_lambda)
812
+ sigma_ = -np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_)
813
+ sigma_[np.diag_indices(sigma_.shape[1])] += 1.0 / lambda_[keep_lambda]
814
+ return sigma_
815
+
816
+ def _update_sigma(self, X, alpha_, lambda_, keep_lambda):
817
+ # See slides as referenced in the docstring note
818
+ # this function is used when n_samples >= n_features and will
819
+ # invert a matrix of shape (n_features, n_features)
820
+ X_keep = X[:, keep_lambda]
821
+ gram = np.dot(X_keep.T, X_keep)
822
+ eye = np.eye(gram.shape[0], dtype=X.dtype)
823
+ sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram
824
+ sigma_ = pinvh(sigma_inv)
825
+ return sigma_
826
+
827
+ def predict(self, X, return_std=False):
828
+ """Predict using the linear model.
829
+
830
+ In addition to the mean of the predictive distribution, its standard
831
+ deviation can also be returned.
832
+
833
+ Parameters
834
+ ----------
835
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
836
+ Samples.
837
+
838
+ return_std : bool, default=False
839
+ Whether to return the standard deviation of posterior prediction.
840
+
841
+ Returns
842
+ -------
843
+ y_mean : array-like of shape (n_samples,)
844
+ Mean of predictive distribution of query points.
845
+
846
+ y_std : array-like of shape (n_samples,)
847
+ Standard deviation of predictive distribution of query points.
848
+ """
849
+ y_mean = self._decision_function(X)
850
+ if return_std is False:
851
+ return y_mean
852
+ else:
853
+ col_index = self.lambda_ < self.threshold_lambda
854
+ X = _safe_indexing(X, indices=col_index, axis=1)
855
+ sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
856
+ y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
857
+ return y_mean, y_std
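A small sketch of the pruning behaviour described in the `ARDRegression` notes: coefficients whose precision `lambda_` exceeds `threshold_lambda` are set exactly to zero. Whether a given feature is actually pruned depends on the data and noise level; the data below is synthetic and illustrative only:

import numpy as np
from sklearn.linear_model import ARDRegression

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
# only the first three features carry signal; the last two are irrelevant
w = np.array([1.0, -2.0, 0.5, 0.0, 0.0])
y = X @ w + 0.01 * rng.randn(100)

ard = ARDRegression().fit(X, y)
pruned = ard.lambda_ > ard.threshold_lambda
print(ard.coef_.round(3))               # irrelevant features shrink towards 0
print(np.all(ard.coef_[pruned] == 0))   # pruned coefficients (if any) are exactly 0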
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # License: BSD 3 clause
2
+
3
+ from .glm import (
4
+ GammaRegressor,
5
+ PoissonRegressor,
6
+ TweedieRegressor,
7
+ _GeneralizedLinearRegressor,
8
+ )
9
+
10
+ __all__ = [
11
+ "_GeneralizedLinearRegressor",
12
+ "PoissonRegressor",
13
+ "GammaRegressor",
14
+ "TweedieRegressor",
15
+ ]
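The regressors re-exported here are also available from the public `sklearn.linear_model` namespace. A minimal usage sketch with toy count data (illustrative only):

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(50, 2))
y = rng.poisson(lam=np.exp(X @ np.array([0.5, 1.0])))  # non-negative counts

glm = PoissonRegressor(alpha=1e-3).fit(X, y)
print(glm.coef_, glm.intercept_)
print(glm.predict(X[:3]))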
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (373 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py ADDED
@@ -0,0 +1,525 @@
1
+ """
2
+ Newton solver for Generalized Linear Models
3
+ """
4
+
5
+ # Author: Christian Lorentzen <[email protected]>
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from abc import ABC, abstractmethod
10
+
11
+ import numpy as np
12
+ import scipy.linalg
13
+ import scipy.optimize
14
+
15
+ from ..._loss.loss import HalfSquaredError
16
+ from ...exceptions import ConvergenceWarning
17
+ from ...utils.optimize import _check_optimize_result
18
+ from .._linear_loss import LinearModelLoss
19
+
20
+
21
+ class NewtonSolver(ABC):
22
+ """Newton solver for GLMs.
23
+
24
+ This class implements Newton/2nd-order optimization routines for GLMs. Each Newton
25
+ iteration aims at finding the Newton step which is done by the inner solver. With
26
+ Hessian H, gradient g and coefficients coef, one step solves:
27
+
28
+ H @ coef_newton = -g
29
+
30
+ For our GLM / LinearModelLoss, we have gradient g and Hessian H:
31
+
32
+ g = X.T @ loss.gradient + l2_reg_strength * coef
33
+ H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity
34
+
35
+ Backtracking line search updates coef = coef_old + t * coef_newton for some t in
36
+ (0, 1].
37
+
38
+ This is a base class, actual implementations (child classes) may deviate from the
39
+ above pattern and use structure specific tricks.
40
+
41
+ Usage pattern:
42
+ - initialize solver: sol = NewtonSolver(...)
43
+ - solve the problem: sol.solve(X, y, sample_weight)
44
+
45
+ References
46
+ ----------
47
+ - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization"
48
+ 2nd edition
49
+ https://doi.org/10.1007/978-0-387-40065-5
50
+
51
+ - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization."
52
+ Cambridge University Press, 2004.
53
+ https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf
54
+
55
+ Parameters
56
+ ----------
57
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
58
+ Initial coefficients of a linear model.
59
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
60
+ i.e. one reconstructs the 2d-array via
61
+ coef.reshape((n_classes, -1), order="F").
62
+
63
+ linear_loss : LinearModelLoss
64
+ The loss to be minimized.
65
+
66
+ l2_reg_strength : float, default=0.0
67
+ L2 regularization strength.
68
+
69
+ tol : float, default=1e-4
70
+ The optimization problem is solved when each of the following condition is
71
+ fulfilled:
72
+ 1. maximum |gradient| <= tol
73
+ 2. Newton decrement d: 1/2 * d^2 <= tol
74
+
75
+ max_iter : int, default=100
76
+ Maximum number of Newton steps allowed.
77
+
78
+ n_threads : int, default=1
79
+ Number of OpenMP threads to use for the computation of the Hessian and gradient
80
+ of the loss function.
81
+
82
+ Attributes
83
+ ----------
84
+ coef_old : ndarray of shape coef.shape
85
+ Coefficient of previous iteration.
86
+
87
+ coef_newton : ndarray of shape coef.shape
88
+ Newton step.
89
+
90
+ gradient : ndarray of shape coef.shape
91
+ Gradient of the loss w.r.t. the coefficients.
92
+
93
+ gradient_old : ndarray of shape coef.shape
94
+ Gradient of previous iteration.
95
+
96
+ loss_value : float
97
+ Value of objective function = loss + penalty.
98
+
99
+ loss_value_old : float
100
+ Value of objective function of previous itertion.
101
+
102
+ raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)
103
+
104
+ converged : bool
105
+ Indicator for convergence of the solver.
106
+
107
+ iteration : int
108
+ Number of Newton steps, i.e. calls to inner_solve
109
+
110
+ use_fallback_lbfgs_solve : bool
111
+ If set to True, the solver will resort to call LBFGS to finish the optimisation
112
+ procedure in case of convergence issues.
113
+
114
+ gradient_times_newton : float
115
+ gradient @ coef_newton, set in inner_solve and used by line_search. If the
116
+ Newton step is a descent direction, this is negative.
117
+ """
118
+
119
+ def __init__(
120
+ self,
121
+ *,
122
+ coef,
123
+ linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True),
124
+ l2_reg_strength=0.0,
125
+ tol=1e-4,
126
+ max_iter=100,
127
+ n_threads=1,
128
+ verbose=0,
129
+ ):
130
+ self.coef = coef
131
+ self.linear_loss = linear_loss
132
+ self.l2_reg_strength = l2_reg_strength
133
+ self.tol = tol
134
+ self.max_iter = max_iter
135
+ self.n_threads = n_threads
136
+ self.verbose = verbose
137
+
138
+ def setup(self, X, y, sample_weight):
139
+ """Precomputations
140
+
141
+ If None, initializes:
142
+ - self.coef
143
+ Sets:
144
+ - self.raw_prediction
145
+ - self.loss_value
146
+ """
147
+ _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)
148
+ self.loss_value = self.linear_loss.loss(
149
+ coef=self.coef,
150
+ X=X,
151
+ y=y,
152
+ sample_weight=sample_weight,
153
+ l2_reg_strength=self.l2_reg_strength,
154
+ n_threads=self.n_threads,
155
+ raw_prediction=self.raw_prediction,
156
+ )
157
+
158
+ @abstractmethod
159
+ def update_gradient_hessian(self, X, y, sample_weight):
160
+ """Update gradient and Hessian."""
161
+
162
+ @abstractmethod
163
+ def inner_solve(self, X, y, sample_weight):
164
+ """Compute Newton step.
165
+
166
+ Sets:
167
+ - self.coef_newton
168
+ - self.gradient_times_newton
169
+ """
170
+
171
+ def fallback_lbfgs_solve(self, X, y, sample_weight):
172
+ """Fallback solver in case of emergency.
173
+
174
+ If a solver detects convergence problems, it may fall back to this methods in
175
+ the hope to exit with success instead of raising an error.
176
+
177
+ Sets:
178
+ - self.coef
179
+ - self.converged
180
+ """
181
+ opt_res = scipy.optimize.minimize(
182
+ self.linear_loss.loss_gradient,
183
+ self.coef,
184
+ method="L-BFGS-B",
185
+ jac=True,
186
+ options={
187
+ "maxiter": self.max_iter,
188
+ "maxls": 50, # default is 20
189
+ "iprint": self.verbose - 1,
190
+ "gtol": self.tol,
191
+ "ftol": 64 * np.finfo(np.float64).eps,
192
+ },
193
+ args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
194
+ )
195
+ self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
196
+ self.coef = opt_res.x
197
+ self.converged = opt_res.status == 0
198
+
199
+ def line_search(self, X, y, sample_weight):
200
+ """Backtracking line search.
201
+
202
+ Sets:
203
+ - self.coef_old
204
+ - self.coef
205
+ - self.loss_value_old
206
+ - self.loss_value
207
+ - self.gradient_old
208
+ - self.gradient
209
+ - self.raw_prediction
210
+ """
211
+ # line search parameters
212
+ beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11
213
+ eps = 16 * np.finfo(self.loss_value.dtype).eps
214
+ t = 1 # step size
215
+
216
+ # gradient_times_newton = self.gradient @ self.coef_newton
217
+ # was computed in inner_solve.
218
+ armijo_term = sigma * self.gradient_times_newton
219
+ _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
220
+ self.coef_newton, X
221
+ )
222
+
223
+ self.coef_old = self.coef
224
+ self.loss_value_old = self.loss_value
225
+ self.gradient_old = self.gradient
226
+
227
+ # np.sum(np.abs(self.gradient_old))
228
+ sum_abs_grad_old = -1
229
+
230
+ is_verbose = self.verbose >= 2
231
+ if is_verbose:
232
+ print(" Backtracking Line Search")
233
+ print(f" eps=10 * finfo.eps={eps}")
234
+
235
+ for i in range(21): # until and including t = beta**20 ~ 1e-6
236
+ self.coef = self.coef_old + t * self.coef_newton
237
+ raw = self.raw_prediction + t * raw_prediction_newton
238
+ self.loss_value, self.gradient = self.linear_loss.loss_gradient(
239
+ coef=self.coef,
240
+ X=X,
241
+ y=y,
242
+ sample_weight=sample_weight,
243
+ l2_reg_strength=self.l2_reg_strength,
244
+ n_threads=self.n_threads,
245
+ raw_prediction=raw,
246
+ )
247
+ # Note: If coef_newton is too large, loss_gradient may produce inf values,
248
+ # potentially accompanied by a RuntimeWarning.
249
+ # This case will be captured by the Armijo condition.
250
+
251
+ # 1. Check Armijo / sufficient decrease condition.
252
+ # The smaller (more negative) the better.
253
+ loss_improvement = self.loss_value - self.loss_value_old
254
+ check = loss_improvement <= t * armijo_term
255
+ if is_verbose:
256
+ print(
257
+ f" line search iteration={i+1}, step size={t}\n"
258
+ f" check loss improvement <= armijo term: {loss_improvement} "
259
+ f"<= {t * armijo_term} {check}"
260
+ )
261
+ if check:
262
+ break
263
+ # 2. Deal with relative loss differences around machine precision.
264
+ tiny_loss = np.abs(self.loss_value_old * eps)
265
+ check = np.abs(loss_improvement) <= tiny_loss
266
+ if is_verbose:
267
+ print(
268
+ " check loss |improvement| <= eps * |loss_old|:"
269
+ f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
270
+ )
271
+ if check:
272
+ if sum_abs_grad_old < 0:
273
+ sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
274
+ # 2.1 Check sum of absolute gradients as alternative condition.
275
+ sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
276
+ check = sum_abs_grad < sum_abs_grad_old
277
+ if is_verbose:
278
+ print(
279
+ " check sum(|gradient|) < sum(|gradient_old|): "
280
+ f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
281
+ )
282
+ if check:
283
+ break
284
+
285
+ t *= beta
286
+ else:
287
+ warnings.warn(
288
+ (
289
+ f"Line search of Newton solver {self.__class__.__name__} at"
290
+ f" iteration #{self.iteration} did no converge after 21 line search"
291
+ " refinement iterations. It will now resort to lbfgs instead."
292
+ ),
293
+ ConvergenceWarning,
294
+ )
295
+ if self.verbose:
296
+ print(" Line search did not converge and resorts to lbfgs instead.")
297
+ self.use_fallback_lbfgs_solve = True
298
+ return
299
+
300
+ self.raw_prediction = raw
301
+
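For intuition, a self-contained toy sketch of the backtracking scheme used above (Armijo sufficient-decrease check with step halving) on a simple quadratic; the objective and constants mirror the structure of the code but are illustrative only:

import numpy as np

def f(w):      # simple convex objective: 0.5 * ||w||^2
    return 0.5 * w @ w

def grad(w):
    return w

w_old = np.array([3.0, -2.0])
direction = -grad(w_old)           # a descent direction (here: steepest descent)
beta, sigma = 0.5, 0.00048828125   # same constants as the solver above
armijo_term = sigma * (grad(w_old) @ direction)

t = 1.0
for _ in range(21):
    w_new = w_old + t * direction
    if f(w_new) - f(w_old) <= t * armijo_term:   # sufficient decrease
        break
    t *= beta                                    # shrink the step size

print(t, w_new)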
302
+ def check_convergence(self, X, y, sample_weight):
303
+ """Check for convergence.
304
+
305
+ Sets self.converged.
306
+ """
307
+ if self.verbose:
308
+ print(" Check Convergence")
309
+ # Note: Checking maximum relative change of coefficient <= tol is a bad
310
+ # convergence criterion because even a large step could have brought us close
311
+ # to the true minimum.
312
+ # coef_step = self.coef - self.coef_old
313
+ # check = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old)))
314
+
315
+ # 1. Criterion: maximum |gradient| <= tol
316
+ # The gradient was already updated in line_search()
317
+ check = np.max(np.abs(self.gradient))
318
+ if self.verbose:
319
+ print(f" 1. max |gradient| {check} <= {self.tol}")
320
+ if check > self.tol:
321
+ return
322
+
323
+ # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol
324
+ # d = sqrt(grad @ hessian^-1 @ grad)
325
+ # = sqrt(coef_newton @ hessian @ coef_newton)
326
+ # See Boyd, Vandenberghe (2004) "Convex Optimization" Chapter 9.5.1.
327
+ d2 = self.coef_newton @ self.hessian @ self.coef_newton
328
+ if self.verbose:
329
+ print(f" 2. Newton decrement {0.5 * d2} <= {self.tol}")
330
+ if 0.5 * d2 > self.tol:
331
+ return
332
+
333
+ if self.verbose:
334
+ loss_value = self.linear_loss.loss(
335
+ coef=self.coef,
336
+ X=X,
337
+ y=y,
338
+ sample_weight=sample_weight,
339
+ l2_reg_strength=self.l2_reg_strength,
340
+ n_threads=self.n_threads,
341
+ )
342
+ print(f" Solver did converge at loss = {loss_value}.")
343
+ self.converged = True
344
+
345
+ def finalize(self, X, y, sample_weight):
346
+ """Finalize the solvers results.
347
+
348
+ Some solvers may need this, others not.
349
+ """
350
+ pass
351
+
352
+ def solve(self, X, y, sample_weight):
353
+ """Solve the optimization problem.
354
+
355
+ This is the main routine.
356
+
357
+ Order of calls:
358
+ self.setup()
359
+ while iteration:
360
+ self.update_gradient_hessian()
361
+ self.inner_solve()
362
+ self.line_search()
363
+ self.check_convergence()
364
+ self.finalize()
365
+
366
+ Returns
367
+ -------
368
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
369
+ Solution of the optimization problem.
370
+ """
371
+ # setup usually:
372
+ # - initializes self.coef if needed
373
+ # - initializes and calculates self.raw_prediction, self.loss_value
374
+ self.setup(X=X, y=y, sample_weight=sample_weight)
375
+
376
+ self.iteration = 1
377
+ self.converged = False
378
+ self.use_fallback_lbfgs_solve = False
379
+
380
+ while self.iteration <= self.max_iter and not self.converged:
381
+ if self.verbose:
382
+ print(f"Newton iter={self.iteration}")
383
+
384
+ self.use_fallback_lbfgs_solve = False # Fallback solver.
385
+
386
+ # 1. Update Hessian and gradient
387
+ self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)
388
+
389
+ # TODO:
390
+ # if iteration == 1:
391
+ # We might stop early, e.g. we already are close to the optimum,
392
+ # usually detected by zero gradients at this stage.
393
+
394
+ # 2. Inner solver
395
+ # Calculate Newton step/direction
396
+ # This usually sets self.coef_newton and self.gradient_times_newton.
397
+ self.inner_solve(X=X, y=y, sample_weight=sample_weight)
398
+ if self.use_fallback_lbfgs_solve:
399
+ break
400
+
401
+ # 3. Backtracking line search
402
+ # This usually sets self.coef_old, self.coef, self.loss_value_old
403
+ # self.loss_value, self.gradient_old, self.gradient,
404
+ # self.raw_prediction.
405
+ self.line_search(X=X, y=y, sample_weight=sample_weight)
406
+ if self.use_fallback_lbfgs_solve:
407
+ break
408
+
409
+ # 4. Check convergence
410
+ # Sets self.converged.
411
+ self.check_convergence(X=X, y=y, sample_weight=sample_weight)
412
+
413
+ # 5. Next iteration
414
+ self.iteration += 1
415
+
416
+ if not self.converged:
417
+ if self.use_fallback_lbfgs_solve:
418
+ # Note: The fallback solver circumvents check_convergence and relies on
419
+ # the convergence checks of lbfgs instead. Enough warnings have been
420
+ # raised on the way.
421
+ self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)
422
+ else:
423
+ warnings.warn(
424
+ (
425
+ f"Newton solver did not converge after {self.iteration - 1} "
426
+ "iterations."
427
+ ),
428
+ ConvergenceWarning,
429
+ )
430
+
431
+ self.iteration -= 1
432
+ self.finalize(X=X, y=y, sample_weight=sample_weight)
433
+ return self.coef
434
+
435
+
436
+ class NewtonCholeskySolver(NewtonSolver):
437
+ """Cholesky based Newton solver.
438
+
439
+ Inner solver for finding the Newton step H w_newton = -g uses a Cholesky based linear
440
+ solver.
441
+ """
442
+
443
+ def setup(self, X, y, sample_weight):
444
+ super().setup(X=X, y=y, sample_weight=sample_weight)
445
+ n_dof = X.shape[1]
446
+ if self.linear_loss.fit_intercept:
447
+ n_dof += 1
448
+ self.gradient = np.empty_like(self.coef)
449
+ self.hessian = np.empty_like(self.coef, shape=(n_dof, n_dof))
450
+
451
+ def update_gradient_hessian(self, X, y, sample_weight):
452
+ _, _, self.hessian_warning = self.linear_loss.gradient_hessian(
453
+ coef=self.coef,
454
+ X=X,
455
+ y=y,
456
+ sample_weight=sample_weight,
457
+ l2_reg_strength=self.l2_reg_strength,
458
+ n_threads=self.n_threads,
459
+ gradient_out=self.gradient,
460
+ hessian_out=self.hessian,
461
+ raw_prediction=self.raw_prediction, # this was updated in line_search
462
+ )
463
+
464
+ def inner_solve(self, X, y, sample_weight):
465
+ if self.hessian_warning:
466
+ warnings.warn(
467
+ (
468
+ f"The inner solver of {self.__class__.__name__} detected a "
469
+ "pointwise hessian with many negative values at iteration "
470
+ f"#{self.iteration}. It will now resort to lbfgs instead."
471
+ ),
472
+ ConvergenceWarning,
473
+ )
474
+ if self.verbose:
475
+ print(
476
+ " The inner solver detected a pointwise Hessian with many "
477
+ "negative values and resorts to lbfgs instead."
478
+ )
479
+ self.use_fallback_lbfgs_solve = True
480
+ return
481
+
482
+ try:
483
+ with warnings.catch_warnings():
484
+ warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
485
+ self.coef_newton = scipy.linalg.solve(
486
+ self.hessian, -self.gradient, check_finite=False, assume_a="sym"
487
+ )
488
+ self.gradient_times_newton = self.gradient @ self.coef_newton
489
+ if self.gradient_times_newton > 0:
490
+ if self.verbose:
491
+ print(
492
+ " The inner solver found a Newton step that is not a "
493
+ "descent direction and resorts to LBFGS steps instead."
494
+ )
495
+ self.use_fallback_lbfgs_solve = True
496
+ return
497
+ except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e:
498
+ warnings.warn(
499
+ f"The inner solver of {self.__class__.__name__} stumbled upon a "
500
+ "singular or very ill-conditioned Hessian matrix at iteration "
501
+ f"#{self.iteration}. It will now resort to lbfgs instead.\n"
502
+ "Further options are to use another solver or to avoid such situation "
503
+ "in the first place. Possible remedies are removing collinear features"
504
+ " of X or increasing the penalization strengths.\n"
505
+ "The original Linear Algebra message was:\n"
506
+ + str(e),
507
+ scipy.linalg.LinAlgWarning,
508
+ )
509
+ # Possible causes:
510
+ # 1. hess_pointwise is negative. But this is already taken care in
511
+ # LinearModelLoss.gradient_hessian.
512
+ # 2. X is singular or ill-conditioned
513
+ # This might be the most probable cause.
514
+ #
515
+ # There are many possible ways to deal with this situation. Most of them
516
+ # add, explicitly or implicitly, a matrix to the hessian to make it
517
+ # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed.
518
+ # Instead, we resort to lbfgs.
519
+ if self.verbose:
520
+ print(
521
+ " The inner solver stumbled upon a singular or ill-conditioned "
522
+ "Hessian matrix and resorts to LBFGS instead."
523
+ )
524
+ self.use_fallback_lbfgs_solve = True
525
+ return
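
For orientation, the following standalone sketch (not part of the diff) mirrors the logic above on a toy strictly convex quadratic: solve the symmetric system H p = -g for the Newton step, treat a non-descent step as a failure case, and stop once the Newton decrement 1/2 * p' H p drops below tol. The objective and the names A, b, grad are illustrative assumptions, not sklearn internals.

import numpy as np
import scipy.linalg

A = np.array([[3.0, 1.0], [1.0, 2.0]])  # SPD Hessian of the toy objective
b = np.array([1.0, -1.0])

def grad(w):
    # Gradient of f(w) = 0.5 * w @ A @ w - b @ w
    return A @ w - b

w = np.zeros(2)
tol = 1e-8
for _ in range(100):
    g, H = grad(w), A  # the Hessian of a quadratic is constant
    # Newton step: solve H p = -g, as in inner_solve() above.
    p = scipy.linalg.solve(H, -g, assume_a="sym", check_finite=False)
    if g @ p > 0:
        # Not a descent direction; the solver above would fall back to lbfgs here.
        break
    # Newton decrement criterion, as in check_convergence() above.
    if 0.5 * (p @ H @ p) <= tol:
        break
    w = w + p  # full step; the real solver adds a backtracking line search

print(w, np.linalg.solve(A, b))  # both should agree
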
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py ADDED
@@ -0,0 +1,904 @@
1
+ """
2
+ Generalized Linear Models with Exponential Dispersion Family
3
+ """
4
+
5
+ # Author: Christian Lorentzen <[email protected]>
6
+ # some parts and tricks stolen from other sklearn files.
7
+ # License: BSD 3 clause
8
+
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ import scipy.optimize
13
+
14
+ from ..._loss.loss import (
15
+ HalfGammaLoss,
16
+ HalfPoissonLoss,
17
+ HalfSquaredError,
18
+ HalfTweedieLoss,
19
+ HalfTweedieLossIdentity,
20
+ )
21
+ from ...base import BaseEstimator, RegressorMixin, _fit_context
22
+ from ...utils import check_array
23
+ from ...utils._openmp_helpers import _openmp_effective_n_threads
24
+ from ...utils._param_validation import Hidden, Interval, StrOptions
25
+ from ...utils.optimize import _check_optimize_result
26
+ from ...utils.validation import _check_sample_weight, check_is_fitted
27
+ from .._linear_loss import LinearModelLoss
28
+ from ._newton_solver import NewtonCholeskySolver, NewtonSolver
29
+
30
+
31
+ class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
32
+ """Regression via a penalized Generalized Linear Model (GLM).
33
+
34
+ GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and
35
+ predicting the mean of the target y as y_pred=h(X*w) with coefficients w.
36
+ Therefore, the fit minimizes the following objective function with L2 priors as
37
+ regularizer::
38
+
39
+ 1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w))) + 1/2 * alpha * ||w||_2^2
40
+
41
+ with inverse link function h, s=sample_weight and per observation (unit) deviance
42
+ deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative
43
+ log-likelihood up to a constant (in w) term.
44
+ The parameter ``alpha`` corresponds to the lambda parameter in glmnet.
45
+
46
+ Instead of implementing the EDM family and a link function separately, we directly
47
+ use the loss functions `from sklearn._loss` which have the link functions included
48
+ in them for performance reasons. We pick the loss functions that implement
49
+ (1/2 times) EDM deviances.
50
+
51
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
52
+
53
+ .. versionadded:: 0.23
54
+
55
+ Parameters
56
+ ----------
57
+ alpha : float, default=1
58
+ Constant that multiplies the penalty term and thus determines the
59
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
60
+ GLMs. In this case, the design matrix `X` must have full column rank
61
+ (no collinearities).
62
+ Values must be in the range `[0.0, inf)`.
63
+
64
+ fit_intercept : bool, default=True
65
+ Specifies if a constant (a.k.a. bias or intercept) should be
66
+ added to the linear predictor (X @ coef + intercept).
67
+
68
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
69
+ Algorithm to use in the optimization problem:
70
+
71
+ 'lbfgs'
72
+ Calls scipy's L-BFGS-B optimizer.
73
+
74
+ 'newton-cholesky'
75
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
76
+ iterated reweighted least squares) with an inner Cholesky based solver.
77
+ This solver is a good choice for `n_samples` >> `n_features`, especially
78
+ with one-hot encoded categorical features with rare categories. Be aware
79
+ that the memory usage of this solver has a quadratic dependency on
80
+ `n_features` because it explicitly computes the Hessian matrix.
81
+
82
+ .. versionadded:: 1.2
83
+
84
+ max_iter : int, default=100
85
+ The maximal number of iterations for the solver.
86
+ Values must be in the range `[1, inf)`.
87
+
88
+ tol : float, default=1e-4
89
+ Stopping criterion. For the lbfgs solver,
90
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
91
+ where ``g_j`` is the j-th component of the gradient (derivative) of
92
+ the objective function.
93
+ Values must be in the range `(0.0, inf)`.
94
+
95
+ warm_start : bool, default=False
96
+ If set to ``True``, reuse the solution of the previous call to ``fit``
97
+ as initialization for ``coef_`` and ``intercept_``.
98
+
99
+ verbose : int, default=0
100
+ For the lbfgs solver set verbose to any positive number for verbosity.
101
+ Values must be in the range `[0, inf)`.
102
+
103
+ Attributes
104
+ ----------
105
+ coef_ : array of shape (n_features,)
106
+ Estimated coefficients for the linear predictor (`X @ coef_ +
107
+ intercept_`) in the GLM.
108
+
109
+ intercept_ : float
110
+ Intercept (a.k.a. bias) added to linear predictor.
111
+
112
+ n_iter_ : int
113
+ Actual number of iterations used in the solver.
114
+
115
+ _base_loss : BaseLoss, default=HalfSquaredError()
116
+ This is set during fit via `self._get_loss()`.
117
+ A `_base_loss` contains a specific loss function as well as the link
118
+ function. The loss to be minimized specifies the distributional assumption of
119
+ the GLM, i.e. the distribution from the EDM. Here are some examples:
120
+
121
+ ======================= ======== ==========================
122
+ _base_loss Link Target Domain
123
+ ======================= ======== ==========================
124
+ HalfSquaredError identity y any real number
125
+ HalfPoissonLoss log 0 <= y
126
+ HalfGammaLoss log 0 < y
127
+ HalfTweedieLoss log dependent on tweedie power
128
+ HalfTweedieLossIdentity identity dependent on tweedie power
129
+ ======================= ======== ==========================
130
+
131
+ The link function of the GLM, i.e. mapping from linear predictor
132
+ `X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link,
133
+ we have `y_pred = exp(X @ coeff + intercept)`.
134
+ """
135
+
136
+ # We allow for NewtonSolver classes for the "solver" parameter but do not
137
+ # make them public in the docstrings. This facilitates testing and
138
+ # benchmarking.
139
+ _parameter_constraints: dict = {
140
+ "alpha": [Interval(Real, 0.0, None, closed="left")],
141
+ "fit_intercept": ["boolean"],
142
+ "solver": [
143
+ StrOptions({"lbfgs", "newton-cholesky"}),
144
+ Hidden(type),
145
+ ],
146
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
147
+ "tol": [Interval(Real, 0.0, None, closed="neither")],
148
+ "warm_start": ["boolean"],
149
+ "verbose": ["verbose"],
150
+ }
151
+
152
+ def __init__(
153
+ self,
154
+ *,
155
+ alpha=1.0,
156
+ fit_intercept=True,
157
+ solver="lbfgs",
158
+ max_iter=100,
159
+ tol=1e-4,
160
+ warm_start=False,
161
+ verbose=0,
162
+ ):
163
+ self.alpha = alpha
164
+ self.fit_intercept = fit_intercept
165
+ self.solver = solver
166
+ self.max_iter = max_iter
167
+ self.tol = tol
168
+ self.warm_start = warm_start
169
+ self.verbose = verbose
170
+
171
+ @_fit_context(prefer_skip_nested_validation=True)
172
+ def fit(self, X, y, sample_weight=None):
173
+ """Fit a Generalized Linear Model.
174
+
175
+ Parameters
176
+ ----------
177
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
178
+ Training data.
179
+
180
+ y : array-like of shape (n_samples,)
181
+ Target values.
182
+
183
+ sample_weight : array-like of shape (n_samples,), default=None
184
+ Sample weights.
185
+
186
+ Returns
187
+ -------
188
+ self : object
189
+ Fitted model.
190
+ """
191
+ X, y = self._validate_data(
192
+ X,
193
+ y,
194
+ accept_sparse=["csc", "csr"],
195
+ dtype=[np.float64, np.float32],
196
+ y_numeric=True,
197
+ multi_output=False,
198
+ )
199
+
200
+ # required by losses
201
+ if self.solver == "lbfgs":
202
+ # lbfgs will force coef and therefore raw_prediction to be float64. The
203
+ # base_loss needs y, X @ coef and sample_weight all of same dtype
204
+ # (and contiguous).
205
+ loss_dtype = np.float64
206
+ else:
207
+ loss_dtype = min(max(y.dtype, X.dtype), np.float64)
208
+ y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False)
209
+
210
+ if sample_weight is not None:
211
+ # Note that _check_sample_weight calls check_array(order="C") required by
212
+ # losses.
213
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)
214
+
215
+ n_samples, n_features = X.shape
216
+ self._base_loss = self._get_loss()
217
+
218
+ linear_loss = LinearModelLoss(
219
+ base_loss=self._base_loss,
220
+ fit_intercept=self.fit_intercept,
221
+ )
222
+
223
+ if not linear_loss.base_loss.in_y_true_range(y):
224
+ raise ValueError(
225
+ "Some value(s) of y are out of the valid range of the loss"
226
+ f" {self._base_loss.__class__.__name__!r}."
227
+ )
228
+
229
+ # TODO: if alpha=0 check that X is not rank deficient
230
+
231
+ # NOTE: Rescaling of sample_weight:
232
+ # We want to minimize
233
+ # obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance)
234
+ # + 1/2 * alpha * L2,
235
+ # with
236
+ # deviance = 2 * loss.
237
+ # The objective is invariant to multiplying sample_weight by a constant. We
238
+ # could choose this constant such that sum(sample_weight) = 1 in order to end
239
+ # up with
240
+ # obj = sum(sample_weight * loss) + 1/2 * alpha * L2.
241
+ # But LinearModelLoss.loss() already computes
242
+ # average(loss, weights=sample_weight)
243
+ # Thus, without rescaling, we have
244
+ # obj = LinearModelLoss.loss(...)
245
+
246
+ if self.warm_start and hasattr(self, "coef_"):
247
+ if self.fit_intercept:
248
+ # LinearModelLoss needs intercept at the end of coefficient array.
249
+ coef = np.concatenate((self.coef_, np.array([self.intercept_])))
250
+ else:
251
+ coef = self.coef_
252
+ coef = coef.astype(loss_dtype, copy=False)
253
+ else:
254
+ coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
255
+ if self.fit_intercept:
256
+ coef[-1] = linear_loss.base_loss.link.link(
257
+ np.average(y, weights=sample_weight)
258
+ )
259
+
260
+ l2_reg_strength = self.alpha
261
+ n_threads = _openmp_effective_n_threads()
262
+
263
+ # Algorithms for optimization:
264
+ # Note again that our losses implement 1/2 * deviance.
265
+ if self.solver == "lbfgs":
266
+ func = linear_loss.loss_gradient
267
+
268
+ opt_res = scipy.optimize.minimize(
269
+ func,
270
+ coef,
271
+ method="L-BFGS-B",
272
+ jac=True,
273
+ options={
274
+ "maxiter": self.max_iter,
275
+ "maxls": 50, # default is 20
276
+ "iprint": self.verbose - 1,
277
+ "gtol": self.tol,
278
+ # The constant 64 was found empirically to pass the test suite.
279
+ # The point is that ftol is very small, but a bit larger than
280
+ # machine precision for float64, which is the dtype used by lbfgs.
281
+ "ftol": 64 * np.finfo(float).eps,
282
+ },
283
+ args=(X, y, sample_weight, l2_reg_strength, n_threads),
284
+ )
285
+ self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
286
+ coef = opt_res.x
287
+ elif self.solver == "newton-cholesky":
288
+ sol = NewtonCholeskySolver(
289
+ coef=coef,
290
+ linear_loss=linear_loss,
291
+ l2_reg_strength=l2_reg_strength,
292
+ tol=self.tol,
293
+ max_iter=self.max_iter,
294
+ n_threads=n_threads,
295
+ verbose=self.verbose,
296
+ )
297
+ coef = sol.solve(X, y, sample_weight)
298
+ self.n_iter_ = sol.iteration
299
+ elif issubclass(self.solver, NewtonSolver):
300
+ sol = self.solver(
301
+ coef=coef,
302
+ linear_loss=linear_loss,
303
+ l2_reg_strength=l2_reg_strength,
304
+ tol=self.tol,
305
+ max_iter=self.max_iter,
306
+ n_threads=n_threads,
307
+ )
308
+ coef = sol.solve(X, y, sample_weight)
309
+ self.n_iter_ = sol.iteration
310
+ else:
311
+ raise ValueError(f"Invalid solver={self.solver}.")
312
+
313
+ if self.fit_intercept:
314
+ self.intercept_ = coef[-1]
315
+ self.coef_ = coef[:-1]
316
+ else:
317
+ # set intercept to zero as the other linear models do
318
+ self.intercept_ = 0.0
319
+ self.coef_ = coef
320
+
321
+ return self
322
+
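
As a side note, the lbfgs branch above is the standard scipy pattern of passing a callable that returns both loss and gradient together with jac=True. A minimal, self-contained sketch of that pattern on a toy L2-penalized squared error objective (all names here are illustrative, not sklearn API):

import numpy as np
import scipy.optimize

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.normal(size=50)
alpha = 0.1

def loss_gradient(w, X, y, alpha):
    # Returns (loss, gradient) so it can be used with jac=True.
    resid = X @ w - y
    loss = 0.5 * np.mean(resid**2) + 0.5 * alpha * w @ w
    grad = X.T @ resid / X.shape[0] + alpha * w
    return loss, grad

opt_res = scipy.optimize.minimize(
    loss_gradient,
    np.zeros(X.shape[1]),
    method="L-BFGS-B",
    jac=True,
    options={"maxiter": 100, "gtol": 1e-8},
    args=(X, y, alpha),
)
print(opt_res.x)  # close to [1, -2, 0.5] for small alpha
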
323
+ def _linear_predictor(self, X):
324
+ """Compute the linear_predictor = `X @ coef_ + intercept_`.
325
+
326
+ Note that we often use the term raw_prediction instead of linear predictor.
327
+
328
+ Parameters
329
+ ----------
330
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
331
+ Samples.
332
+
333
+ Returns
334
+ -------
335
+ y_pred : array of shape (n_samples,)
336
+ Returns predicted values of linear predictor.
337
+ """
338
+ check_is_fitted(self)
339
+ X = self._validate_data(
340
+ X,
341
+ accept_sparse=["csr", "csc", "coo"],
342
+ dtype=[np.float64, np.float32],
343
+ ensure_2d=True,
344
+ allow_nd=False,
345
+ reset=False,
346
+ )
347
+ return X @ self.coef_ + self.intercept_
348
+
349
+ def predict(self, X):
350
+ """Predict using GLM with feature matrix X.
351
+
352
+ Parameters
353
+ ----------
354
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
355
+ Samples.
356
+
357
+ Returns
358
+ -------
359
+ y_pred : array of shape (n_samples,)
360
+ Returns predicted values.
361
+ """
362
+ # check_array is done in _linear_predictor
363
+ raw_prediction = self._linear_predictor(X)
364
+ y_pred = self._base_loss.link.inverse(raw_prediction)
365
+ return y_pred
366
+
367
+ def score(self, X, y, sample_weight=None):
368
+ """Compute D^2, the percentage of deviance explained.
369
+
370
+ D^2 is a generalization of the coefficient of determination R^2.
371
+ R^2 uses squared error and D^2 uses the deviance of this GLM, see the
372
+ :ref:`User Guide <regression_metrics>`.
373
+
374
+ D^2 is defined as
375
+ :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
376
+ :math:`D_{null}` is the null deviance, i.e. the deviance of a model
377
+ with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
378
+ The mean :math:`\\bar{y}` is averaged by sample_weight.
379
+ Best possible score is 1.0 and it can be negative (because the model
380
+ can be arbitrarily worse).
381
+
382
+ Parameters
383
+ ----------
384
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
385
+ Test samples.
386
+
387
+ y : array-like of shape (n_samples,)
388
+ True values of target.
389
+
390
+ sample_weight : array-like of shape (n_samples,), default=None
391
+ Sample weights.
392
+
393
+ Returns
394
+ -------
395
+ score : float
396
+ D^2 of self.predict(X) w.r.t. y.
397
+ """
398
+ # TODO: Adapt link to User Guide in the docstring, once
399
+ # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.
400
+ #
401
+ # Note, default score defined in RegressorMixin is R^2 score.
402
+ # TODO: make D^2 a score function in module metrics (and thereby get
403
+ # input validation and so on)
404
+ raw_prediction = self._linear_predictor(X) # validates X
405
+ # required by losses
406
+ y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False)
407
+
408
+ if sample_weight is not None:
409
+ # Note that _check_sample_weight calls check_array(order="C") required by
410
+ # losses.
411
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
412
+
413
+ base_loss = self._base_loss
414
+
415
+ if not base_loss.in_y_true_range(y):
416
+ raise ValueError(
417
+ "Some value(s) of y are out of the valid range of the loss"
418
+ f" {base_loss.__class__.__name__}."
419
+ )
420
+
421
+ constant = np.average(
422
+ base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None),
423
+ weights=sample_weight,
424
+ )
425
+
426
+ # Missing factor of 2 in deviance cancels out.
427
+ deviance = base_loss(
428
+ y_true=y,
429
+ raw_prediction=raw_prediction,
430
+ sample_weight=sample_weight,
431
+ n_threads=1,
432
+ )
433
+ y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
434
+ deviance_null = base_loss(
435
+ y_true=y,
436
+ raw_prediction=np.tile(y_mean, y.shape[0]),
437
+ sample_weight=sample_weight,
438
+ n_threads=1,
439
+ )
440
+ return 1 - (deviance + constant) / (deviance_null + constant)
441
+
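
To make the D^2 definition above concrete, here is a small sketch (synthetic, unweighted data; illustrative only) that recomputes the score from mean Poisson deviances and compares it with PoissonRegressor.score; the two should agree because the constant terms and the factor 2 cancel in the ratio.

import numpy as np
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import mean_poisson_deviance

rng = np.random.RandomState(0)
X = rng.uniform(size=(80, 2))
y = rng.poisson(lam=np.exp(1 + X @ np.array([0.5, -0.5])))

reg = PoissonRegressor(alpha=1e-3, max_iter=1000).fit(X, y)

dev_model = mean_poisson_deviance(y, reg.predict(X))
dev_null = mean_poisson_deviance(y, np.full(y.shape, y.mean()))
print(1 - dev_model / dev_null)  # D^2 computed by hand
print(reg.score(X, y))           # should agree up to floating point
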
442
+ def _more_tags(self):
443
+ try:
444
+ # Create instance of BaseLoss if fit wasn't called yet. This is necessary as
445
+ # TweedieRegressor might set the used loss during fit different from
446
+ # self._base_loss.
447
+ base_loss = self._get_loss()
448
+ return {"requires_positive_y": not base_loss.in_y_true_range(-1.0)}
449
+ except (ValueError, AttributeError, TypeError):
450
+ # This happens when the link or power parameter of TweedieRegressor is
451
+ # invalid. We fallback on the default tags in that case.
452
+ return {}
453
+
454
+ def _get_loss(self):
455
+ """This is only necessary because of the link and power arguments of the
456
+ TweedieRegressor.
457
+
458
+ Note that we do not need to pass sample_weight to the loss class as this is
459
+ only needed to set loss.constant_hessian on which GLMs do not rely.
460
+ """
461
+ return HalfSquaredError()
462
+
463
+
464
+ class PoissonRegressor(_GeneralizedLinearRegressor):
465
+ """Generalized Linear Model with a Poisson distribution.
466
+
467
+ This regressor uses the 'log' link function.
468
+
469
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
470
+
471
+ .. versionadded:: 0.23
472
+
473
+ Parameters
474
+ ----------
475
+ alpha : float, default=1
476
+ Constant that multiplies the L2 penalty term and determines the
477
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
478
+ GLMs. In this case, the design matrix `X` must have full column rank
479
+ (no collinearities).
480
+ Values of `alpha` must be in the range `[0.0, inf)`.
481
+
482
+ fit_intercept : bool, default=True
483
+ Specifies if a constant (a.k.a. bias or intercept) should be
484
+ added to the linear predictor (`X @ coef + intercept`).
485
+
486
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
487
+ Algorithm to use in the optimization problem:
488
+
489
+ 'lbfgs'
490
+ Calls scipy's L-BFGS-B optimizer.
491
+
492
+ 'newton-cholesky'
493
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
494
+ iterated reweighted least squares) with an inner Cholesky based solver.
495
+ This solver is a good choice for `n_samples` >> `n_features`, especially
496
+ with one-hot encoded categorical features with rare categories. Be aware
497
+ that the memory usage of this solver has a quadratic dependency on
498
+ `n_features` because it explicitly computes the Hessian matrix.
499
+
500
+ .. versionadded:: 1.2
501
+
502
+ max_iter : int, default=100
503
+ The maximal number of iterations for the solver.
504
+ Values must be in the range `[1, inf)`.
505
+
506
+ tol : float, default=1e-4
507
+ Stopping criterion. For the lbfgs solver,
508
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
509
+ where ``g_j`` is the j-th component of the gradient (derivative) of
510
+ the objective function.
511
+ Values must be in the range `(0.0, inf)`.
512
+
513
+ warm_start : bool, default=False
514
+ If set to ``True``, reuse the solution of the previous call to ``fit``
515
+ as initialization for ``coef_`` and ``intercept_`` .
516
+
517
+ verbose : int, default=0
518
+ For the lbfgs solver set verbose to any positive number for verbosity.
519
+ Values must be in the range `[0, inf)`.
520
+
521
+ Attributes
522
+ ----------
523
+ coef_ : array of shape (n_features,)
524
+ Estimated coefficients for the linear predictor (`X @ coef_ +
525
+ intercept_`) in the GLM.
526
+
527
+ intercept_ : float
528
+ Intercept (a.k.a. bias) added to linear predictor.
529
+
530
+ n_features_in_ : int
531
+ Number of features seen during :term:`fit`.
532
+
533
+ .. versionadded:: 0.24
534
+
535
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
536
+ Names of features seen during :term:`fit`. Defined only when `X`
537
+ has feature names that are all strings.
538
+
539
+ .. versionadded:: 1.0
540
+
541
+ n_iter_ : int
542
+ Actual number of iterations used in the solver.
543
+
544
+ See Also
545
+ --------
546
+ TweedieRegressor : Generalized Linear Model with a Tweedie distribution.
547
+
548
+ Examples
549
+ --------
550
+ >>> from sklearn import linear_model
551
+ >>> clf = linear_model.PoissonRegressor()
552
+ >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
553
+ >>> y = [12, 17, 22, 21]
554
+ >>> clf.fit(X, y)
555
+ PoissonRegressor()
556
+ >>> clf.score(X, y)
557
+ 0.990...
558
+ >>> clf.coef_
559
+ array([0.121..., 0.158...])
560
+ >>> clf.intercept_
561
+ 2.088...
562
+ >>> clf.predict([[1, 1], [3, 4]])
563
+ array([10.676..., 21.875...])
564
+ """
565
+
566
+ _parameter_constraints: dict = {
567
+ **_GeneralizedLinearRegressor._parameter_constraints
568
+ }
569
+
570
+ def __init__(
571
+ self,
572
+ *,
573
+ alpha=1.0,
574
+ fit_intercept=True,
575
+ solver="lbfgs",
576
+ max_iter=100,
577
+ tol=1e-4,
578
+ warm_start=False,
579
+ verbose=0,
580
+ ):
581
+ super().__init__(
582
+ alpha=alpha,
583
+ fit_intercept=fit_intercept,
584
+ solver=solver,
585
+ max_iter=max_iter,
586
+ tol=tol,
587
+ warm_start=warm_start,
588
+ verbose=verbose,
589
+ )
590
+
591
+ def _get_loss(self):
592
+ return HalfPoissonLoss()
593
+
594
+
595
+ class GammaRegressor(_GeneralizedLinearRegressor):
596
+ """Generalized Linear Model with a Gamma distribution.
597
+
598
+ This regressor uses the 'log' link function.
599
+
600
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
601
+
602
+ .. versionadded:: 0.23
603
+
604
+ Parameters
605
+ ----------
606
+ alpha : float, default=1
607
+ Constant that multiplies the L2 penalty term and determines the
608
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
609
+ GLMs. In this case, the design matrix `X` must have full column rank
610
+ (no collinearities).
611
+ Values of `alpha` must be in the range `[0.0, inf)`.
612
+
613
+ fit_intercept : bool, default=True
614
+ Specifies if a constant (a.k.a. bias or intercept) should be
615
+ added to the linear predictor `X @ coef_ + intercept_`.
616
+
617
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
618
+ Algorithm to use in the optimization problem:
619
+
620
+ 'lbfgs'
621
+ Calls scipy's L-BFGS-B optimizer.
622
+
623
+ 'newton-cholesky'
624
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
625
+ iterated reweighted least squares) with an inner Cholesky based solver.
626
+ This solver is a good choice for `n_samples` >> `n_features`, especially
627
+ with one-hot encoded categorical features with rare categories. Be aware
628
+ that the memory usage of this solver has a quadratic dependency on
629
+ `n_features` because it explicitly computes the Hessian matrix.
630
+
631
+ .. versionadded:: 1.2
632
+
633
+ max_iter : int, default=100
634
+ The maximal number of iterations for the solver.
635
+ Values must be in the range `[1, inf)`.
636
+
637
+ tol : float, default=1e-4
638
+ Stopping criterion. For the lbfgs solver,
639
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
640
+ where ``g_j`` is the j-th component of the gradient (derivative) of
641
+ the objective function.
642
+ Values must be in the range `(0.0, inf)`.
643
+
644
+ warm_start : bool, default=False
645
+ If set to ``True``, reuse the solution of the previous call to ``fit``
646
+ as initialization for `coef_` and `intercept_`.
647
+
648
+ verbose : int, default=0
649
+ For the lbfgs solver set verbose to any positive number for verbosity.
650
+ Values must be in the range `[0, inf)`.
651
+
652
+ Attributes
653
+ ----------
654
+ coef_ : array of shape (n_features,)
655
+ Estimated coefficients for the linear predictor (`X @ coef_ +
656
+ intercept_`) in the GLM.
657
+
658
+ intercept_ : float
659
+ Intercept (a.k.a. bias) added to linear predictor.
660
+
661
+ n_features_in_ : int
662
+ Number of features seen during :term:`fit`.
663
+
664
+ .. versionadded:: 0.24
665
+
666
+ n_iter_ : int
667
+ Actual number of iterations used in the solver.
668
+
669
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
670
+ Names of features seen during :term:`fit`. Defined only when `X`
671
+ has feature names that are all strings.
672
+
673
+ .. versionadded:: 1.0
674
+
675
+ See Also
676
+ --------
677
+ PoissonRegressor : Generalized Linear Model with a Poisson distribution.
678
+ TweedieRegressor : Generalized Linear Model with a Tweedie distribution.
679
+
680
+ Examples
681
+ --------
682
+ >>> from sklearn import linear_model
683
+ >>> clf = linear_model.GammaRegressor()
684
+ >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
685
+ >>> y = [19, 26, 33, 30]
686
+ >>> clf.fit(X, y)
687
+ GammaRegressor()
688
+ >>> clf.score(X, y)
689
+ 0.773...
690
+ >>> clf.coef_
691
+ array([0.072..., 0.066...])
692
+ >>> clf.intercept_
693
+ 2.896...
694
+ >>> clf.predict([[1, 0], [2, 8]])
695
+ array([19.483..., 35.795...])
696
+ """
697
+
698
+ _parameter_constraints: dict = {
699
+ **_GeneralizedLinearRegressor._parameter_constraints
700
+ }
701
+
702
+ def __init__(
703
+ self,
704
+ *,
705
+ alpha=1.0,
706
+ fit_intercept=True,
707
+ solver="lbfgs",
708
+ max_iter=100,
709
+ tol=1e-4,
710
+ warm_start=False,
711
+ verbose=0,
712
+ ):
713
+ super().__init__(
714
+ alpha=alpha,
715
+ fit_intercept=fit_intercept,
716
+ solver=solver,
717
+ max_iter=max_iter,
718
+ tol=tol,
719
+ warm_start=warm_start,
720
+ verbose=verbose,
721
+ )
722
+
723
+ def _get_loss(self):
724
+ return HalfGammaLoss()
725
+
726
+
727
+ class TweedieRegressor(_GeneralizedLinearRegressor):
728
+ """Generalized Linear Model with a Tweedie distribution.
729
+
730
+ This estimator can be used to model different GLMs depending on the
731
+ ``power`` parameter, which determines the underlying distribution.
732
+
733
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
734
+
735
+ .. versionadded:: 0.23
736
+
737
+ Parameters
738
+ ----------
739
+ power : float, default=0
740
+ The power determines the underlying target distribution according
741
+ to the following table:
742
+
743
+ +-------+------------------------+
744
+ | Power | Distribution |
745
+ +=======+========================+
746
+ | 0 | Normal |
747
+ +-------+------------------------+
748
+ | 1 | Poisson |
749
+ +-------+------------------------+
750
+ | (1,2) | Compound Poisson Gamma |
751
+ +-------+------------------------+
752
+ | 2 | Gamma |
753
+ +-------+------------------------+
754
+ | 3 | Inverse Gaussian |
755
+ +-------+------------------------+
756
+
757
+ For ``0 < power < 1``, no distribution exists.
758
+
759
+ alpha : float, default=1
760
+ Constant that multiplies the L2 penalty term and determines the
761
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
762
+ GLMs. In this case, the design matrix `X` must have full column rank
763
+ (no collinearities).
764
+ Values of `alpha` must be in the range `[0.0, inf)`.
765
+
766
+ fit_intercept : bool, default=True
767
+ Specifies if a constant (a.k.a. bias or intercept) should be
768
+ added to the linear predictor (`X @ coef + intercept`).
769
+
770
+ link : {'auto', 'identity', 'log'}, default='auto'
771
+ The link function of the GLM, i.e. mapping from linear predictor
772
+ `X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
773
+ the link depending on the chosen `power` parameter as follows:
774
+
775
+ - 'identity' for ``power <= 0``, e.g. for the Normal distribution
776
+ - 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian
777
+ distributions
778
+
779
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
780
+ Algorithm to use in the optimization problem:
781
+
782
+ 'lbfgs'
783
+ Calls scipy's L-BFGS-B optimizer.
784
+
785
+ 'newton-cholesky'
786
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
787
+ iterated reweighted least squares) with an inner Cholesky based solver.
788
+ This solver is a good choice for `n_samples` >> `n_features`, especially
789
+ with one-hot encoded categorical features with rare categories. Be aware
790
+ that the memory usage of this solver has a quadratic dependency on
791
+ `n_features` because it explicitly computes the Hessian matrix.
792
+
793
+ .. versionadded:: 1.2
794
+
795
+ max_iter : int, default=100
796
+ The maximal number of iterations for the solver.
797
+ Values must be in the range `[1, inf)`.
798
+
799
+ tol : float, default=1e-4
800
+ Stopping criterion. For the lbfgs solver,
801
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
802
+ where ``g_j`` is the j-th component of the gradient (derivative) of
803
+ the objective function.
804
+ Values must be in the range `(0.0, inf)`.
805
+
806
+ warm_start : bool, default=False
807
+ If set to ``True``, reuse the solution of the previous call to ``fit``
808
+ as initialization for ``coef_`` and ``intercept_`` .
809
+
810
+ verbose : int, default=0
811
+ For the lbfgs solver set verbose to any positive number for verbosity.
812
+ Values must be in the range `[0, inf)`.
813
+
814
+ Attributes
815
+ ----------
816
+ coef_ : array of shape (n_features,)
817
+ Estimated coefficients for the linear predictor (`X @ coef_ +
818
+ intercept_`) in the GLM.
819
+
820
+ intercept_ : float
821
+ Intercept (a.k.a. bias) added to linear predictor.
822
+
823
+ n_iter_ : int
824
+ Actual number of iterations used in the solver.
825
+
826
+ n_features_in_ : int
827
+ Number of features seen during :term:`fit`.
828
+
829
+ .. versionadded:: 0.24
830
+
831
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
832
+ Names of features seen during :term:`fit`. Defined only when `X`
833
+ has feature names that are all strings.
834
+
835
+ .. versionadded:: 1.0
836
+
837
+ See Also
838
+ --------
839
+ PoissonRegressor : Generalized Linear Model with a Poisson distribution.
840
+ GammaRegressor : Generalized Linear Model with a Gamma distribution.
841
+
842
+ Examples
843
+ --------
844
+ >>> from sklearn import linear_model
845
+ >>> clf = linear_model.TweedieRegressor()
846
+ >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
847
+ >>> y = [2, 3.5, 5, 5.5]
848
+ >>> clf.fit(X, y)
849
+ TweedieRegressor()
850
+ >>> clf.score(X, y)
851
+ 0.839...
852
+ >>> clf.coef_
853
+ array([0.599..., 0.299...])
854
+ >>> clf.intercept_
855
+ 1.600...
856
+ >>> clf.predict([[1, 1], [3, 4]])
857
+ array([2.500..., 4.599...])
858
+ """
859
+
860
+ _parameter_constraints: dict = {
861
+ **_GeneralizedLinearRegressor._parameter_constraints,
862
+ "power": [Interval(Real, None, None, closed="neither")],
863
+ "link": [StrOptions({"auto", "identity", "log"})],
864
+ }
865
+
866
+ def __init__(
867
+ self,
868
+ *,
869
+ power=0.0,
870
+ alpha=1.0,
871
+ fit_intercept=True,
872
+ link="auto",
873
+ solver="lbfgs",
874
+ max_iter=100,
875
+ tol=1e-4,
876
+ warm_start=False,
877
+ verbose=0,
878
+ ):
879
+ super().__init__(
880
+ alpha=alpha,
881
+ fit_intercept=fit_intercept,
882
+ solver=solver,
883
+ max_iter=max_iter,
884
+ tol=tol,
885
+ warm_start=warm_start,
886
+ verbose=verbose,
887
+ )
888
+ self.link = link
889
+ self.power = power
890
+
891
+ def _get_loss(self):
892
+ if self.link == "auto":
893
+ if self.power <= 0:
894
+ # identity link
895
+ return HalfTweedieLossIdentity(power=self.power)
896
+ else:
897
+ # log link
898
+ return HalfTweedieLoss(power=self.power)
899
+
900
+ if self.link == "log":
901
+ return HalfTweedieLoss(power=self.power)
902
+
903
+ if self.link == "identity":
904
+ return HalfTweedieLossIdentity(power=self.power)
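
Since the loss selection above makes TweedieRegressor(power=1, link="log") minimize the same Poisson deviance (up to constant terms) as PoissonRegressor, a quick cross-check is possible with the public estimators documented in this file. This is a sketch on synthetic data, not part of the module:

import numpy as np
from sklearn.linear_model import PoissonRegressor, TweedieRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 3))
y = rng.poisson(lam=np.exp(X @ np.array([0.3, -0.2, 0.1])))

poisson = PoissonRegressor(
    alpha=1e-4, solver="newton-cholesky", tol=1e-10, max_iter=1000
).fit(X, y)
tweedie = TweedieRegressor(
    power=1, link="log", alpha=1e-4, tol=1e-10, max_iter=1000
).fit(X, y)

# Both minimize (half) the Poisson deviance, so the coefficients should agree
# up to the solvers' tolerances.
print(poisson.coef_)
print(tweedie.coef_)
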
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # License: BSD 3 clause
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc ADDED
Binary file (23.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py ADDED
@@ -0,0 +1,1112 @@
1
+ # Authors: Christian Lorentzen <[email protected]>
2
+ #
3
+ # License: BSD 3 clause
4
+
5
+ import itertools
6
+ import warnings
7
+ from functools import partial
8
+
9
+ import numpy as np
10
+ import pytest
11
+ import scipy
12
+ from numpy.testing import assert_allclose
13
+ from scipy import linalg
14
+ from scipy.optimize import minimize, root
15
+
16
+ from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss
17
+ from sklearn._loss.link import IdentityLink, LogLink
18
+ from sklearn.base import clone
19
+ from sklearn.datasets import make_low_rank_matrix, make_regression
20
+ from sklearn.exceptions import ConvergenceWarning
21
+ from sklearn.linear_model import (
22
+ GammaRegressor,
23
+ PoissonRegressor,
24
+ Ridge,
25
+ TweedieRegressor,
26
+ )
27
+ from sklearn.linear_model._glm import _GeneralizedLinearRegressor
28
+ from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver
29
+ from sklearn.linear_model._linear_loss import LinearModelLoss
30
+ from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance
31
+ from sklearn.model_selection import train_test_split
32
+
33
+ SOLVERS = ["lbfgs", "newton-cholesky"]
34
+
35
+
36
+ class BinomialRegressor(_GeneralizedLinearRegressor):
37
+ def _get_loss(self):
38
+ return HalfBinomialLoss()
39
+
40
+
41
+ def _special_minimize(fun, grad, x, tol_NM, tol):
42
+ # Find good starting point by Nelder-Mead
43
+ res_NM = minimize(
44
+ fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM}
45
+ )
46
+ # Now refine via root finding on the gradient of the function, which is
47
+ # more precise than minimizing the function itself.
48
+ res = root(
49
+ grad,
50
+ res_NM.x,
51
+ method="lm",
52
+ options={"ftol": tol, "xtol": tol, "gtol": tol},
53
+ )
54
+ return res.x
55
+
56
+
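
The helper above looks for reference solutions in two stages: a derivative-free Nelder-Mead run to land near the optimum, then root finding on the gradient for extra precision. The same strategy on a toy smooth convex function (illustrative names, not part of the tests):

import numpy as np
from scipy.optimize import minimize, root

c = np.array([1.0, -2.0])  # known minimizer of the toy objective

def fun(x):
    return np.sum((x - c) ** 2 + (x - c) ** 4)

def grad(x):
    return 2 * (x - c) + 4 * (x - c) ** 3

res_nm = minimize(fun, np.zeros_like(c), method="Nelder-Mead",
                  options={"xatol": 1e-6, "fatol": 1e-6})
# Refine by solving grad(x) = 0, which is more precise than minimizing fun itself.
res = root(grad, res_nm.x, method="lm",
           options={"ftol": 1e-14, "xtol": 1e-14, "gtol": 1e-14})
print(res.x)  # very close to c
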
57
+ @pytest.fixture(scope="module")
58
+ def regression_data():
59
+ X, y = make_regression(
60
+ n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2
61
+ )
62
+ return X, y
63
+
64
+
65
+ @pytest.fixture(
66
+ params=itertools.product(
67
+ ["long", "wide"],
68
+ [
69
+ BinomialRegressor(),
70
+ PoissonRegressor(),
71
+ GammaRegressor(),
72
+ # TweedieRegressor(power=3.0), # too difficult
73
+ # TweedieRegressor(power=0, link="log"), # too difficult
74
+ TweedieRegressor(power=1.5),
75
+ ],
76
+ ),
77
+ ids=lambda param: f"{param[0]}-{param[1]}",
78
+ )
79
+ def glm_dataset(global_random_seed, request):
80
+ """Dataset with GLM solutions, well conditioned X.
81
+
82
+ This is inspired by ols_ridge_dataset in test_ridge.py.
83
+
84
+ The construction is based on the SVD decomposition of X = U S V'.
85
+
86
+ Parameters
87
+ ----------
88
+ type : {"long", "wide"}
89
+ If "long", then n_samples > n_features.
90
+ If "wide", then n_features > n_samples.
91
+ model : a GLM model
92
+
93
+ For "wide", we return the minimum norm solution:
94
+
95
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
96
+
97
+ Note that the deviance is always minimized if y = inverse_link(X w) is possible to
98
+ achieve, which it is in the wide data case. Therefore, we can construct the
99
+ solution with minimum norm like (wide) OLS:
100
+
101
+ min ||w||_2 subject to link(y) = raw_prediction = X w
102
+
103
+ Returns
104
+ -------
105
+ model : GLM model
106
+ X : ndarray
107
+ Last column of 1, i.e. intercept.
108
+ y : ndarray
109
+ coef_unpenalized : ndarray
110
+ Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
111
+ case of ambiguity)
112
+ Last coefficient is intercept.
113
+ coef_penalized : ndarray
114
+ GLM solution with alpha=l2_reg_strength=1, i.e.
115
+ min 1/n * sum(loss) + ||w[:-1]||_2^2.
116
+ Last coefficient is intercept.
117
+ l2_reg_strength : float
118
+ Always equal 1.
119
+ """
120
+ data_type, model = request.param
121
+ # Make the larger dimension more than twice as big as the smaller one.
122
+ # This helps when constructing singular matrices like (X, X).
123
+ if data_type == "long":
124
+ n_samples, n_features = 12, 4
125
+ else:
126
+ n_samples, n_features = 4, 12
127
+ k = min(n_samples, n_features)
128
+ rng = np.random.RandomState(global_random_seed)
129
+ X = make_low_rank_matrix(
130
+ n_samples=n_samples,
131
+ n_features=n_features,
132
+ effective_rank=k,
133
+ tail_strength=0.1,
134
+ random_state=rng,
135
+ )
136
+ X[:, -1] = 1 # last column acts as intercept
137
+ U, s, Vt = linalg.svd(X, full_matrices=False)
138
+ assert np.all(s > 1e-3) # to be sure
139
+ assert np.max(s) / np.min(s) < 100 # condition number of X
140
+
141
+ if data_type == "long":
142
+ coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
143
+ coef_unpenalized *= rng.choice([-1, 1], size=n_features)
144
+ raw_prediction = X @ coef_unpenalized
145
+ else:
146
+ raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
147
+ # minimum norm solution min ||w||_2 such that raw_prediction = X w:
148
+ # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
149
+ coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction
150
+
151
+ linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)
152
+ sw = np.full(shape=n_samples, fill_value=1 / n_samples)
153
+ y = linear_loss.base_loss.link.inverse(raw_prediction)
154
+
155
+ # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
156
+ # optimizer. Note that the problem is well conditioned such that we get accurate
157
+ # results.
158
+ l2_reg_strength = 1
159
+ fun = partial(
160
+ linear_loss.loss,
161
+ X=X[:, :-1],
162
+ y=y,
163
+ sample_weight=sw,
164
+ l2_reg_strength=l2_reg_strength,
165
+ )
166
+ grad = partial(
167
+ linear_loss.gradient,
168
+ X=X[:, :-1],
169
+ y=y,
170
+ sample_weight=sw,
171
+ l2_reg_strength=l2_reg_strength,
172
+ )
173
+ coef_penalized_with_intercept = _special_minimize(
174
+ fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14
175
+ )
176
+
177
+ linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)
178
+ fun = partial(
179
+ linear_loss.loss,
180
+ X=X[:, :-1],
181
+ y=y,
182
+ sample_weight=sw,
183
+ l2_reg_strength=l2_reg_strength,
184
+ )
185
+ grad = partial(
186
+ linear_loss.gradient,
187
+ X=X[:, :-1],
188
+ y=y,
189
+ sample_weight=sw,
190
+ l2_reg_strength=l2_reg_strength,
191
+ )
192
+ coef_penalized_without_intercept = _special_minimize(
193
+ fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14
194
+ )
195
+
196
+ # To be sure
197
+ assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
198
+ coef_unpenalized
199
+ )
200
+
201
+ return (
202
+ model,
203
+ X,
204
+ y,
205
+ coef_unpenalized,
206
+ coef_penalized_with_intercept,
207
+ coef_penalized_without_intercept,
208
+ l2_reg_strength,
209
+ )
210
+
211
+
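
The wide branch of the fixture builds the minimum norm interpolating coefficients from the SVD as w = V diag(1/s) U' raw_prediction, which is the same as applying the pseudoinverse of X. A self-contained check of that identity (random data, illustrative only):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_samples, n_features = 4, 12  # wide: more features than samples
X = rng.normal(size=(n_samples, n_features))
raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)

U, s, Vt = linalg.svd(X, full_matrices=False)
w_svd = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction
w_pinv = np.linalg.pinv(X) @ raw_prediction  # minimum norm solution of X w = raw

assert np.allclose(w_svd, w_pinv)
assert np.allclose(X @ w_svd, raw_prediction)  # it interpolates the targets exactly
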
212
+ @pytest.mark.parametrize("solver", SOLVERS)
213
+ @pytest.mark.parametrize("fit_intercept", [False, True])
214
+ def test_glm_regression(solver, fit_intercept, glm_dataset):
215
+ """Test that GLM converges for all solvers to correct solution.
216
+
217
+ We work with a simple constructed data set with known solution.
218
+ """
219
+ model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
220
+ params = dict(
221
+ alpha=alpha,
222
+ fit_intercept=fit_intercept,
223
+ solver=solver,
224
+ tol=1e-12,
225
+ max_iter=1000,
226
+ )
227
+
228
+ model = clone(model).set_params(**params)
229
+ X = X[:, :-1] # remove intercept
230
+ if fit_intercept:
231
+ coef = coef_with_intercept
232
+ intercept = coef[-1]
233
+ coef = coef[:-1]
234
+ else:
235
+ coef = coef_without_intercept
236
+ intercept = 0
237
+
238
+ model.fit(X, y)
239
+
240
+ rtol = 5e-5 if solver == "lbfgs" else 1e-9
241
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
242
+ assert_allclose(model.coef_, coef, rtol=rtol)
243
+
244
+ # Same with sample_weight.
245
+ model = (
246
+ clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
247
+ )
248
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
249
+ assert_allclose(model.coef_, coef, rtol=rtol)
250
+
251
+
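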
252
+ @pytest.mark.parametrize("solver", SOLVERS)
253
+ @pytest.mark.parametrize("fit_intercept", [True, False])
254
+ def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset):
255
+ """Test that GLM converges for all solvers to correct solution on hstacked data.
256
+
257
+ We work with a simple constructed data set with known solution.
258
+ Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
259
+ For long X, [X, X] is still a long but singular matrix.
260
+ """
261
+ model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
262
+ n_samples, n_features = X.shape
263
+ params = dict(
264
+ alpha=alpha / 2,
265
+ fit_intercept=fit_intercept,
266
+ solver=solver,
267
+ tol=1e-12,
268
+ max_iter=1000,
269
+ )
270
+
271
+ model = clone(model).set_params(**params)
272
+ X = X[:, :-1] # remove intercept
273
+ X = 0.5 * np.concatenate((X, X), axis=1)
274
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
275
+ if fit_intercept:
276
+ coef = coef_with_intercept
277
+ intercept = coef[-1]
278
+ coef = coef[:-1]
279
+ else:
280
+ coef = coef_without_intercept
281
+ intercept = 0
282
+
283
+ with warnings.catch_warnings():
284
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
285
+ # cases should be considered a bug or not. In the mean time we don't
286
+ # fail when the assertions below pass irrespective of the presence of
287
+ # the warning.
288
+ warnings.simplefilter("ignore", ConvergenceWarning)
289
+ model.fit(X, y)
290
+
291
+ rtol = 2e-4 if solver == "lbfgs" else 5e-9
292
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
293
+ assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol)
294
+
295
+
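
The column-duplication invariance exercised above can also be sanity-checked directly: duplicating and halving the columns while halving alpha leaves the objective, and hence the fit, unchanged. A sketch with a Poisson model on synthetic data (not part of the test suite):

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(60, 3))
y = rng.poisson(lam=np.exp(X @ np.array([0.4, -0.3, 0.2])))

alpha = 1.0
ref = PoissonRegressor(alpha=alpha, tol=1e-12, max_iter=1000).fit(X, y)

# Duplicate and halve the columns; halve the penalty.
X2 = 0.5 * np.hstack([X, X])
dup = PoissonRegressor(alpha=alpha / 2, tol=1e-12, max_iter=1000).fit(X2, y)

print(np.allclose(dup.predict(X2), ref.predict(X), rtol=1e-4))
print(np.allclose(dup.coef_, np.r_[ref.coef_, ref.coef_], rtol=1e-3, atol=1e-6))
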
296
+ @pytest.mark.parametrize("solver", SOLVERS)
297
+ @pytest.mark.parametrize("fit_intercept", [True, False])
298
+ def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
299
+ """Test that GLM converges for all solvers to correct solution on vstacked data.
300
+
301
+ We work with a simple constructed data set with known solution.
302
+ Fit on X, y with alpha is the same as fit on the vertically stacked data
303
+ [X; X], [y; y] with the same alpha,
304
+ because the average loss stays the same.
305
+ For wide X, [X', X'] is a singular matrix.
306
+ """
307
+ model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
308
+ n_samples, n_features = X.shape
309
+ params = dict(
310
+ alpha=alpha,
311
+ fit_intercept=fit_intercept,
312
+ solver=solver,
313
+ tol=1e-12,
314
+ max_iter=1000,
315
+ )
316
+
317
+ model = clone(model).set_params(**params)
318
+ X = X[:, :-1] # remove intercept
319
+ X = np.concatenate((X, X), axis=0)
320
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
321
+ y = np.r_[y, y]
322
+ if fit_intercept:
323
+ coef = coef_with_intercept
324
+ intercept = coef[-1]
325
+ coef = coef[:-1]
326
+ else:
327
+ coef = coef_without_intercept
328
+ intercept = 0
329
+ model.fit(X, y)
330
+
331
+ rtol = 3e-5 if solver == "lbfgs" else 5e-9
332
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
333
+ assert_allclose(model.coef_, coef, rtol=rtol)
334
+
335
+
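
Similarly, the row-duplication invariance from the test above follows because the objective uses the average loss: stacking (X, y) twice with the same alpha does not change the minimizer. A quick illustration (synthetic data, not part of the test suite):

import numpy as np
from sklearn.linear_model import GammaRegressor

rng = np.random.RandomState(42)
X = rng.uniform(size=(50, 3))
y = np.exp(X @ np.array([0.2, 0.1, -0.3])) * rng.gamma(shape=2.0, scale=0.5, size=50)

ref = GammaRegressor(alpha=1.0, tol=1e-12, max_iter=1000).fit(X, y)
dup = GammaRegressor(alpha=1.0, tol=1e-12, max_iter=1000).fit(
    np.vstack([X, X]), np.r_[y, y]
)
print(np.allclose(dup.coef_, ref.coef_, rtol=1e-5))
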
336
+ @pytest.mark.parametrize("solver", SOLVERS)
337
+ @pytest.mark.parametrize("fit_intercept", [True, False])
338
+ def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
339
+ """Test that unpenalized GLM converges for all solvers to correct solution.
340
+
341
+ We work with a simple constructed data set with known solution.
342
+ Note: This checks the minimum norm solution for wide X, i.e.
343
+ n_samples < n_features:
344
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
345
+ """
346
+ model, X, y, coef, _, _, _ = glm_dataset
347
+ n_samples, n_features = X.shape
348
+ alpha = 0 # unpenalized
349
+ params = dict(
350
+ alpha=alpha,
351
+ fit_intercept=fit_intercept,
352
+ solver=solver,
353
+ tol=1e-12,
354
+ max_iter=1000,
355
+ )
356
+
357
+ model = clone(model).set_params(**params)
358
+ if fit_intercept:
359
+ X = X[:, :-1] # remove intercept
360
+ intercept = coef[-1]
361
+ coef = coef[:-1]
362
+ else:
363
+ intercept = 0
364
+
365
+ with warnings.catch_warnings():
366
+ if solver.startswith("newton") and n_samples < n_features:
367
+ # The newton solvers should warn and automatically fallback to LBFGS
368
+ # in this case. The model should still converge.
369
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
370
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
371
+ # cases should be considered a bug or not. In the mean time we don't
372
+ # fail when the assertions below pass irrespective of the presence of
373
+ # the warning.
374
+ warnings.filterwarnings("ignore", category=ConvergenceWarning)
375
+ model.fit(X, y)
376
+
377
+ # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
378
+ # for the wide/fat case with n_features > n_samples. Most current GLM solvers do
379
+ # NOT return the minimum norm solution with fit_intercept=True.
380
+ if n_samples > n_features:
381
+ rtol = 5e-5 if solver == "lbfgs" else 1e-7
382
+ assert model.intercept_ == pytest.approx(intercept)
383
+ assert_allclose(model.coef_, coef, rtol=rtol)
384
+ else:
385
+ # As it is an underdetermined problem, prediction = y. The following shows that
386
+ # we get a solution, i.e. a (non-unique) minimum of the objective function ...
387
+ rtol = 5e-5
388
+ if solver == "newton-cholesky":
389
+ rtol = 5e-4
390
+ assert_allclose(model.predict(X), y, rtol=rtol)
391
+
392
+ norm_solution = np.linalg.norm(np.r_[intercept, coef])
393
+ norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
394
+ if solver == "newton-cholesky":
395
+ # XXX: This solver shows random behaviour. Sometimes it finds solutions
396
+ # with norm_model <= norm_solution! So we check conditionally.
397
+ if norm_model < (1 + 1e-12) * norm_solution:
398
+ assert model.intercept_ == pytest.approx(intercept)
399
+ assert_allclose(model.coef_, coef, rtol=rtol)
400
+ elif solver == "lbfgs" and fit_intercept:
401
+ # But it is not the minimum norm solution. Otherwise the norms would be
402
+ # equal.
403
+ assert norm_model > (1 + 1e-12) * norm_solution
404
+
405
+ # See https://github.com/scikit-learn/scikit-learn/issues/23670.
406
+ # Note: Even adding a tiny penalty does not give the minimal norm solution.
407
+ # XXX: We could have naively expected LBFGS to find the minimal norm
408
+ # solution by adding a very small penalty. Even that fails for a reason we
409
+ # do not properly understand at this point.
410
+ else:
411
+ # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
412
+ # solution on this problem.
413
+ # XXX: Do we have any theoretical guarantees why this should be the case?
414
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
415
+ assert_allclose(model.coef_, coef, rtol=rtol)
416
+
417
+
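
To make the minimum norm comparison in the wide case concrete, here is a tiny self-contained illustration using plain least squares (purely for exposition, not GLM-specific): adding a null-space component leaves the predictions unchanged but strictly increases the coefficient norm, which is exactly what the norm_model > norm_solution assertions probe.

import numpy as np

rng = np.random.RandomState(0)
n_samples, n_features = 4, 12  # underdetermined problem
X = rng.normal(size=(n_samples, n_features))
y = rng.normal(size=n_samples)

# Minimum norm solution of X w = y (the reference the tests compare against).
w_min, *_ = np.linalg.lstsq(X, y, rcond=None)

# Any null-space direction changes nothing about the predictions ...
_, _, Vt = np.linalg.svd(X)
null_vec = Vt[-1]  # orthogonal to the row space of this full row rank X
w_other = w_min + 0.5 * null_vec

assert np.allclose(X @ w_min, y)
assert np.allclose(X @ w_other, y)
# ... but it strictly increases the norm.
assert np.linalg.norm(w_other) > np.linalg.norm(w_min)
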
418
+ @pytest.mark.parametrize("solver", SOLVERS)
419
+ @pytest.mark.parametrize("fit_intercept", [True, False])
420
+ def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
421
+ """Test that unpenalized GLM converges for all solvers to correct solution.
422
+
423
+ We work with a simple constructed data set with known solution.
424
+ GLM fit on [X] is the same as fit on [X, X]/2.
425
+ For long X, [X, X] is a singular matrix and we check against the minimum norm
426
+ solution:
427
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
428
+ """
429
+ model, X, y, coef, _, _, _ = glm_dataset
430
+ n_samples, n_features = X.shape
431
+ alpha = 0 # unpenalized
432
+ params = dict(
433
+ alpha=alpha,
434
+ fit_intercept=fit_intercept,
435
+ solver=solver,
436
+ tol=1e-12,
437
+ max_iter=1000,
438
+ )
439
+
440
+ model = clone(model).set_params(**params)
441
+ if fit_intercept:
442
+ intercept = coef[-1]
443
+ coef = coef[:-1]
444
+ if n_samples > n_features:
445
+ X = X[:, :-1] # remove intercept
446
+ X = 0.5 * np.concatenate((X, X), axis=1)
447
+ else:
448
+ # To know the minimum norm solution, we keep one intercept column and do
449
+ # not divide by 2. Later on, we must take special care.
450
+ X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]
451
+ else:
452
+ intercept = 0
453
+ X = 0.5 * np.concatenate((X, X), axis=1)
454
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
455
+
456
+ with warnings.catch_warnings():
457
+ if solver.startswith("newton"):
458
+ # The newton solvers should warn and automatically fallback to LBFGS
459
+ # in this case. The model should still converge.
460
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
461
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
462
+ # cases should be considered a bug or not. In the mean time we don't
463
+ # fail when the assertions below pass irrespective of the presence of
464
+ # the warning.
465
+ warnings.filterwarnings("ignore", category=ConvergenceWarning)
466
+ model.fit(X, y)
467
+
468
+ if fit_intercept and n_samples < n_features:
469
+ # Here we take special care.
470
+ model_intercept = 2 * model.intercept_
471
+ model_coef = 2 * model.coef_[:-1] # exclude the other intercept term.
472
+ # For minimum norm solution, we would have
473
+ # assert model.intercept_ == pytest.approx(model.coef_[-1])
474
+ else:
475
+ model_intercept = model.intercept_
476
+ model_coef = model.coef_
477
+
478
+ if n_samples > n_features:
479
+ assert model_intercept == pytest.approx(intercept)
480
+ rtol = 1e-4
481
+ assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol)
482
+ else:
483
+ # As it is an underdetermined problem, prediction = y. The following shows that
484
+ # we get a solution, i.e. a (non-unique) minimum of the objective function ...
485
+ rtol = 1e-6 if solver == "lbfgs" else 5e-6
486
+ assert_allclose(model.predict(X), y, rtol=rtol)
487
+ if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky":
488
+ # Same as in test_glm_regression_unpenalized.
489
+ # But it is not the minimum norm solution. Otherwise the norms would be
490
+ # equal.
491
+ norm_solution = np.linalg.norm(
492
+ 0.5 * np.r_[intercept, intercept, coef, coef]
493
+ )
494
+ norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
495
+ assert norm_model > (1 + 1e-12) * norm_solution
496
+ # For minimum norm solution, we would have
497
+ # assert model.intercept_ == pytest.approx(model.coef_[-1])
498
+ else:
499
+ assert model_intercept == pytest.approx(intercept, rel=5e-6)
500
+ assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4)
501
+
502
+
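The column-duplication identity exercised by this test is plain linear algebra: for any coefficient vector w, applying 0.5 * [X, X] to np.r_[w, w] gives the same linear predictor as applying X to w, hence the same deviance for any link. A standalone numpy check (illustrative shapes only):

import numpy as np

rng = np.random.RandomState(0)
X = rng.standard_normal((8, 3))
w = rng.standard_normal(3)

X_hstacked = 0.5 * np.concatenate((X, X), axis=1)
w_stacked = np.r_[w, w]

# Identical linear predictor, hence identical GLM objective value.
assert np.allclose(X @ w, X_hstacked @ w_stacked)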
503
+ @pytest.mark.parametrize("solver", SOLVERS)
504
+ @pytest.mark.parametrize("fit_intercept", [True, False])
505
+ def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset):
506
+ """Test that unpenalized GLM converges for all solvers to correct solution.
507
+
508
+ We work with a simple constructed data set with known solution.
509
+ GLM fit on X, y is the same as fit on the vertically stacked data
510
+ np.r_[X, X], np.r_[y, y].
511
+ For wide X, [X', X'] is a singular matrix and we check against the minimum norm
512
+ solution:
513
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
514
+ """
515
+ model, X, y, coef, _, _, _ = glm_dataset
516
+ n_samples, n_features = X.shape
517
+ alpha = 0 # unpenalized
518
+ params = dict(
519
+ alpha=alpha,
520
+ fit_intercept=fit_intercept,
521
+ solver=solver,
522
+ tol=1e-12,
523
+ max_iter=1000,
524
+ )
525
+
526
+ model = clone(model).set_params(**params)
527
+ if fit_intercept:
528
+ X = X[:, :-1] # remove intercept
529
+ intercept = coef[-1]
530
+ coef = coef[:-1]
531
+ else:
532
+ intercept = 0
533
+ X = np.concatenate((X, X), axis=0)
534
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
535
+ y = np.r_[y, y]
536
+
537
+ with warnings.catch_warnings():
538
+ if solver.startswith("newton") and n_samples < n_features:
539
+ # The newton solvers should warn and automatically fallback to LBFGS
540
+ # in this case. The model should still converge.
541
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
542
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
543
+ # cases should be considered a bug or not. In the mean time we don't
544
+ # fail when the assertions below pass irrespective of the presence of
545
+ # the warning.
546
+ warnings.filterwarnings("ignore", category=ConvergenceWarning)
547
+ model.fit(X, y)
548
+
549
+ if n_samples > n_features:
550
+ rtol = 5e-5 if solver == "lbfgs" else 1e-6
551
+ assert model.intercept_ == pytest.approx(intercept)
552
+ assert_allclose(model.coef_, coef, rtol=rtol)
553
+ else:
554
+ # As it is an underdetermined problem, prediction = y. The following shows that
555
+ # we get a solution, i.e. a (non-unique) minimum of the objective function ...
556
+ rtol = 1e-6 if solver == "lbfgs" else 5e-6
557
+ assert_allclose(model.predict(X), y, rtol=rtol)
558
+
559
+ norm_solution = np.linalg.norm(np.r_[intercept, coef])
560
+ norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
561
+ if solver == "newton-cholesky":
562
+ # XXX: This solver shows random behaviour. Sometimes it finds solutions
563
+ # with norm_model <= norm_solution! So we check conditionally.
564
+ if not (norm_model > (1 + 1e-12) * norm_solution):
565
+ assert model.intercept_ == pytest.approx(intercept)
566
+ assert_allclose(model.coef_, coef, rtol=1e-4)
567
+ elif solver == "lbfgs" and fit_intercept:
568
+ # Same as in test_glm_regression_unpenalized.
569
+ # But it is not the minimum norm solution. Otherwise the norms would be
570
+ # equal.
571
+ assert norm_model > (1 + 1e-12) * norm_solution
572
+ else:
573
+ rtol = 1e-5 if solver == "newton-cholesky" else 1e-4
574
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
575
+ assert_allclose(model.coef_, coef, rtol=rtol)
576
+
577
+
578
+ def test_sample_weights_validation():
579
+ """Test the raised errors in the validation of sample_weight."""
580
+ # scalar value but not positive
581
+ X = [[1]]
582
+ y = [1]
583
+ weights = 0
584
+ glm = _GeneralizedLinearRegressor()
585
+
586
+ # Positive weights are accepted
587
+ glm.fit(X, y, sample_weight=1)
588
+
589
+ # 2d array
590
+ weights = [[0]]
591
+ with pytest.raises(ValueError, match="must be 1D array or scalar"):
592
+ glm.fit(X, y, weights)
593
+
594
+ # 1d but wrong length
595
+ weights = [1, 0]
596
+ msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
597
+ with pytest.raises(ValueError, match=msg):
598
+ glm.fit(X, y, weights)
599
+
600
+
601
+ @pytest.mark.parametrize(
602
+ "glm",
603
+ [
604
+ TweedieRegressor(power=3),
605
+ PoissonRegressor(),
606
+ GammaRegressor(),
607
+ TweedieRegressor(power=1.5),
608
+ ],
609
+ )
610
+ def test_glm_wrong_y_range(glm):
611
+ y = np.array([-1, 2])
612
+ X = np.array([[1], [1]])
613
+ msg = r"Some value\(s\) of y are out of the valid range of the loss"
614
+ with pytest.raises(ValueError, match=msg):
615
+ glm.fit(X, y)
616
+
617
+
618
+ @pytest.mark.parametrize("fit_intercept", [False, True])
619
+ def test_glm_identity_regression(fit_intercept):
620
+ """Test GLM regression with identity link on a simple dataset."""
621
+ coef = [1.0, 2.0]
622
+ X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
623
+ y = np.dot(X, coef)
624
+ glm = _GeneralizedLinearRegressor(
625
+ alpha=0,
626
+ fit_intercept=fit_intercept,
627
+ tol=1e-12,
628
+ )
629
+ if fit_intercept:
630
+ glm.fit(X[:, 1:], y)
631
+ assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
632
+ assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
633
+ else:
634
+ glm.fit(X, y)
635
+ assert_allclose(glm.coef_, coef, rtol=1e-12)
636
+
637
+
638
+ @pytest.mark.parametrize("fit_intercept", [False, True])
639
+ @pytest.mark.parametrize("alpha", [0.0, 1.0])
640
+ @pytest.mark.parametrize(
641
+ "GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor]
642
+ )
643
+ def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
644
+ """Test that the impact of sample_weight is consistent"""
645
+ rng = np.random.RandomState(0)
646
+ n_samples, n_features = 10, 5
647
+
648
+ X = rng.rand(n_samples, n_features)
649
+ y = rng.rand(n_samples)
650
+ glm_params = dict(alpha=alpha, fit_intercept=fit_intercept)
651
+
652
+ glm = GLMEstimator(**glm_params).fit(X, y)
653
+ coef = glm.coef_.copy()
654
+
655
+ # sample_weight=np.ones(..) should be equivalent to sample_weight=None
656
+ sample_weight = np.ones(y.shape)
657
+ glm.fit(X, y, sample_weight=sample_weight)
658
+ assert_allclose(glm.coef_, coef, rtol=1e-12)
659
+
660
+ # sample_weight is normalized to sum to 1, so scaling it has no effect
661
+ sample_weight = 2 * np.ones(y.shape)
662
+ glm.fit(X, y, sample_weight=sample_weight)
663
+ assert_allclose(glm.coef_, coef, rtol=1e-12)
664
+
665
+ # setting one element of sample_weight to 0 is equivalent to removing
666
+ # the corresponding sample
667
+ sample_weight = np.ones(y.shape)
668
+ sample_weight[-1] = 0
669
+ glm.fit(X, y, sample_weight=sample_weight)
670
+ coef1 = glm.coef_.copy()
671
+ glm.fit(X[:-1], y[:-1])
672
+ assert_allclose(glm.coef_, coef1, rtol=1e-12)
673
+
674
+ # check that multiplying sample_weight by 2 is equivalent
675
+ # to repeating corresponding samples twice
676
+ X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
677
+ y2 = np.concatenate([y, y[: n_samples // 2]])
678
+ sample_weight_1 = np.ones(len(y))
679
+ sample_weight_1[: n_samples // 2] = 2
680
+
681
+ glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1)
682
+
683
+ glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None)
684
+ assert_allclose(glm1.coef_, glm2.coef_)
685
+
686
+
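The same weight/repetition equivalences can be reproduced with the public estimators; a small sketch using PoissonRegressor (arbitrary synthetic data):

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.rand(10, 3)
y = rng.rand(10) + 0.1   # strictly positive targets for the Poisson loss

# Doubling every sample weight ...
reg_weighted = PoissonRegressor(alpha=1.0).fit(X, y, sample_weight=2 * np.ones(10))
# ... is equivalent to unit weights, because the weights only enter the
# objective through a weighted average.
reg_plain = PoissonRegressor(alpha=1.0).fit(X, y)
print(np.max(np.abs(reg_weighted.coef_ - reg_plain.coef_)))   # ~0 up to solver tolerance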
687
+ @pytest.mark.parametrize("solver", SOLVERS)
688
+ @pytest.mark.parametrize("fit_intercept", [True, False])
689
+ @pytest.mark.parametrize(
690
+ "estimator",
691
+ [
692
+ PoissonRegressor(),
693
+ GammaRegressor(),
694
+ TweedieRegressor(power=3.0),
695
+ TweedieRegressor(power=0, link="log"),
696
+ TweedieRegressor(power=1.5),
697
+ TweedieRegressor(power=4.5),
698
+ ],
699
+ )
700
+ def test_glm_log_regression(solver, fit_intercept, estimator):
701
+ """Test GLM regression with log link on a simple dataset."""
702
+ coef = [0.2, -0.1]
703
+ X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T
704
+ y = np.exp(np.dot(X, coef))
705
+ glm = clone(estimator).set_params(
706
+ alpha=0,
707
+ fit_intercept=fit_intercept,
708
+ solver=solver,
709
+ tol=1e-8,
710
+ )
711
+ if fit_intercept:
712
+ res = glm.fit(X[:, :-1], y)
713
+ assert_allclose(res.coef_, coef[:-1], rtol=1e-6)
714
+ assert_allclose(res.intercept_, coef[-1], rtol=1e-6)
715
+ else:
716
+ res = glm.fit(X, y)
717
+ assert_allclose(res.coef_, coef, rtol=2e-6)
718
+
719
+
720
+ @pytest.mark.parametrize("solver", SOLVERS)
721
+ @pytest.mark.parametrize("fit_intercept", [True, False])
722
+ def test_warm_start(solver, fit_intercept, global_random_seed):
723
+ n_samples, n_features = 100, 10
724
+ X, y = make_regression(
725
+ n_samples=n_samples,
726
+ n_features=n_features,
727
+ n_informative=n_features - 2,
728
+ bias=fit_intercept * 1.0,
729
+ noise=1.0,
730
+ random_state=global_random_seed,
731
+ )
732
+ y = np.abs(y) # Poisson requires non-negative targets.
733
+ alpha = 1
734
+ params = {
735
+ "solver": solver,
736
+ "fit_intercept": fit_intercept,
737
+ "tol": 1e-10,
738
+ }
739
+
740
+ glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params)
741
+ glm1.fit(X, y)
742
+
743
+ glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
744
+ # We intentionally set max_iter=1 so that the solver should raise a
745
+ # ConvergenceWarning.
746
+ with pytest.warns(ConvergenceWarning):
747
+ glm2.fit(X, y)
748
+
749
+ linear_loss = LinearModelLoss(
750
+ base_loss=glm1._get_loss(),
751
+ fit_intercept=fit_intercept,
752
+ )
753
+ sw = np.full_like(y, fill_value=1 / n_samples)
754
+
755
+ objective_glm1 = linear_loss.loss(
756
+ coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_,
757
+ X=X,
758
+ y=y,
759
+ sample_weight=sw,
760
+ l2_reg_strength=alpha,
761
+ )
762
+ objective_glm2 = linear_loss.loss(
763
+ coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_,
764
+ X=X,
765
+ y=y,
766
+ sample_weight=sw,
767
+ l2_reg_strength=alpha,
768
+ )
769
+ assert objective_glm1 < objective_glm2
770
+
771
+ glm2.set_params(max_iter=1000)
772
+ glm2.fit(X, y)
773
+ # The two models are not exactly identical since the lbfgs solver
774
+ # computes the approximate hessian from previous iterations, which
775
+ # will not be strictly identical in the case of a warm start.
776
+ rtol = 2e-4 if solver == "lbfgs" else 1e-9
777
+ assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol)
778
+ assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5)
779
+
780
+
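Outside of the test suite, the warm-start pattern looks like the sketch below, using the public PoissonRegressor (synthetic data, arbitrary alpha values):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import PoissonRegressor

X, y = make_regression(n_samples=100, n_features=10, noise=1.0, random_state=0)
y = np.abs(y)   # Poisson requires non-negative targets

reg = PoissonRegressor(warm_start=True, alpha=1.0, max_iter=1000, tol=1e-8)
reg.fit(X, y)              # first fit starts from zero coefficients
reg.set_params(alpha=0.5)
reg.fit(X, y)              # second fit starts from the previous coef_ / intercept_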
781
+ @pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)])
782
+ @pytest.mark.parametrize("fit_intercept", [True, False])
783
+ @pytest.mark.parametrize("sample_weight", [None, True])
784
+ def test_normal_ridge_comparison(
785
+ n_samples, n_features, fit_intercept, sample_weight, request
786
+ ):
787
+ """Compare with Ridge regression for Normal distributions."""
788
+ test_size = 10
789
+ X, y = make_regression(
790
+ n_samples=n_samples + test_size,
791
+ n_features=n_features,
792
+ n_informative=n_features - 2,
793
+ noise=0.5,
794
+ random_state=42,
795
+ )
796
+
797
+ if n_samples > n_features:
798
+ ridge_params = {"solver": "svd"}
799
+ else:
800
+ ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
801
+
802
+ (
803
+ X_train,
804
+ X_test,
805
+ y_train,
806
+ y_test,
807
+ ) = train_test_split(X, y, test_size=test_size, random_state=0)
808
+
809
+ alpha = 1.0
810
+ if sample_weight is None:
811
+ sw_train = None
812
+ alpha_ridge = alpha * n_samples
813
+ else:
814
+ sw_train = np.random.RandomState(0).rand(len(y_train))
815
+ alpha_ridge = alpha * sw_train.sum()
816
+
817
+ # GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2
818
+ ridge = Ridge(
819
+ alpha=alpha_ridge,
820
+ random_state=42,
821
+ fit_intercept=fit_intercept,
822
+ **ridge_params,
823
+ )
824
+ ridge.fit(X_train, y_train, sample_weight=sw_train)
825
+
826
+ glm = _GeneralizedLinearRegressor(
827
+ alpha=alpha,
828
+ fit_intercept=fit_intercept,
829
+ max_iter=300,
830
+ tol=1e-5,
831
+ )
832
+ glm.fit(X_train, y_train, sample_weight=sw_train)
833
+ assert glm.coef_.shape == (X.shape[1],)
834
+ assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
835
+ assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
836
+ assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
837
+ assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
838
+
839
+
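The alpha rescaling in this comparison comes from the different objective conventions: the GLM minimizes 1/(2*n) * sum((y_i - pred_i)^2) + (alpha/2) * ||coef||^2 while Ridge minimizes sum((y_i - pred_i)^2) + alpha_ridge * ||coef||^2, so alpha_ridge = alpha * n_samples (or alpha times the sum of sample weights). A hedged numerical check for the Normal/identity case through the public TweedieRegressor(power=0):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge, TweedieRegressor

X, y = make_regression(n_samples=200, n_features=5, noise=0.5, random_state=0)
alpha = 1.0

glm = TweedieRegressor(power=0, link="identity", alpha=alpha, tol=1e-8, max_iter=1000).fit(X, y)
ridge = Ridge(alpha=alpha * X.shape[0], solver="svd").fit(X, y)

print(np.max(np.abs(glm.coef_ - ridge.coef_)))   # small, up to the lbfgs tolerance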
840
+ @pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"])
841
+ def test_poisson_glmnet(solver):
842
+ """Compare Poisson regression with L2 regularization and LogLink to glmnet"""
843
+ # library("glmnet")
844
+ # options(digits=10)
845
+ # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
846
+ # x <- data.matrix(df[,c("a", "b")])
847
+ # y <- df$y
848
+ # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
849
+ # standardize=F, thresh=1e-10, nlambda=10000)
850
+ # coef(fit, s=1)
851
+ # (Intercept) -0.12889386979
852
+ # a 0.29019207995
853
+ # b 0.03741173122
854
+ X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
855
+ y = np.array([0, 1, 1, 2])
856
+ glm = PoissonRegressor(
857
+ alpha=1,
858
+ fit_intercept=True,
859
+ tol=1e-7,
860
+ max_iter=300,
861
+ solver=solver,
862
+ )
863
+ glm.fit(X, y)
864
+ assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
865
+ assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
866
+
867
+
868
+ def test_convergence_warning(regression_data):
869
+ X, y = regression_data
870
+
871
+ est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20)
872
+ with pytest.warns(ConvergenceWarning):
873
+ est.fit(X, y)
874
+
875
+
876
+ @pytest.mark.parametrize(
877
+ "name, link_class", [("identity", IdentityLink), ("log", LogLink)]
878
+ )
879
+ def test_tweedie_link_argument(name, link_class):
880
+ """Test GLM link argument set as string."""
881
+ y = np.array([0.1, 0.5]) # in range of all distributions
882
+ X = np.array([[1], [2]])
883
+ glm = TweedieRegressor(power=1, link=name).fit(X, y)
884
+ assert isinstance(glm._base_loss.link, link_class)
885
+
886
+
887
+ @pytest.mark.parametrize(
888
+ "power, expected_link_class",
889
+ [
890
+ (0, IdentityLink), # normal
891
+ (1, LogLink), # poisson
892
+ (2, LogLink), # gamma
893
+ (3, LogLink), # inverse-gaussian
894
+ ],
895
+ )
896
+ def test_tweedie_link_auto(power, expected_link_class):
897
+ """Test that link='auto' delivers the expected link function"""
898
+ y = np.array([0.1, 0.5]) # in range of all distributions
899
+ X = np.array([[1], [2]])
900
+ glm = TweedieRegressor(link="auto", power=power).fit(X, y)
901
+ assert isinstance(glm._base_loss.link, expected_link_class)
902
+
903
+
904
+ @pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3])
905
+ @pytest.mark.parametrize("link", ["log", "identity"])
906
+ def test_tweedie_score(regression_data, power, link):
907
+ """Test that GLM score equals d2_tweedie_score for Tweedie losses."""
908
+ X, y = regression_data
909
+ # make y positive
910
+ y = np.abs(y) + 1.0
911
+ glm = TweedieRegressor(power=power, link=link).fit(X, y)
912
+ assert glm.score(X, y) == pytest.approx(
913
+ d2_tweedie_score(y, glm.predict(X), power=power)
914
+ )
915
+
916
+
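For reference, d2_tweedie_score is one minus the ratio of the model's mean Tweedie deviance to the deviance of a constant mean prediction; a short sketch that recomputes it by hand (arbitrary positive data):

import numpy as np
from sklearn.metrics import d2_tweedie_score, mean_tweedie_deviance

rng = np.random.RandomState(0)
y_true = rng.rand(50) + 1.0                       # strictly positive
y_pred = y_true * rng.uniform(0.8, 1.2, size=50)
power = 1.5

dev_model = mean_tweedie_deviance(y_true, y_pred, power=power)
dev_null = mean_tweedie_deviance(y_true, np.full_like(y_true, y_true.mean()), power=power)

assert np.isclose(1.0 - dev_model / dev_null, d2_tweedie_score(y_true, y_pred, power=power))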
917
+ @pytest.mark.parametrize(
918
+ "estimator, value",
919
+ [
920
+ (PoissonRegressor(), True),
921
+ (GammaRegressor(), True),
922
+ (TweedieRegressor(power=1.5), True),
923
+ (TweedieRegressor(power=0), False),
924
+ ],
925
+ )
926
+ def test_tags(estimator, value):
927
+ assert estimator._get_tags()["requires_positive_y"] is value
928
+
929
+
930
+ def test_linalg_warning_with_newton_solver(global_random_seed):
931
+ newton_solver = "newton-cholesky"
932
+ rng = np.random.RandomState(global_random_seed)
933
+ # Use at least 20 samples to reduce the likelihood of getting a degenerate
934
+ # dataset for any global_random_seed.
935
+ X_orig = rng.normal(size=(20, 3))
936
+ y = rng.poisson(
937
+ np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0]
938
+ ).astype(np.float64)
939
+
940
+ # Collinear variation of the same input features.
941
+ X_collinear = np.hstack([X_orig] * 10)
942
+
943
+ # Let's consider the deviance of a constant baseline on this problem.
944
+ baseline_pred = np.full_like(y, y.mean())
945
+ constant_model_deviance = mean_poisson_deviance(y, baseline_pred)
946
+ assert constant_model_deviance > 1.0
947
+
948
+ # No warning raised on well-conditioned design, even without regularization.
949
+ tol = 1e-10
950
+ with warnings.catch_warnings():
951
+ warnings.simplefilter("error")
952
+ reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y)
953
+ original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig))
954
+
955
+ # On this dataset, we should have enough data points to not make it
956
+ # possible to get a near zero deviance (for any of the admissible
957
+ # random seeds). This will make it easier to interpret the meaning of rtol in
958
+ # the subsequent assertions:
959
+ assert original_newton_deviance > 0.2
960
+
961
+ # We check that the model could successfully fit information in X_orig to
962
+ # improve upon the constant baseline by a large margin (when evaluated on
963
+ # the training set).
964
+ assert constant_model_deviance - original_newton_deviance > 0.1
965
+
966
+ # LBFGS is robust to a collinear design because its approximation of the
967
+ # Hessian is Symmetric Positive Definite by construction. Let's record its
968
+ # solution
969
+ with warnings.catch_warnings():
970
+ warnings.simplefilter("error")
971
+ reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y)
972
+ collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
973
+
974
+ # The LBFGS solution on the collinear data is expected to reach a comparable
975
+ # solution to the Newton solution on the original data.
976
+ rtol = 1e-6
977
+ assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol)
978
+
979
+ # Fitting a Newton solver on the collinear version of the training data
980
+ # without regularization should raise an informative warning and fallback
981
+ # to the LBFGS solver.
982
+ msg = (
983
+ "The inner solver of .*Newton.*Solver stumbled upon a singular or very "
984
+ "ill-conditioned Hessian matrix"
985
+ )
986
+ with pytest.warns(scipy.linalg.LinAlgWarning, match=msg):
987
+ reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(
988
+ X_collinear, y
989
+ )
990
+ # As a result we should still automatically converge to a good solution.
991
+ collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
992
+ assert collinear_newton_deviance == pytest.approx(
993
+ original_newton_deviance, rel=rtol
994
+ )
995
+
996
+ # Increasing the regularization slightly should make the problem go away:
997
+ with warnings.catch_warnings():
998
+ warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
999
+ reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y)
1000
+
1001
+ # The slightly penalized model on the collinear data should be close enough
1002
+ # to the unpenalized model on the original data.
1003
+ penalized_collinear_newton_deviance = mean_poisson_deviance(
1004
+ y, reg.predict(X_collinear)
1005
+ )
1006
+ assert penalized_collinear_newton_deviance == pytest.approx(
1007
+ original_newton_deviance, rel=rtol
1008
+ )
1009
+
1010
+
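Catching the fallback warning in user code follows the same pattern; a sketch on a deliberately collinear design (whether the warning actually fires depends on the data and the scikit-learn version, so treat this as illustrative):

import warnings
import numpy as np
import scipy.linalg
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 3))
y = rng.poisson(np.exp(X @ np.ones(3))).astype(float)
X_collinear = np.hstack([X] * 10)   # perfectly collinear columns

with warnings.catch_warnings(record=True) as records:
    warnings.simplefilter("always")
    PoissonRegressor(solver="newton-cholesky", alpha=0.0, tol=1e-10).fit(X_collinear, y)

print(any(issubclass(r.category, scipy.linalg.LinAlgWarning) for r in records))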
1011
+ @pytest.mark.parametrize("verbose", [0, 1, 2])
1012
+ def test_newton_solver_verbosity(capsys, verbose):
1013
+ """Test the std output of verbose newton solvers."""
1014
+ y = np.array([1, 2], dtype=float)
1015
+ X = np.array([[1.0, 0], [0, 1]], dtype=float)
1016
+ linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False)
1017
+ sol = NewtonCholeskySolver(
1018
+ coef=linear_loss.init_zero_coef(X),
1019
+ linear_loss=linear_loss,
1020
+ l2_reg_strength=0,
1021
+ verbose=verbose,
1022
+ )
1023
+ sol.solve(X, y, None) # returns array([0., 0.69314758])
1024
+ captured = capsys.readouterr()
1025
+
1026
+ if verbose == 0:
1027
+ assert captured.out == ""
1028
+ else:
1029
+ msg = [
1030
+ "Newton iter=1",
1031
+ "Check Convergence",
1032
+ "1. max |gradient|",
1033
+ "2. Newton decrement",
1034
+ "Solver did converge at loss = ",
1035
+ ]
1036
+ for m in msg:
1037
+ assert m in captured.out
1038
+
1039
+ if verbose >= 2:
1040
+ msg = ["Backtracking Line Search", "line search iteration="]
1041
+ for m in msg:
1042
+ assert m in captured.out
1043
+
1044
+ # Set the Newton solver to a state with a completely wrong Newton step.
1045
+ sol = NewtonCholeskySolver(
1046
+ coef=linear_loss.init_zero_coef(X),
1047
+ linear_loss=linear_loss,
1048
+ l2_reg_strength=0,
1049
+ verbose=verbose,
1050
+ )
1051
+ sol.setup(X=X, y=y, sample_weight=None)
1052
+ sol.iteration = 1
1053
+ sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
1054
+ sol.coef_newton = np.array([1.0, 0])
1055
+ sol.gradient_times_newton = sol.gradient @ sol.coef_newton
1056
+ with warnings.catch_warnings():
1057
+ warnings.simplefilter("ignore", ConvergenceWarning)
1058
+ sol.line_search(X=X, y=y, sample_weight=None)
1059
+ captured = capsys.readouterr()
1060
+ if verbose >= 1:
1061
+ assert (
1062
+ "Line search did not converge and resorts to lbfgs instead." in captured.out
1063
+ )
1064
+
1065
+ # Set the Newton solver to a state with a bad Newton step such that the loss
1066
+ # improvement in line search is tiny.
1067
+ sol = NewtonCholeskySolver(
1068
+ coef=np.array([1e-12, 0.69314758]),
1069
+ linear_loss=linear_loss,
1070
+ l2_reg_strength=0,
1071
+ verbose=verbose,
1072
+ )
1073
+ sol.setup(X=X, y=y, sample_weight=None)
1074
+ sol.iteration = 1
1075
+ sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
1076
+ sol.coef_newton = np.array([1e-6, 0])
1077
+ sol.gradient_times_newton = sol.gradient @ sol.coef_newton
1078
+ with warnings.catch_warnings():
1079
+ warnings.simplefilter("ignore", ConvergenceWarning)
1080
+ sol.line_search(X=X, y=y, sample_weight=None)
1081
+ captured = capsys.readouterr()
1082
+ if verbose >= 2:
1083
+ msg = [
1084
+ "line search iteration=",
1085
+ "check loss improvement <= armijo term:",
1086
+ "check loss |improvement| <= eps * |loss_old|:",
1087
+ "check sum(|gradient|) < sum(|gradient_old|):",
1088
+ ]
1089
+ for m in msg:
1090
+ assert m in captured.out
1091
+
1092
+ # Test for a case with negative hessian. We badly initialize coef for a Tweedie
1093
+ # loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link.
1094
+ linear_loss = LinearModelLoss(
1095
+ base_loss=HalfTweedieLoss(power=3), fit_intercept=False
1096
+ )
1097
+ sol = NewtonCholeskySolver(
1098
+ coef=linear_loss.init_zero_coef(X) + 1,
1099
+ linear_loss=linear_loss,
1100
+ l2_reg_strength=0,
1101
+ verbose=verbose,
1102
+ )
1103
+ with warnings.catch_warnings():
1104
+ warnings.simplefilter("ignore", ConvergenceWarning)
1105
+ sol.solve(X, y, None)
1106
+ captured = capsys.readouterr()
1107
+ if verbose >= 1:
1108
+ assert (
1109
+ "The inner solver detected a pointwise Hessian with many negative values"
1110
+ " and resorts to lbfgs instead."
1111
+ in captured.out
1112
+ )
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py ADDED
@@ -0,0 +1,2306 @@
1
+ """
2
+ Least Angle Regression algorithm. See the documentation on the
3
+ Generalized Linear Model for a complete discussion.
4
+ """
5
+ # Author: Fabian Pedregosa <[email protected]>
6
+ # Alexandre Gramfort <[email protected]>
7
+ # Gael Varoquaux
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ import sys
12
+ import warnings
13
+ from math import log
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ from scipy import interpolate, linalg
18
+ from scipy.linalg.lapack import get_lapack_funcs
19
+
20
+ from ..base import MultiOutputMixin, RegressorMixin, _fit_context
21
+ from ..exceptions import ConvergenceWarning
22
+ from ..model_selection import check_cv
23
+
24
+ # mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs'
25
+ from ..utils import ( # type: ignore
26
+ Bunch,
27
+ arrayfuncs,
28
+ as_float_array,
29
+ check_random_state,
30
+ )
31
+ from ..utils._metadata_requests import (
32
+ MetadataRouter,
33
+ MethodMapping,
34
+ _raise_for_params,
35
+ _routing_enabled,
36
+ process_routing,
37
+ )
38
+ from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
39
+ from ..utils.parallel import Parallel, delayed
40
+ from ._base import LinearModel, LinearRegression, _preprocess_data
41
+
42
+ SOLVE_TRIANGULAR_ARGS = {"check_finite": False}
43
+
44
+
45
+ @validate_params(
46
+ {
47
+ "X": [np.ndarray, None],
48
+ "y": [np.ndarray, None],
49
+ "Xy": [np.ndarray, None],
50
+ "Gram": [StrOptions({"auto"}), "boolean", np.ndarray, None],
51
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
52
+ "alpha_min": [Interval(Real, 0, None, closed="left")],
53
+ "method": [StrOptions({"lar", "lasso"})],
54
+ "copy_X": ["boolean"],
55
+ "eps": [Interval(Real, 0, None, closed="neither"), None],
56
+ "copy_Gram": ["boolean"],
57
+ "verbose": ["verbose"],
58
+ "return_path": ["boolean"],
59
+ "return_n_iter": ["boolean"],
60
+ "positive": ["boolean"],
61
+ },
62
+ prefer_skip_nested_validation=True,
63
+ )
64
+ def lars_path(
65
+ X,
66
+ y,
67
+ Xy=None,
68
+ *,
69
+ Gram=None,
70
+ max_iter=500,
71
+ alpha_min=0,
72
+ method="lar",
73
+ copy_X=True,
74
+ eps=np.finfo(float).eps,
75
+ copy_Gram=True,
76
+ verbose=0,
77
+ return_path=True,
78
+ return_n_iter=False,
79
+ positive=False,
80
+ ):
81
+ """Compute Least Angle Regression or Lasso path using the LARS algorithm [1].
82
+
83
+ The optimization objective for the case method='lasso' is::
84
+
85
+ (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
86
+
87
+ in the case of method='lar', the objective function is only known in
88
+ the form of an implicit equation (see discussion in [1]).
89
+
90
+ Read more in the :ref:`User Guide <least_angle_regression>`.
91
+
92
+ Parameters
93
+ ----------
94
+ X : None or ndarray of shape (n_samples, n_features)
95
+ Input data. Note that if X is `None` then the Gram matrix must be
96
+ specified, i.e., cannot be `None` or `False`.
97
+
98
+ y : None or ndarray of shape (n_samples,)
99
+ Input targets.
100
+
101
+ Xy : array-like of shape (n_features,) or (n_features, n_targets), \
102
+ default=None
103
+ `Xy = X.T @ y` that can be precomputed. It is useful
104
+ only when the Gram matrix is precomputed.
105
+
106
+ Gram : None, 'auto', bool, ndarray of shape (n_features, n_features), \
107
+ default=None
108
+ Precomputed Gram matrix `X.T @ X`. If `'auto'`, the Gram
109
+ matrix is precomputed from the given X, if there are more samples
110
+ than features.
111
+
112
+ max_iter : int, default=500
113
+ Maximum number of iterations to perform, set to infinity for no limit.
114
+
115
+ alpha_min : float, default=0
116
+ Minimum correlation along the path. It corresponds to the
117
+ regularization parameter `alpha` in the Lasso.
118
+
119
+ method : {'lar', 'lasso'}, default='lar'
120
+ Specifies the returned model. Select `'lar'` for Least Angle
121
+ Regression, `'lasso'` for the Lasso.
122
+
123
+ copy_X : bool, default=True
124
+ If `False`, `X` is overwritten.
125
+
126
+ eps : float, default=np.finfo(float).eps
127
+ The machine-precision regularization in the computation of the
128
+ Cholesky diagonal factors. Increase this for very ill-conditioned
129
+ systems. Unlike the `tol` parameter in some iterative
130
+ optimization-based algorithms, this parameter does not control
131
+ the tolerance of the optimization.
132
+
133
+ copy_Gram : bool, default=True
134
+ If `False`, `Gram` is overwritten.
135
+
136
+ verbose : int, default=0
137
+ Controls output verbosity.
138
+
139
+ return_path : bool, default=True
140
+ If `True`, returns the entire path, else returns only the
141
+ last point of the path.
142
+
143
+ return_n_iter : bool, default=False
144
+ Whether to return the number of iterations.
145
+
146
+ positive : bool, default=False
147
+ Restrict coefficients to be >= 0.
148
+ This option is only allowed with method 'lasso'. Note that the model
149
+ coefficients will not converge to the ordinary-least-squares solution
150
+ for small values of alpha. Only coefficients up to the smallest alpha
151
+ value (`alphas_[alphas_ > 0.].min()` when fit_path=True) reached by
152
+ the stepwise Lars-Lasso algorithm are typically in congruence with the
153
+ solution of the coordinate descent `lasso_path` function.
154
+
155
+ Returns
156
+ -------
157
+ alphas : ndarray of shape (n_alphas + 1,)
158
+ Maximum of covariances (in absolute value) at each iteration.
159
+ `n_alphas` is either `max_iter`, `n_features`, or the
160
+ number of nodes in the path with `alpha >= alpha_min`, whichever
161
+ is smaller.
162
+
163
+ active : ndarray of shape (n_alphas,)
164
+ Indices of active variables at the end of the path.
165
+
166
+ coefs : ndarray of shape (n_features, n_alphas + 1)
167
+ Coefficients along the path.
168
+
169
+ n_iter : int
170
+ Number of iterations run. Returned only if `return_n_iter` is set
171
+ to True.
172
+
173
+ See Also
174
+ --------
175
+ lars_path_gram : Compute LARS path in the sufficient stats mode.
176
+ lasso_path : Compute Lasso path with coordinate descent.
177
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
178
+ Lars : Least Angle Regression model a.k.a. LAR.
179
+ LassoLarsCV : Cross-validated Lasso, using the LARS algorithm.
180
+ LarsCV : Cross-validated Least Angle Regression model.
181
+ sklearn.decomposition.sparse_encode : Sparse coding.
182
+
183
+ References
184
+ ----------
185
+ .. [1] "Least Angle Regression", Efron et al.
186
+ http://statweb.stanford.edu/~tibs/ftp/lars.pdf
187
+
188
+ .. [2] `Wikipedia entry on the Least-angle regression
189
+ <https://en.wikipedia.org/wiki/Least-angle_regression>`_
190
+
191
+ .. [3] `Wikipedia entry on the Lasso
192
+ <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
193
+ """
194
+ if X is None and Gram is not None:
195
+ raise ValueError(
196
+ "X cannot be None if Gram is not None"
197
+ "Use lars_path_gram to avoid passing X and y."
198
+ )
199
+ return _lars_path_solver(
200
+ X=X,
201
+ y=y,
202
+ Xy=Xy,
203
+ Gram=Gram,
204
+ n_samples=None,
205
+ max_iter=max_iter,
206
+ alpha_min=alpha_min,
207
+ method=method,
208
+ copy_X=copy_X,
209
+ eps=eps,
210
+ copy_Gram=copy_Gram,
211
+ verbose=verbose,
212
+ return_path=return_path,
213
+ return_n_iter=return_n_iter,
214
+ positive=positive,
215
+ )
216
+
217
+
218
+ @validate_params(
219
+ {
220
+ "Xy": [np.ndarray],
221
+ "Gram": [np.ndarray],
222
+ "n_samples": [Interval(Integral, 0, None, closed="left")],
223
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
224
+ "alpha_min": [Interval(Real, 0, None, closed="left")],
225
+ "method": [StrOptions({"lar", "lasso"})],
226
+ "copy_X": ["boolean"],
227
+ "eps": [Interval(Real, 0, None, closed="neither"), None],
228
+ "copy_Gram": ["boolean"],
229
+ "verbose": ["verbose"],
230
+ "return_path": ["boolean"],
231
+ "return_n_iter": ["boolean"],
232
+ "positive": ["boolean"],
233
+ },
234
+ prefer_skip_nested_validation=True,
235
+ )
236
+ def lars_path_gram(
237
+ Xy,
238
+ Gram,
239
+ *,
240
+ n_samples,
241
+ max_iter=500,
242
+ alpha_min=0,
243
+ method="lar",
244
+ copy_X=True,
245
+ eps=np.finfo(float).eps,
246
+ copy_Gram=True,
247
+ verbose=0,
248
+ return_path=True,
249
+ return_n_iter=False,
250
+ positive=False,
251
+ ):
252
+ """The lars_path in the sufficient stats mode [1].
253
+
254
+ The optimization objective for the case method='lasso' is::
255
+
256
+ (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
257
+
258
+ in the case of method='lar', the objective function is only known in
259
+ the form of an implicit equation (see discussion in [1])
260
+
261
+ Read more in the :ref:`User Guide <least_angle_regression>`.
262
+
263
+ Parameters
264
+ ----------
265
+ Xy : ndarray of shape (n_features,) or (n_features, n_targets)
266
+ `Xy = X.T @ y`.
267
+
268
+ Gram : ndarray of shape (n_features, n_features)
269
+ `Gram = X.T @ X`.
270
+
271
+ n_samples : int
272
+ Equivalent size of sample.
273
+
274
+ max_iter : int, default=500
275
+ Maximum number of iterations to perform, set to infinity for no limit.
276
+
277
+ alpha_min : float, default=0
278
+ Minimum correlation along the path. It corresponds to the
279
+ regularization parameter alpha parameter in the Lasso.
280
+
281
+ method : {'lar', 'lasso'}, default='lar'
282
+ Specifies the returned model. Select `'lar'` for Least Angle
283
+ Regression, ``'lasso'`` for the Lasso.
284
+
285
+ copy_X : bool, default=True
286
+ If `False`, `X` is overwritten.
287
+
288
+ eps : float, default=np.finfo(float).eps
289
+ The machine-precision regularization in the computation of the
290
+ Cholesky diagonal factors. Increase this for very ill-conditioned
291
+ systems. Unlike the `tol` parameter in some iterative
292
+ optimization-based algorithms, this parameter does not control
293
+ the tolerance of the optimization.
294
+
295
+ copy_Gram : bool, default=True
296
+ If `False`, `Gram` is overwritten.
297
+
298
+ verbose : int, default=0
299
+ Controls output verbosity.
300
+
301
+ return_path : bool, default=True
302
+ If `return_path==True` returns the entire path, else returns only the
303
+ last point of the path.
304
+
305
+ return_n_iter : bool, default=False
306
+ Whether to return the number of iterations.
307
+
308
+ positive : bool, default=False
309
+ Restrict coefficients to be >= 0.
310
+ This option is only allowed with method 'lasso'. Note that the model
311
+ coefficients will not converge to the ordinary-least-squares solution
312
+ for small values of alpha. Only coefficients up to the smallest alpha
313
+ value (`alphas_[alphas_ > 0.].min()` when `fit_path=True`) reached by
314
+ the stepwise Lars-Lasso algorithm are typically in congruence with the
315
+ solution of the coordinate descent lasso_path function.
316
+
317
+ Returns
318
+ -------
319
+ alphas : ndarray of shape (n_alphas + 1,)
320
+ Maximum of covariances (in absolute value) at each iteration.
321
+ `n_alphas` is either `max_iter`, `n_features` or the
322
+ number of nodes in the path with `alpha >= alpha_min`, whichever
323
+ is smaller.
324
+
325
+ active : ndarray of shape (n_alphas,)
326
+ Indices of active variables at the end of the path.
327
+
328
+ coefs : ndarray of shape (n_features, n_alphas + 1)
329
+ Coefficients along the path.
330
+
331
+ n_iter : int
332
+ Number of iterations run. Returned only if `return_n_iter` is set
333
+ to True.
334
+
335
+ See Also
336
+ --------
337
+ lars_path : Compute LARS path.
338
+ lasso_path : Compute Lasso path with coordinate descent.
339
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
340
+ Lars : Least Angle Regression model a.k.a. LAR.
341
+ LassoLarsCV : Cross-validated Lasso, using the LARS algorithm.
342
+ LarsCV : Cross-validated Least Angle Regression model.
343
+ sklearn.decomposition.sparse_encode : Sparse coding.
344
+
345
+ References
346
+ ----------
347
+ .. [1] "Least Angle Regression", Efron et al.
348
+ http://statweb.stanford.edu/~tibs/ftp/lars.pdf
349
+
350
+ .. [2] `Wikipedia entry on the Least-angle regression
351
+ <https://en.wikipedia.org/wiki/Least-angle_regression>`_
352
+
353
+ .. [3] `Wikipedia entry on the Lasso
354
+ <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
355
+ """
356
+ return _lars_path_solver(
357
+ X=None,
358
+ y=None,
359
+ Xy=Xy,
360
+ Gram=Gram,
361
+ n_samples=n_samples,
362
+ max_iter=max_iter,
363
+ alpha_min=alpha_min,
364
+ method=method,
365
+ copy_X=copy_X,
366
+ eps=eps,
367
+ copy_Gram=copy_Gram,
368
+ verbose=verbose,
369
+ return_path=return_path,
370
+ return_n_iter=return_n_iter,
371
+ positive=positive,
372
+ )
373
+
374
+
375
+ def _lars_path_solver(
376
+ X,
377
+ y,
378
+ Xy=None,
379
+ Gram=None,
380
+ n_samples=None,
381
+ max_iter=500,
382
+ alpha_min=0,
383
+ method="lar",
384
+ copy_X=True,
385
+ eps=np.finfo(float).eps,
386
+ copy_Gram=True,
387
+ verbose=0,
388
+ return_path=True,
389
+ return_n_iter=False,
390
+ positive=False,
391
+ ):
392
+ """Compute Least Angle Regression or Lasso path using LARS algorithm [1]
393
+
394
+ The optimization objective for the case method='lasso' is::
395
+
396
+ (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
397
+
398
+ in the case of method='lar', the objective function is only known in
399
+ the form of an implicit equation (see discussion in [1])
400
+
401
+ Read more in the :ref:`User Guide <least_angle_regression>`.
402
+
403
+ Parameters
404
+ ----------
405
+ X : None or ndarray of shape (n_samples, n_features)
406
+ Input data. Note that if X is None then Gram must be specified,
407
+ i.e., cannot be None or False.
408
+
409
+ y : None or ndarray of shape (n_samples,)
410
+ Input targets.
411
+
412
+ Xy : array-like of shape (n_features,) or (n_features, n_targets), \
413
+ default=None
414
+ `Xy = np.dot(X.T, y)` that can be precomputed. It is useful
415
+ only when the Gram matrix is precomputed.
416
+
417
+ Gram : None, 'auto' or array-like of shape (n_features, n_features), \
418
+ default=None
419
+ Precomputed Gram matrix `(X' * X)`. If ``'auto'``, the Gram
420
+ matrix is precomputed from the given X, if there are more samples
421
+ than features.
422
+
423
+ n_samples : int or float, default=None
424
+ Equivalent size of sample. If `None`, it is inferred from `y` as `y.size`.
425
+
426
+ max_iter : int, default=500
427
+ Maximum number of iterations to perform, set to infinity for no limit.
428
+
429
+ alpha_min : float, default=0
430
+ Minimum correlation along the path. It corresponds to the
431
+ regularization parameter alpha parameter in the Lasso.
432
+
433
+ method : {'lar', 'lasso'}, default='lar'
434
+ Specifies the returned model. Select ``'lar'`` for Least Angle
435
+ Regression, ``'lasso'`` for the Lasso.
436
+
437
+ copy_X : bool, default=True
438
+ If ``False``, ``X`` is overwritten.
439
+
440
+ eps : float, default=np.finfo(float).eps
441
+ The machine-precision regularization in the computation of the
442
+ Cholesky diagonal factors. Increase this for very ill-conditioned
443
+ systems. Unlike the ``tol`` parameter in some iterative
444
+ optimization-based algorithms, this parameter does not control
445
+ the tolerance of the optimization.
446
+
447
+ copy_Gram : bool, default=True
448
+ If ``False``, ``Gram`` is overwritten.
449
+
450
+ verbose : int, default=0
451
+ Controls output verbosity.
452
+
453
+ return_path : bool, default=True
454
+ If ``return_path==True`` returns the entire path, else returns only the
455
+ last point of the path.
456
+
457
+ return_n_iter : bool, default=False
458
+ Whether to return the number of iterations.
459
+
460
+ positive : bool, default=False
461
+ Restrict coefficients to be >= 0.
462
+ This option is only allowed with method 'lasso'. Note that the model
463
+ coefficients will not converge to the ordinary-least-squares solution
464
+ for small values of alpha. Only coefficients up to the smallest alpha
465
+ value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
466
+ the stepwise Lars-Lasso algorithm are typically in congruence with the
467
+ solution of the coordinate descent lasso_path function.
468
+
469
+ Returns
470
+ -------
471
+ alphas : array-like of shape (n_alphas + 1,)
472
+ Maximum of covariances (in absolute value) at each iteration.
473
+ ``n_alphas`` is either ``max_iter``, ``n_features`` or the
474
+ number of nodes in the path with ``alpha >= alpha_min``, whichever
475
+ is smaller.
476
+
477
+ active : array-like of shape (n_alphas,)
478
+ Indices of active variables at the end of the path.
479
+
480
+ coefs : array-like of shape (n_features, n_alphas + 1)
481
+ Coefficients along the path
482
+
483
+ n_iter : int
484
+ Number of iterations run. Returned only if return_n_iter is set
485
+ to True.
486
+
487
+ See Also
488
+ --------
489
+ lasso_path
490
+ LassoLars
491
+ Lars
492
+ LassoLarsCV
493
+ LarsCV
494
+ sklearn.decomposition.sparse_encode
495
+
496
+ References
497
+ ----------
498
+ .. [1] "Least Angle Regression", Efron et al.
499
+ http://statweb.stanford.edu/~tibs/ftp/lars.pdf
500
+
501
+ .. [2] `Wikipedia entry on the Least-angle regression
502
+ <https://en.wikipedia.org/wiki/Least-angle_regression>`_
503
+
504
+ .. [3] `Wikipedia entry on the Lasso
505
+ <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
506
+
507
+ """
508
+ if method == "lar" and positive:
509
+ raise ValueError("Positive constraint not supported for 'lar' coding method.")
510
+
511
+ n_samples = n_samples if n_samples is not None else y.size
512
+
513
+ if Xy is None:
514
+ Cov = np.dot(X.T, y)
515
+ else:
516
+ Cov = Xy.copy()
517
+
518
+ if Gram is None or Gram is False:
519
+ Gram = None
520
+ if X is None:
521
+ raise ValueError("X and Gram cannot both be unspecified.")
522
+ elif isinstance(Gram, str) and Gram == "auto" or Gram is True:
523
+ if Gram is True or X.shape[0] > X.shape[1]:
524
+ Gram = np.dot(X.T, X)
525
+ else:
526
+ Gram = None
527
+ elif copy_Gram:
528
+ Gram = Gram.copy()
529
+
530
+ if Gram is None:
531
+ n_features = X.shape[1]
532
+ else:
533
+ n_features = Cov.shape[0]
534
+ if Gram.shape != (n_features, n_features):
535
+ raise ValueError("The shapes of the inputs Gram and Xy do not match.")
536
+
537
+ if copy_X and X is not None and Gram is None:
538
+ # force copy. setting the array to be fortran-ordered
539
+ # speeds up the calculation of the (partial) Gram matrix
540
+ # and allows to easily swap columns
541
+ X = X.copy("F")
542
+
543
+ max_features = min(max_iter, n_features)
544
+
545
+ dtypes = set(a.dtype for a in (X, y, Xy, Gram) if a is not None)
546
+ if len(dtypes) == 1:
547
+ # use the precision level of input data if it is consistent
548
+ return_dtype = next(iter(dtypes))
549
+ else:
550
+ # fallback to double precision otherwise
551
+ return_dtype = np.float64
552
+
553
+ if return_path:
554
+ coefs = np.zeros((max_features + 1, n_features), dtype=return_dtype)
555
+ alphas = np.zeros(max_features + 1, dtype=return_dtype)
556
+ else:
557
+ coef, prev_coef = (
558
+ np.zeros(n_features, dtype=return_dtype),
559
+ np.zeros(n_features, dtype=return_dtype),
560
+ )
561
+ alpha, prev_alpha = (
562
+ np.array([0.0], dtype=return_dtype),
563
+ np.array([0.0], dtype=return_dtype),
564
+ )
565
+ # above better ideas?
566
+
567
+ n_iter, n_active = 0, 0
568
+ active, indices = list(), np.arange(n_features)
569
+ # holds the sign of covariance
570
+ sign_active = np.empty(max_features, dtype=np.int8)
571
+ drop = False
572
+
573
+ # will hold the cholesky factorization. Only lower part is
574
+ # referenced.
575
+ if Gram is None:
576
+ L = np.empty((max_features, max_features), dtype=X.dtype)
577
+ swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (X,))
578
+ else:
579
+ L = np.empty((max_features, max_features), dtype=Gram.dtype)
580
+ swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (Cov,))
581
+ (solve_cholesky,) = get_lapack_funcs(("potrs",), (L,))
582
+
583
+ if verbose:
584
+ if verbose > 1:
585
+ print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
586
+ else:
587
+ sys.stdout.write(".")
588
+ sys.stdout.flush()
589
+
590
+ tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
591
+ cov_precision = np.finfo(Cov.dtype).precision
592
+ equality_tolerance = np.finfo(np.float32).eps
593
+
594
+ if Gram is not None:
595
+ Gram_copy = Gram.copy()
596
+ Cov_copy = Cov.copy()
597
+
598
+ while True:
599
+ if Cov.size:
600
+ if positive:
601
+ C_idx = np.argmax(Cov)
602
+ else:
603
+ C_idx = np.argmax(np.abs(Cov))
604
+
605
+ C_ = Cov[C_idx]
606
+
607
+ if positive:
608
+ C = C_
609
+ else:
610
+ C = np.fabs(C_)
611
+ else:
612
+ C = 0.0
613
+
614
+ if return_path:
615
+ alpha = alphas[n_iter, np.newaxis]
616
+ coef = coefs[n_iter]
617
+ prev_alpha = alphas[n_iter - 1, np.newaxis]
618
+ prev_coef = coefs[n_iter - 1]
619
+
620
+ alpha[0] = C / n_samples
621
+ if alpha[0] <= alpha_min + equality_tolerance: # early stopping
622
+ if abs(alpha[0] - alpha_min) > equality_tolerance:
623
+ # interpolation factor 0 <= ss < 1
624
+ if n_iter > 0:
625
+ # In the first iteration, all alphas are zero, the formula
626
+ # below would make ss a NaN
627
+ ss = (prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0])
628
+ coef[:] = prev_coef + ss * (coef - prev_coef)
629
+ alpha[0] = alpha_min
630
+ if return_path:
631
+ coefs[n_iter] = coef
632
+ break
633
+
634
+ if n_iter >= max_iter or n_active >= n_features:
635
+ break
636
+ if not drop:
637
+ ##########################################################
638
+ # Append x_j to the Cholesky factorization of (Xa * Xa') #
639
+ # #
640
+ # ( L 0 ) #
641
+ # L -> ( ) , where L * w = Xa' x_j #
642
+ # ( w z ) and z = ||x_j|| #
643
+ # #
644
+ ##########################################################
645
+
646
+ if positive:
647
+ sign_active[n_active] = np.ones_like(C_)
648
+ else:
649
+ sign_active[n_active] = np.sign(C_)
650
+ m, n = n_active, C_idx + n_active
651
+
652
+ Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
653
+ indices[n], indices[m] = indices[m], indices[n]
654
+ Cov_not_shortened = Cov
655
+ Cov = Cov[1:] # remove Cov[0]
656
+
657
+ if Gram is None:
658
+ X.T[n], X.T[m] = swap(X.T[n], X.T[m])
659
+ c = nrm2(X.T[n_active]) ** 2
660
+ L[n_active, :n_active] = np.dot(X.T[n_active], X.T[:n_active].T)
661
+ else:
662
+ # swap does only work inplace if matrix is fortran
663
+ # contiguous ...
664
+ Gram[m], Gram[n] = swap(Gram[m], Gram[n])
665
+ Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
666
+ c = Gram[n_active, n_active]
667
+ L[n_active, :n_active] = Gram[n_active, :n_active]
668
+
669
+ # Update the cholesky decomposition for the Gram matrix
670
+ if n_active:
671
+ linalg.solve_triangular(
672
+ L[:n_active, :n_active],
673
+ L[n_active, :n_active],
674
+ trans=0,
675
+ lower=1,
676
+ overwrite_b=True,
677
+ **SOLVE_TRIANGULAR_ARGS,
678
+ )
679
+
680
+ v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
681
+ diag = max(np.sqrt(np.abs(c - v)), eps)
682
+ L[n_active, n_active] = diag
683
+
684
+ if diag < 1e-7:
685
+ # The system is becoming too ill-conditioned.
686
+ # We have degenerate vectors in our active set.
687
+ # We'll 'drop for good' the last regressor added.
688
+ warnings.warn(
689
+ "Regressors in active set degenerate. "
690
+ "Dropping a regressor, after %i iterations, "
691
+ "i.e. alpha=%.3e, "
692
+ "with an active set of %i regressors, and "
693
+ "the smallest cholesky pivot element being %.3e."
694
+ " Reduce max_iter or increase eps parameters."
695
+ % (n_iter, alpha.item(), n_active, diag),
696
+ ConvergenceWarning,
697
+ )
698
+
699
+ # XXX: need to figure a 'drop for good' way
700
+ Cov = Cov_not_shortened
701
+ Cov[0] = 0
702
+ Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
703
+ continue
704
+
705
+ active.append(indices[n_active])
706
+ n_active += 1
707
+
708
+ if verbose > 1:
709
+ print(
710
+ "%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], "", n_active, C)
711
+ )
712
+
713
+ if method == "lasso" and n_iter > 0 and prev_alpha[0] < alpha[0]:
714
+ # alpha is increasing. This is because the updates of Cov are
715
+ # bringing in too much numerical error that is greater than
716
+ # than the remaining correlation with the
717
+ # regressors. Time to bail out
718
+ warnings.warn(
719
+ "Early stopping the lars path, as the residues "
720
+ "are small and the current value of alpha is no "
721
+ "longer well controlled. %i iterations, alpha=%.3e, "
722
+ "previous alpha=%.3e, with an active set of %i "
723
+ "regressors." % (n_iter, alpha.item(), prev_alpha.item(), n_active),
724
+ ConvergenceWarning,
725
+ )
726
+ break
727
+
728
+ # least squares solution
729
+ least_squares, _ = solve_cholesky(
730
+ L[:n_active, :n_active], sign_active[:n_active], lower=True
731
+ )
732
+
733
+ if least_squares.size == 1 and least_squares == 0:
734
+ # This happens because sign_active[:n_active] = 0
735
+ least_squares[...] = 1
736
+ AA = 1.0
737
+ else:
738
+ # is this really needed ?
739
+ AA = 1.0 / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
740
+
741
+ if not np.isfinite(AA):
742
+ # L is too ill-conditioned
743
+ i = 0
744
+ L_ = L[:n_active, :n_active].copy()
745
+ while not np.isfinite(AA):
746
+ L_.flat[:: n_active + 1] += (2**i) * eps
747
+ least_squares, _ = solve_cholesky(
748
+ L_, sign_active[:n_active], lower=True
749
+ )
750
+ tmp = max(np.sum(least_squares * sign_active[:n_active]), eps)
751
+ AA = 1.0 / np.sqrt(tmp)
752
+ i += 1
753
+ least_squares *= AA
754
+
755
+ if Gram is None:
756
+ # equiangular direction of variables in the active set
757
+ eq_dir = np.dot(X.T[:n_active].T, least_squares)
758
+ # correlation between each unactive variables and
759
+ # eqiangular vector
760
+ corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
761
+ else:
762
+ # if huge number of features, this takes 50% of time, I
763
+ # think could be avoided if we just update it using an
764
+ # orthogonal (QR) decomposition of X
765
+ corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares)
766
+
767
+ # Explicit rounding can be necessary to avoid `np.argmax(Cov)` yielding
768
+ # unstable results because of rounding errors.
769
+ np.around(corr_eq_dir, decimals=cov_precision, out=corr_eq_dir)
770
+
771
+ g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))
772
+ if positive:
773
+ gamma_ = min(g1, C / AA)
774
+ else:
775
+ g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))
776
+ gamma_ = min(g1, g2, C / AA)
777
+
778
+ # TODO: better names for these variables: z
779
+ drop = False
780
+ z = -coef[active] / (least_squares + tiny32)
781
+ z_pos = arrayfuncs.min_pos(z)
782
+ if z_pos < gamma_:
783
+ # some coefficients have changed sign
784
+ idx = np.where(z == z_pos)[0][::-1]
785
+
786
+ # update the sign, important for LAR
787
+ sign_active[idx] = -sign_active[idx]
788
+
789
+ if method == "lasso":
790
+ gamma_ = z_pos
791
+ drop = True
792
+
793
+ n_iter += 1
794
+
795
+ if return_path:
796
+ if n_iter >= coefs.shape[0]:
797
+ del coef, alpha, prev_alpha, prev_coef
798
+ # resize the coefs and alphas array
799
+ add_features = 2 * max(1, (max_features - n_active))
800
+ coefs = np.resize(coefs, (n_iter + add_features, n_features))
801
+ coefs[-add_features:] = 0
802
+ alphas = np.resize(alphas, n_iter + add_features)
803
+ alphas[-add_features:] = 0
804
+ coef = coefs[n_iter]
805
+ prev_coef = coefs[n_iter - 1]
806
+ else:
807
+ # mimic the effect of incrementing n_iter on the array references
808
+ prev_coef = coef
809
+ prev_alpha[0] = alpha[0]
810
+ coef = np.zeros_like(coef)
811
+
812
+ coef[active] = prev_coef[active] + gamma_ * least_squares
813
+
814
+ # update correlations
815
+ Cov -= gamma_ * corr_eq_dir
816
+
817
+ # See if any coefficient has changed sign
818
+ if drop and method == "lasso":
819
+ # handle the case when idx is not of length 1
820
+ for ii in idx:
821
+ arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)
822
+
823
+ n_active -= 1
824
+ # handle the case when idx is not of length 1
825
+ drop_idx = [active.pop(ii) for ii in idx]
826
+
827
+ if Gram is None:
828
+ # propagate dropped variable
829
+ for ii in idx:
830
+ for i in range(ii, n_active):
831
+ X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
832
+ # yeah this is stupid
833
+ indices[i], indices[i + 1] = indices[i + 1], indices[i]
834
+
835
+ # TODO: this could be updated
836
+ residual = y - np.dot(X[:, :n_active], coef[active])
837
+ temp = np.dot(X.T[n_active], residual)
838
+
839
+ Cov = np.r_[temp, Cov]
840
+ else:
841
+ for ii in idx:
842
+ for i in range(ii, n_active):
843
+ indices[i], indices[i + 1] = indices[i + 1], indices[i]
844
+ Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
845
+ Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1])
846
+
847
+ # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
848
+ # will this still work with multiple drops ?
849
+
850
+ # recompute covariance. Probably could be done better
851
+ # wrong as Xy is not swapped with the rest of variables
852
+
853
+ # TODO: this could be updated
854
+ temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef)
855
+ Cov = np.r_[temp, Cov]
856
+
857
+ sign_active = np.delete(sign_active, idx)
858
+ sign_active = np.append(sign_active, 0.0) # just to maintain size
859
+ if verbose > 1:
860
+ print(
861
+ "%s\t\t%s\t\t%s\t\t%s\t\t%s"
862
+ % (n_iter, "", drop_idx, n_active, abs(temp))
863
+ )
864
+
865
+ if return_path:
866
+ # resize coefs in case of early stop
867
+ alphas = alphas[: n_iter + 1]
868
+ coefs = coefs[: n_iter + 1]
869
+
870
+ if return_n_iter:
871
+ return alphas, active, coefs.T, n_iter
872
+ else:
873
+ return alphas, active, coefs.T
874
+ else:
875
+ if return_n_iter:
876
+ return alpha, active, coef, n_iter
877
+ else:
878
+ return alpha, active, coef
879
+
880
+
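A minimal usage sketch of the `lars_path` function defined above (not part of the diff); the synthetic data from `make_regression`, the sizes and the seed are arbitrary illustrations.

from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path

X, y = make_regression(n_samples=100, n_features=10, noise=4.0, random_state=0)
# method="lasso" computes the Lasso path with the LARS algorithm; alphas is
# decreasing, coefs has shape (n_features, n_alphas) and active lists the
# indices of the variables that entered the model along the path.
alphas, active, coefs = lars_path(X, y, method="lasso")
print(alphas.shape, coefs.shape, active[:3])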
881
+ ###############################################################################
882
+ # Estimator classes
883
+
884
+
885
+ class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
886
+ """Least Angle Regression model a.k.a. LAR.
887
+
888
+ Read more in the :ref:`User Guide <least_angle_regression>`.
889
+
890
+ Parameters
891
+ ----------
892
+ fit_intercept : bool, default=True
893
+ Whether to calculate the intercept for this model. If set
894
+ to false, no intercept will be used in calculations
895
+ (i.e. data is expected to be centered).
896
+
897
+ verbose : bool or int, default=False
898
+ Sets the verbosity amount.
899
+
900
+ precompute : bool, 'auto' or array-like, default='auto'
901
+ Whether to use a precomputed Gram matrix to speed up
902
+ calculations. If set to ``'auto'`` let us decide. The Gram
903
+ matrix can also be passed as argument.
904
+
905
+ n_nonzero_coefs : int, default=500
906
+ Target number of non-zero coefficients. Use ``np.inf`` for no limit.
907
+
908
+ eps : float, default=np.finfo(float).eps
909
+ The machine-precision regularization in the computation of the
910
+ Cholesky diagonal factors. Increase this for very ill-conditioned
911
+ systems. Unlike the ``tol`` parameter in some iterative
912
+ optimization-based algorithms, this parameter does not control
913
+ the tolerance of the optimization.
914
+
915
+ copy_X : bool, default=True
916
+ If ``True``, X will be copied; else, it may be overwritten.
917
+
918
+ fit_path : bool, default=True
919
+ If True the full path is stored in the ``coef_path_`` attribute.
920
+ If you compute the solution for a large problem or many targets,
921
+ setting ``fit_path`` to ``False`` will lead to a speedup, especially
922
+ with a small alpha.
923
+
924
+ jitter : float, default=None
925
+ Upper bound on a uniform noise parameter to be added to the
926
+ `y` values, to satisfy the model's assumption of
927
+ one-at-a-time computations. Might help with stability.
928
+
929
+ .. versionadded:: 0.23
930
+
931
+ random_state : int, RandomState instance or None, default=None
932
+ Determines random number generation for jittering. Pass an int
933
+ for reproducible output across multiple function calls.
934
+ See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
935
+
936
+ .. versionadded:: 0.23
937
+
938
+ Attributes
939
+ ----------
940
+ alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
941
+ Maximum of covariances (in absolute value) at each iteration.
942
+ ``n_alphas`` is either ``max_iter``, ``n_features`` or the
943
+ number of nodes in the path with ``alpha >= alpha_min``, whichever
944
+ is smaller. If this is a list of array-like, the length of the outer
945
+ list is `n_targets`.
946
+
947
+ active_ : list of shape (n_alphas,) or list of such lists
948
+ Indices of active variables at the end of the path.
949
+ If this is a list of list, the length of the outer list is `n_targets`.
950
+
951
+ coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
952
+ of such arrays
953
+ The varying values of the coefficients along the path. It is not
954
+ present if the ``fit_path`` parameter is ``False``. If this is a list
955
+ of array-like, the length of the outer list is `n_targets`.
956
+
957
+ coef_ : array-like of shape (n_features,) or (n_targets, n_features)
958
+ Parameter vector (w in the formulation formula).
959
+
960
+ intercept_ : float or array-like of shape (n_targets,)
961
+ Independent term in decision function.
962
+
963
+ n_iter_ : array-like or int
964
+ The number of iterations taken by lars_path to find the
965
+ grid of alphas for each target.
966
+
967
+ n_features_in_ : int
968
+ Number of features seen during :term:`fit`.
969
+
970
+ .. versionadded:: 0.24
971
+
972
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
973
+ Names of features seen during :term:`fit`. Defined only when `X`
974
+ has feature names that are all strings.
975
+
976
+ .. versionadded:: 1.0
977
+
978
+ See Also
979
+ --------
980
+ lars_path: Compute Least Angle Regression or Lasso
981
+ path using LARS algorithm.
982
+ LarsCV : Cross-validated Least Angle Regression model.
983
+ sklearn.decomposition.sparse_encode : Sparse coding.
984
+
985
+ Examples
986
+ --------
987
+ >>> from sklearn import linear_model
988
+ >>> reg = linear_model.Lars(n_nonzero_coefs=1)
989
+ >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
990
+ Lars(n_nonzero_coefs=1)
991
+ >>> print(reg.coef_)
992
+ [ 0. -1.11...]
993
+ """
994
+
995
+ _parameter_constraints: dict = {
996
+ "fit_intercept": ["boolean"],
997
+ "verbose": ["verbose"],
998
+ "precompute": ["boolean", StrOptions({"auto"}), np.ndarray, Hidden(None)],
999
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")],
1000
+ "eps": [Interval(Real, 0, None, closed="left")],
1001
+ "copy_X": ["boolean"],
1002
+ "fit_path": ["boolean"],
1003
+ "jitter": [Interval(Real, 0, None, closed="left"), None],
1004
+ "random_state": ["random_state"],
1005
+ }
1006
+
1007
+ method = "lar"
1008
+ positive = False
1009
+
1010
+ def __init__(
1011
+ self,
1012
+ *,
1013
+ fit_intercept=True,
1014
+ verbose=False,
1015
+ precompute="auto",
1016
+ n_nonzero_coefs=500,
1017
+ eps=np.finfo(float).eps,
1018
+ copy_X=True,
1019
+ fit_path=True,
1020
+ jitter=None,
1021
+ random_state=None,
1022
+ ):
1023
+ self.fit_intercept = fit_intercept
1024
+ self.verbose = verbose
1025
+ self.precompute = precompute
1026
+ self.n_nonzero_coefs = n_nonzero_coefs
1027
+ self.eps = eps
1028
+ self.copy_X = copy_X
1029
+ self.fit_path = fit_path
1030
+ self.jitter = jitter
1031
+ self.random_state = random_state
1032
+
1033
+ @staticmethod
1034
+ def _get_gram(precompute, X, y):
1035
+ if (not hasattr(precompute, "__array__")) and (
1036
+ (precompute is True)
1037
+ or (precompute == "auto" and X.shape[0] > X.shape[1])
1038
+ or (precompute == "auto" and y.shape[1] > 1)
1039
+ ):
1040
+ precompute = np.dot(X.T, X)
1041
+
1042
+ return precompute
1043
+
1044
+ def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):
1045
+ """Auxiliary method to fit the model using X, y as training data"""
1046
+ n_features = X.shape[1]
1047
+
1048
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
1049
+ X, y, fit_intercept=self.fit_intercept, copy=self.copy_X
1050
+ )
1051
+
1052
+ if y.ndim == 1:
1053
+ y = y[:, np.newaxis]
1054
+
1055
+ n_targets = y.shape[1]
1056
+
1057
+ Gram = self._get_gram(self.precompute, X, y)
1058
+
1059
+ self.alphas_ = []
1060
+ self.n_iter_ = []
1061
+ self.coef_ = np.empty((n_targets, n_features), dtype=X.dtype)
1062
+
1063
+ if fit_path:
1064
+ self.active_ = []
1065
+ self.coef_path_ = []
1066
+ for k in range(n_targets):
1067
+ this_Xy = None if Xy is None else Xy[:, k]
1068
+ alphas, active, coef_path, n_iter_ = lars_path(
1069
+ X,
1070
+ y[:, k],
1071
+ Gram=Gram,
1072
+ Xy=this_Xy,
1073
+ copy_X=self.copy_X,
1074
+ copy_Gram=True,
1075
+ alpha_min=alpha,
1076
+ method=self.method,
1077
+ verbose=max(0, self.verbose - 1),
1078
+ max_iter=max_iter,
1079
+ eps=self.eps,
1080
+ return_path=True,
1081
+ return_n_iter=True,
1082
+ positive=self.positive,
1083
+ )
1084
+ self.alphas_.append(alphas)
1085
+ self.active_.append(active)
1086
+ self.n_iter_.append(n_iter_)
1087
+ self.coef_path_.append(coef_path)
1088
+ self.coef_[k] = coef_path[:, -1]
1089
+
1090
+ if n_targets == 1:
1091
+ self.alphas_, self.active_, self.coef_path_, self.coef_ = [
1092
+ a[0]
1093
+ for a in (self.alphas_, self.active_, self.coef_path_, self.coef_)
1094
+ ]
1095
+ self.n_iter_ = self.n_iter_[0]
1096
+ else:
1097
+ for k in range(n_targets):
1098
+ this_Xy = None if Xy is None else Xy[:, k]
1099
+ alphas, _, self.coef_[k], n_iter_ = lars_path(
1100
+ X,
1101
+ y[:, k],
1102
+ Gram=Gram,
1103
+ Xy=this_Xy,
1104
+ copy_X=self.copy_X,
1105
+ copy_Gram=True,
1106
+ alpha_min=alpha,
1107
+ method=self.method,
1108
+ verbose=max(0, self.verbose - 1),
1109
+ max_iter=max_iter,
1110
+ eps=self.eps,
1111
+ return_path=False,
1112
+ return_n_iter=True,
1113
+ positive=self.positive,
1114
+ )
1115
+ self.alphas_.append(alphas)
1116
+ self.n_iter_.append(n_iter_)
1117
+ if n_targets == 1:
1118
+ self.alphas_ = self.alphas_[0]
1119
+ self.n_iter_ = self.n_iter_[0]
1120
+
1121
+ self._set_intercept(X_offset, y_offset, X_scale)
1122
+ return self
1123
+
1124
+ @_fit_context(prefer_skip_nested_validation=True)
1125
+ def fit(self, X, y, Xy=None):
1126
+ """Fit the model using X, y as training data.
1127
+
1128
+ Parameters
1129
+ ----------
1130
+ X : array-like of shape (n_samples, n_features)
1131
+ Training data.
1132
+
1133
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
1134
+ Target values.
1135
+
1136
+ Xy : array-like of shape (n_features,) or (n_features, n_targets), \
1137
+ default=None
1138
+ Xy = np.dot(X.T, y) that can be precomputed. It is useful
1139
+ only when the Gram matrix is precomputed.
1140
+
1141
+ Returns
1142
+ -------
1143
+ self : object
1144
+ Returns an instance of self.
1145
+ """
1146
+ X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)
1147
+
1148
+ alpha = getattr(self, "alpha", 0.0)
1149
+ if hasattr(self, "n_nonzero_coefs"):
1150
+ alpha = 0.0 # n_nonzero_coefs parametrization takes priority
1151
+ max_iter = self.n_nonzero_coefs
1152
+ else:
1153
+ max_iter = self.max_iter
1154
+
1155
+ if self.jitter is not None:
1156
+ rng = check_random_state(self.random_state)
1157
+
1158
+ noise = rng.uniform(high=self.jitter, size=len(y))
1159
+ y = y + noise
1160
+
1161
+ self._fit(
1162
+ X,
1163
+ y,
1164
+ max_iter=max_iter,
1165
+ alpha=alpha,
1166
+ fit_path=self.fit_path,
1167
+ Xy=Xy,
1168
+ )
1169
+
1170
+ return self
1171
+
1172
+
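An illustrative sketch (arbitrary data sizes and seed) of the `Lars` estimator above, showing how the path attributes relate to one another when `fit_path=True`:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lars

X, y = make_regression(n_samples=50, n_features=10, noise=1.0, random_state=0)
reg = Lars(n_nonzero_coefs=3).fit(X, y)
# With fit_path=True (the default), coef_path_ stores the whole LARS path and
# coef_ is its last column; alphas_ holds the matching covariance maxima.
print(reg.coef_path_.shape, reg.alphas_.shape, reg.n_iter_)
print(np.allclose(reg.coef_, reg.coef_path_[:, -1]))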
1173
+ class LassoLars(Lars):
1174
+ """Lasso model fit with Least Angle Regression a.k.a. Lars.
1175
+
1176
+ It is a Linear Model trained with an L1 prior as regularizer.
1177
+
1178
+ The optimization objective for Lasso is::
1179
+
1180
+ (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
1181
+
1182
+ Read more in the :ref:`User Guide <least_angle_regression>`.
1183
+
1184
+ Parameters
1185
+ ----------
1186
+ alpha : float, default=1.0
1187
+ Constant that multiplies the penalty term. Defaults to 1.0.
1188
+ ``alpha = 0`` is equivalent to an ordinary least square, solved
1189
+ by :class:`LinearRegression`. For numerical reasons, using
1190
+ ``alpha = 0`` with the LassoLars object is not advised and you
1191
+ should prefer the LinearRegression object.
1192
+
1193
+ fit_intercept : bool, default=True
1194
+ Whether to calculate the intercept for this model. If set
1195
+ to false, no intercept will be used in calculations
1196
+ (i.e. data is expected to be centered).
1197
+
1198
+ verbose : bool or int, default=False
1199
+ Sets the verbosity amount.
1200
+
1201
+ precompute : bool, 'auto' or array-like, default='auto'
1202
+ Whether to use a precomputed Gram matrix to speed up
1203
+ calculations. If set to ``'auto'`` let us decide. The Gram
1204
+ matrix can also be passed as argument.
1205
+
1206
+ max_iter : int, default=500
1207
+ Maximum number of iterations to perform.
1208
+
1209
+ eps : float, default=np.finfo(float).eps
1210
+ The machine-precision regularization in the computation of the
1211
+ Cholesky diagonal factors. Increase this for very ill-conditioned
1212
+ systems. Unlike the ``tol`` parameter in some iterative
1213
+ optimization-based algorithms, this parameter does not control
1214
+ the tolerance of the optimization.
1215
+
1216
+ copy_X : bool, default=True
1217
+ If True, X will be copied; else, it may be overwritten.
1218
+
1219
+ fit_path : bool, default=True
1220
+ If ``True`` the full path is stored in the ``coef_path_`` attribute.
1221
+ If you compute the solution for a large problem or many targets,
1222
+ setting ``fit_path`` to ``False`` will lead to a speedup, especially
1223
+ with a small alpha.
1224
+
1225
+ positive : bool, default=False
1226
+ Restrict coefficients to be >= 0. Be aware that you might want to
1227
+ remove fit_intercept which is set True by default.
1228
+ Under the positive restriction the model coefficients will not converge
1229
+ to the ordinary-least-squares solution for small values of alpha.
1230
+ Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
1231
+ 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
1232
+ algorithm are typically in congruence with the solution of the
1233
+ coordinate descent Lasso estimator.
1234
+
1235
+ jitter : float, default=None
1236
+ Upper bound on a uniform noise parameter to be added to the
1237
+ `y` values, to satisfy the model's assumption of
1238
+ one-at-a-time computations. Might help with stability.
1239
+
1240
+ .. versionadded:: 0.23
1241
+
1242
+ random_state : int, RandomState instance or None, default=None
1243
+ Determines random number generation for jittering. Pass an int
1244
+ for reproducible output across multiple function calls.
1245
+ See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
1246
+
1247
+ .. versionadded:: 0.23
1248
+
1249
+ Attributes
1250
+ ----------
1251
+ alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
1252
+ Maximum of covariances (in absolute value) at each iteration.
1253
+ ``n_alphas`` is either ``max_iter``, ``n_features`` or the
1254
+ number of nodes in the path with ``alpha >= alpha_min``, whichever
1255
+ is smaller. If this is a list of array-like, the length of the outer
1256
+ list is `n_targets`.
1257
+
1258
+ active_ : list of length n_alphas or list of such lists
1259
+ Indices of active variables at the end of the path.
1260
+ If this is a list of list, the length of the outer list is `n_targets`.
1261
+
1262
+ coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
1263
+ of such arrays
1264
+ If a list is passed it's expected to be one of n_targets such arrays.
1265
+ The varying values of the coefficients along the path. It is not
1266
+ present if the ``fit_path`` parameter is ``False``. If this is a list
1267
+ of array-like, the length of the outer list is `n_targets`.
1268
+
1269
+ coef_ : array-like of shape (n_features,) or (n_targets, n_features)
1270
+ Parameter vector (w in the formulation formula).
1271
+
1272
+ intercept_ : float or array-like of shape (n_targets,)
1273
+ Independent term in decision function.
1274
+
1275
+ n_iter_ : array-like or int
1276
+ The number of iterations taken by lars_path to find the
1277
+ grid of alphas for each target.
1278
+
1279
+ n_features_in_ : int
1280
+ Number of features seen during :term:`fit`.
1281
+
1282
+ .. versionadded:: 0.24
1283
+
1284
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1285
+ Names of features seen during :term:`fit`. Defined only when `X`
1286
+ has feature names that are all strings.
1287
+
1288
+ .. versionadded:: 1.0
1289
+
1290
+ See Also
1291
+ --------
1292
+ lars_path : Compute Least Angle Regression or Lasso
1293
+ path using LARS algorithm.
1294
+ lasso_path : Compute Lasso path with coordinate descent.
1295
+ Lasso : Linear Model trained with L1 prior as
1296
+ regularizer (aka the Lasso).
1297
+ LassoCV : Lasso linear model with iterative fitting
1298
+ along a regularization path.
1299
+ LassoLarsCV: Cross-validated Lasso, using the LARS algorithm.
1300
+ LassoLarsIC : Lasso model fit with Lars using BIC
1301
+ or AIC for model selection.
1302
+ sklearn.decomposition.sparse_encode : Sparse coding.
1303
+
1304
+ Examples
1305
+ --------
1306
+ >>> from sklearn import linear_model
1307
+ >>> reg = linear_model.LassoLars(alpha=0.01)
1308
+ >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
1309
+ LassoLars(alpha=0.01)
1310
+ >>> print(reg.coef_)
1311
+ [ 0. -0.955...]
1312
+ """
1313
+
1314
+ _parameter_constraints: dict = {
1315
+ **Lars._parameter_constraints,
1316
+ "alpha": [Interval(Real, 0, None, closed="left")],
1317
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
1318
+ "positive": ["boolean"],
1319
+ }
1320
+ _parameter_constraints.pop("n_nonzero_coefs")
1321
+
1322
+ method = "lasso"
1323
+
1324
+ def __init__(
1325
+ self,
1326
+ alpha=1.0,
1327
+ *,
1328
+ fit_intercept=True,
1329
+ verbose=False,
1330
+ precompute="auto",
1331
+ max_iter=500,
1332
+ eps=np.finfo(float).eps,
1333
+ copy_X=True,
1334
+ fit_path=True,
1335
+ positive=False,
1336
+ jitter=None,
1337
+ random_state=None,
1338
+ ):
1339
+ self.alpha = alpha
1340
+ self.fit_intercept = fit_intercept
1341
+ self.max_iter = max_iter
1342
+ self.verbose = verbose
1343
+ self.positive = positive
1344
+ self.precompute = precompute
1345
+ self.copy_X = copy_X
1346
+ self.eps = eps
1347
+ self.fit_path = fit_path
1348
+ self.jitter = jitter
1349
+ self.random_state = random_state
1350
+
1351
+
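Because `LassoLars` optimizes the same objective as the coordinate-descent `Lasso`, a small sanity-check sketch (the alpha value, data sizes and seed are arbitrary) is to compare the two fits:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, LassoLars

X, y = make_regression(n_samples=100, n_features=20, noise=2.0, random_state=0)
alpha = 0.5
# Both estimators minimize
#   (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1,
# one by LARS and one by coordinate descent, so the coefficients should agree
# up to the solvers' numerical tolerances.
lars = LassoLars(alpha=alpha).fit(X, y)
cd = Lasso(alpha=alpha).fit(X, y)
print(np.max(np.abs(lars.coef_ - cd.coef_)))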
1352
+ ###############################################################################
1353
+ # Cross-validated estimator classes
1354
+
1355
+
1356
+ def _check_copy_and_writeable(array, copy=False):
1357
+ if copy or not array.flags.writeable:
1358
+ return array.copy()
1359
+ return array
1360
+
1361
+
1362
+ def _lars_path_residues(
1363
+ X_train,
1364
+ y_train,
1365
+ X_test,
1366
+ y_test,
1367
+ Gram=None,
1368
+ copy=True,
1369
+ method="lar",
1370
+ verbose=False,
1371
+ fit_intercept=True,
1372
+ max_iter=500,
1373
+ eps=np.finfo(float).eps,
1374
+ positive=False,
1375
+ ):
1376
+ """Compute the residues on left-out data for a full LARS path
1377
+
1378
+ Parameters
1379
+ -----------
1380
+ X_train : array-like of shape (n_samples, n_features)
1381
+ The data to fit the LARS on
1382
+
1383
+ y_train : array-like of shape (n_samples,)
1384
+ The target variable to fit LARS on
1385
+
1386
+ X_test : array-like of shape (n_samples, n_features)
1387
+ The data to compute the residues on
1388
+
1389
+ y_test : array-like of shape (n_samples,)
1390
+ The target variable to compute the residues on
1391
+
1392
+ Gram : None, 'auto' or array-like of shape (n_features, n_features), \
1393
+ default=None
1394
+ Precomputed Gram matrix (X' * X). If ``'auto'``, the Gram
1395
+ matrix is precomputed from the given X, if there are more samples
1396
+ than features
1397
+
1398
+ copy : bool, default=True
1399
+ Whether X_train, X_test, y_train and y_test should be copied;
1400
+ if False, they may be overwritten.
1401
+
1402
+ method : {'lar', 'lasso'}, default='lar'
1403
+ Specifies the returned model. Select ``'lar'`` for Least Angle
1404
+ Regression, ``'lasso'`` for the Lasso.
1405
+
1406
+ verbose : bool or int, default=False
1407
+ Sets the amount of verbosity
1408
+
1409
+ fit_intercept : bool, default=True
1410
+ whether to calculate the intercept for this model. If set
1411
+ to false, no intercept will be used in calculations
1412
+ (i.e. data is expected to be centered).
1413
+
1414
+ positive : bool, default=False
1415
+ Restrict coefficients to be >= 0. Be aware that you might want to
1416
+ remove fit_intercept which is set True by default.
1417
+ See the documentation of LassoLarsCV and LassoLarsIC for reservations
1418
+ about using this option together with method 'lasso' when small
1419
+ values of alpha are expected.
1420
+
1421
+ max_iter : int, default=500
1422
+ Maximum number of iterations to perform.
1423
+
1424
+ eps : float, default=np.finfo(float).eps
1425
+ The machine-precision regularization in the computation of the
1426
+ Cholesky diagonal factors. Increase this for very ill-conditioned
1427
+ systems. Unlike the ``tol`` parameter in some iterative
1428
+ optimization-based algorithms, this parameter does not control
1429
+ the tolerance of the optimization.
1430
+
1431
+ Returns
1432
+ --------
1433
+ alphas : array-like of shape (n_alphas,)
1434
+ Maximum of covariances (in absolute value) at each iteration.
1435
+ ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
1436
+ is smaller.
1437
+
1438
+ active : list
1439
+ Indices of active variables at the end of the path.
1440
+
1441
+ coefs : array-like of shape (n_features, n_alphas)
1442
+ Coefficients along the path
1443
+
1444
+ residues : array-like of shape (n_alphas, n_samples)
1445
+ Residues of the prediction on the test data
1446
+ """
1447
+ X_train = _check_copy_and_writeable(X_train, copy)
1448
+ y_train = _check_copy_and_writeable(y_train, copy)
1449
+ X_test = _check_copy_and_writeable(X_test, copy)
1450
+ y_test = _check_copy_and_writeable(y_test, copy)
1451
+
1452
+ if fit_intercept:
1453
+ X_mean = X_train.mean(axis=0)
1454
+ X_train -= X_mean
1455
+ X_test -= X_mean
1456
+ y_mean = y_train.mean(axis=0)
1457
+ y_train = as_float_array(y_train, copy=False)
1458
+ y_train -= y_mean
1459
+ y_test = as_float_array(y_test, copy=False)
1460
+ y_test -= y_mean
1461
+
1462
+ alphas, active, coefs = lars_path(
1463
+ X_train,
1464
+ y_train,
1465
+ Gram=Gram,
1466
+ copy_X=False,
1467
+ copy_Gram=False,
1468
+ method=method,
1469
+ verbose=max(0, verbose - 1),
1470
+ max_iter=max_iter,
1471
+ eps=eps,
1472
+ positive=positive,
1473
+ )
1474
+ residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
1475
+ return alphas, active, coefs, residues.T
1476
+
1477
+
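The helper above is private; a rough public-API sketch of the same idea, with arbitrary sizes and using standard scikit-learn utilities (`make_regression`, `train_test_split`) for illustration: center with the training statistics, fit the path on the training fold, then evaluate every point of the path on the held-out fold.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=120, n_features=15, noise=3.0, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
X_mean, y_mean = X_tr.mean(axis=0), y_tr.mean()
# Fit the LARS path on the centered training fold and compute the residues of
# each point of the path on the centered test fold.
alphas, _, coefs = lars_path(X_tr - X_mean, y_tr - y_mean)
residues = (X_te - X_mean) @ coefs - (y_te - y_mean)[:, np.newaxis]
print(alphas.shape, residues.shape)  # (n_alphas,) and (n_test_samples, n_alphas)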
1478
+ class LarsCV(Lars):
1479
+ """Cross-validated Least Angle Regression model.
1480
+
1481
+ See glossary entry for :term:`cross-validation estimator`.
1482
+
1483
+ Read more in the :ref:`User Guide <least_angle_regression>`.
1484
+
1485
+ Parameters
1486
+ ----------
1487
+ fit_intercept : bool, default=True
1488
+ Whether to calculate the intercept for this model. If set
1489
+ to false, no intercept will be used in calculations
1490
+ (i.e. data is expected to be centered).
1491
+
1492
+ verbose : bool or int, default=False
1493
+ Sets the verbosity amount.
1494
+
1495
+ max_iter : int, default=500
1496
+ Maximum number of iterations to perform.
1497
+
1498
+ precompute : bool, 'auto' or array-like, default='auto'
1499
+ Whether to use a precomputed Gram matrix to speed up
1500
+ calculations. If set to ``'auto'`` let us decide. The Gram matrix
1501
+ cannot be passed as argument since we will use only subsets of X.
1502
+
1503
+ cv : int, cross-validation generator or an iterable, default=None
1504
+ Determines the cross-validation splitting strategy.
1505
+ Possible inputs for cv are:
1506
+
1507
+ - None, to use the default 5-fold cross-validation,
1508
+ - integer, to specify the number of folds.
1509
+ - :term:`CV splitter`,
1510
+ - An iterable yielding (train, test) splits as arrays of indices.
1511
+
1512
+ For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
1513
+
1514
+ Refer :ref:`User Guide <cross_validation>` for the various
1515
+ cross-validation strategies that can be used here.
1516
+
1517
+ .. versionchanged:: 0.22
1518
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1519
+
1520
+ max_n_alphas : int, default=1000
1521
+ The maximum number of points on the path used to compute the
1522
+ residuals in the cross-validation.
1523
+
1524
+ n_jobs : int or None, default=None
1525
+ Number of CPUs to use during the cross validation.
1526
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1527
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1528
+ for more details.
1529
+
1530
+ eps : float, default=np.finfo(float).eps
1531
+ The machine-precision regularization in the computation of the
1532
+ Cholesky diagonal factors. Increase this for very ill-conditioned
1533
+ systems. Unlike the ``tol`` parameter in some iterative
1534
+ optimization-based algorithms, this parameter does not control
1535
+ the tolerance of the optimization.
1536
+
1537
+ copy_X : bool, default=True
1538
+ If ``True``, X will be copied; else, it may be overwritten.
1539
+
1540
+ Attributes
1541
+ ----------
1542
+ active_ : list of length n_alphas or list of such lists
1543
+ Indices of active variables at the end of the path.
1544
+ If this is a list of lists, the outer list length is `n_targets`.
1545
+
1546
+ coef_ : array-like of shape (n_features,)
1547
+ parameter vector (w in the formulation formula)
1548
+
1549
+ intercept_ : float
1550
+ independent term in decision function
1551
+
1552
+ coef_path_ : array-like of shape (n_features, n_alphas)
1553
+ the varying values of the coefficients along the path
1554
+
1555
+ alpha_ : float
1556
+ the estimated regularization parameter alpha
1557
+
1558
+ alphas_ : array-like of shape (n_alphas,)
1559
+ the different values of alpha along the path
1560
+
1561
+ cv_alphas_ : array-like of shape (n_cv_alphas,)
1562
+ all the values of alpha along the path for the different folds
1563
+
1564
+ mse_path_ : array-like of shape (n_cv_alphas, n_folds)
1565
+ the mean square error on left-out for each fold along the path
1566
+ (alpha values given by ``cv_alphas``)
1567
+
1568
+ n_iter_ : array-like or int
1569
+ the number of iterations run by Lars with the optimal alpha.
1570
+
1571
+ n_features_in_ : int
1572
+ Number of features seen during :term:`fit`.
1573
+
1574
+ .. versionadded:: 0.24
1575
+
1576
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1577
+ Names of features seen during :term:`fit`. Defined only when `X`
1578
+ has feature names that are all strings.
1579
+
1580
+ .. versionadded:: 1.0
1581
+
1582
+ See Also
1583
+ --------
1584
+ lars_path : Compute Least Angle Regression or Lasso
1585
+ path using LARS algorithm.
1586
+ lasso_path : Compute Lasso path with coordinate descent.
1587
+ Lasso : Linear Model trained with L1 prior as
1588
+ regularizer (aka the Lasso).
1589
+ LassoCV : Lasso linear model with iterative fitting
1590
+ along a regularization path.
1591
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
1592
+ LassoLarsIC : Lasso model fit with Lars using BIC
1593
+ or AIC for model selection.
1594
+ sklearn.decomposition.sparse_encode : Sparse coding.
1595
+
1596
+ Notes
1597
+ -----
1598
+ In `fit`, once the best parameter `alpha` is found through
1599
+ cross-validation, the model is fit again using the entire training set.
1600
+
1601
+ Examples
1602
+ --------
1603
+ >>> from sklearn.linear_model import LarsCV
1604
+ >>> from sklearn.datasets import make_regression
1605
+ >>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
1606
+ >>> reg = LarsCV(cv=5).fit(X, y)
1607
+ >>> reg.score(X, y)
1608
+ 0.9996...
1609
+ >>> reg.alpha_
1610
+ 0.2961...
1611
+ >>> reg.predict(X[:1,])
1612
+ array([154.3996...])
1613
+ """
1614
+
1615
+ _parameter_constraints: dict = {
1616
+ **Lars._parameter_constraints,
1617
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
1618
+ "cv": ["cv_object"],
1619
+ "max_n_alphas": [Interval(Integral, 1, None, closed="left")],
1620
+ "n_jobs": [Integral, None],
1621
+ }
1622
+
1623
+ for parameter in ["n_nonzero_coefs", "jitter", "fit_path", "random_state"]:
1624
+ _parameter_constraints.pop(parameter)
1625
+
1626
+ method = "lar"
1627
+
1628
+ def __init__(
1629
+ self,
1630
+ *,
1631
+ fit_intercept=True,
1632
+ verbose=False,
1633
+ max_iter=500,
1634
+ precompute="auto",
1635
+ cv=None,
1636
+ max_n_alphas=1000,
1637
+ n_jobs=None,
1638
+ eps=np.finfo(float).eps,
1639
+ copy_X=True,
1640
+ ):
1641
+ self.max_iter = max_iter
1642
+ self.cv = cv
1643
+ self.max_n_alphas = max_n_alphas
1644
+ self.n_jobs = n_jobs
1645
+ super().__init__(
1646
+ fit_intercept=fit_intercept,
1647
+ verbose=verbose,
1648
+ precompute=precompute,
1649
+ n_nonzero_coefs=500,
1650
+ eps=eps,
1651
+ copy_X=copy_X,
1652
+ fit_path=True,
1653
+ )
1654
+
1655
+ def _more_tags(self):
1656
+ return {"multioutput": False}
1657
+
1658
+ @_fit_context(prefer_skip_nested_validation=True)
1659
+ def fit(self, X, y, **params):
1660
+ """Fit the model using X, y as training data.
1661
+
1662
+ Parameters
1663
+ ----------
1664
+ X : array-like of shape (n_samples, n_features)
1665
+ Training data.
1666
+
1667
+ y : array-like of shape (n_samples,)
1668
+ Target values.
1669
+
1670
+ **params : dict, default=None
1671
+ Parameters to be passed to the CV splitter.
1672
+
1673
+ .. versionadded:: 1.4
1674
+ Only available if `enable_metadata_routing=True`,
1675
+ which can be set by using
1676
+ ``sklearn.set_config(enable_metadata_routing=True)``.
1677
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
1678
+ more details.
1679
+
1680
+ Returns
1681
+ -------
1682
+ self : object
1683
+ Returns an instance of self.
1684
+ """
1685
+ _raise_for_params(params, self, "fit")
1686
+
1687
+ X, y = self._validate_data(X, y, y_numeric=True)
1688
+ X = as_float_array(X, copy=self.copy_X)
1689
+ y = as_float_array(y, copy=self.copy_X)
1690
+
1691
+ # init cross-validation generator
1692
+ cv = check_cv(self.cv, classifier=False)
1693
+
1694
+ if _routing_enabled():
1695
+ routed_params = process_routing(self, "fit", **params)
1696
+ else:
1697
+ routed_params = Bunch(splitter=Bunch(split={}))
1698
+
1699
+ # As we use cross-validation, the Gram matrix is not precomputed here
1700
+ Gram = self.precompute
1701
+ if hasattr(Gram, "__array__"):
1702
+ warnings.warn(
1703
+ 'Parameter "precompute" cannot be an array in '
1704
+ '%s. Automatically switch to "auto" instead.'
1705
+ % self.__class__.__name__
1706
+ )
1707
+ Gram = "auto"
1708
+
1709
+ cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
1710
+ delayed(_lars_path_residues)(
1711
+ X[train],
1712
+ y[train],
1713
+ X[test],
1714
+ y[test],
1715
+ Gram=Gram,
1716
+ copy=False,
1717
+ method=self.method,
1718
+ verbose=max(0, self.verbose - 1),
1719
+ fit_intercept=self.fit_intercept,
1720
+ max_iter=self.max_iter,
1721
+ eps=self.eps,
1722
+ positive=self.positive,
1723
+ )
1724
+ for train, test in cv.split(X, y, **routed_params.splitter.split)
1725
+ )
1726
+ all_alphas = np.concatenate(list(zip(*cv_paths))[0])
1727
+ # Unique also sorts
1728
+ all_alphas = np.unique(all_alphas)
1729
+ # Take at most max_n_alphas values
1730
+ stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
1731
+ all_alphas = all_alphas[::stride]
1732
+
1733
+ mse_path = np.empty((len(all_alphas), len(cv_paths)))
1734
+ for index, (alphas, _, _, residues) in enumerate(cv_paths):
1735
+ alphas = alphas[::-1]
1736
+ residues = residues[::-1]
1737
+ if alphas[0] != 0:
1738
+ alphas = np.r_[0, alphas]
1739
+ residues = np.r_[residues[0, np.newaxis], residues]
1740
+ if alphas[-1] != all_alphas[-1]:
1741
+ alphas = np.r_[alphas, all_alphas[-1]]
1742
+ residues = np.r_[residues, residues[-1, np.newaxis]]
1743
+ this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas)
1744
+ this_residues **= 2
1745
+ mse_path[:, index] = np.mean(this_residues, axis=-1)
1746
+
1747
+ mask = np.all(np.isfinite(mse_path), axis=-1)
1748
+ all_alphas = all_alphas[mask]
1749
+ mse_path = mse_path[mask]
1750
+ # Select the alpha that minimizes left-out error
1751
+ i_best_alpha = np.argmin(mse_path.mean(axis=-1))
1752
+ best_alpha = all_alphas[i_best_alpha]
1753
+
1754
+ # Store our parameters
1755
+ self.alpha_ = best_alpha
1756
+ self.cv_alphas_ = all_alphas
1757
+ self.mse_path_ = mse_path
1758
+
1759
+ # Now compute the full model using best_alpha
1760
+ # it will call a lasso internally when self is LassoLarsCV
1761
+ # as self.method == 'lasso'
1762
+ self._fit(
1763
+ X,
1764
+ y,
1765
+ max_iter=self.max_iter,
1766
+ alpha=best_alpha,
1767
+ Xy=None,
1768
+ fit_path=True,
1769
+ )
1770
+ return self
1771
+
1772
+ def get_metadata_routing(self):
1773
+ """Get metadata routing of this object.
1774
+
1775
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1776
+ mechanism works.
1777
+
1778
+ .. versionadded:: 1.4
1779
+
1780
+ Returns
1781
+ -------
1782
+ routing : MetadataRouter
1783
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
1784
+ routing information.
1785
+ """
1786
+ router = MetadataRouter(owner=self.__class__.__name__).add(
1787
+ splitter=check_cv(self.cv),
1788
+ method_mapping=MethodMapping().add(callee="split", caller="fit"),
1789
+ )
1790
+ return router
1791
+
1792
+
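An illustrative run of the `LarsCV` estimator above (arbitrary sizes and seed); the per-fold errors are interpolated onto a common alpha grid before the best alpha is picked.

from sklearn.datasets import make_regression
from sklearn.linear_model import LarsCV

X, y = make_regression(n_samples=200, n_features=20, noise=4.0, random_state=0)
reg = LarsCV(cv=5).fit(X, y)
# cv_alphas_ is the common grid built from all folds, mse_path_ stores the
# interpolated left-out mean squared error for each (alpha, fold) pair, and
# alpha_ is the grid value minimizing the mean error across folds.
print(reg.alpha_, reg.cv_alphas_.shape, reg.mse_path_.shape)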
1793
+ class LassoLarsCV(LarsCV):
1794
+ """Cross-validated Lasso, using the LARS algorithm.
1795
+
1796
+ See glossary entry for :term:`cross-validation estimator`.
1797
+
1798
+ The optimization objective for Lasso is::
1799
+
1800
+ (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
1801
+
1802
+ Read more in the :ref:`User Guide <least_angle_regression>`.
1803
+
1804
+ Parameters
1805
+ ----------
1806
+ fit_intercept : bool, default=True
1807
+ Whether to calculate the intercept for this model. If set
1808
+ to false, no intercept will be used in calculations
1809
+ (i.e. data is expected to be centered).
1810
+
1811
+ verbose : bool or int, default=False
1812
+ Sets the verbosity amount.
1813
+
1814
+ max_iter : int, default=500
1815
+ Maximum number of iterations to perform.
1816
+
1817
+ precompute : bool or 'auto', default='auto'
1818
+ Whether to use a precomputed Gram matrix to speed up
1819
+ calculations. If set to ``'auto'`` let us decide. The Gram matrix
1820
+ cannot be passed as argument since we will use only subsets of X.
1821
+
1822
+ cv : int, cross-validation generator or an iterable, default=None
1823
+ Determines the cross-validation splitting strategy.
1824
+ Possible inputs for cv are:
1825
+
1826
+ - None, to use the default 5-fold cross-validation,
1827
+ - integer, to specify the number of folds.
1828
+ - :term:`CV splitter`,
1829
+ - An iterable yielding (train, test) splits as arrays of indices.
1830
+
1831
+ For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
1832
+
1833
+ Refer :ref:`User Guide <cross_validation>` for the various
1834
+ cross-validation strategies that can be used here.
1835
+
1836
+ .. versionchanged:: 0.22
1837
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1838
+
1839
+ max_n_alphas : int, default=1000
1840
+ The maximum number of points on the path used to compute the
1841
+ residuals in the cross-validation.
1842
+
1843
+ n_jobs : int or None, default=None
1844
+ Number of CPUs to use during the cross validation.
1845
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1846
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1847
+ for more details.
1848
+
1849
+ eps : float, default=np.finfo(float).eps
1850
+ The machine-precision regularization in the computation of the
1851
+ Cholesky diagonal factors. Increase this for very ill-conditioned
1852
+ systems. Unlike the ``tol`` parameter in some iterative
1853
+ optimization-based algorithms, this parameter does not control
1854
+ the tolerance of the optimization.
1855
+
1856
+ copy_X : bool, default=True
1857
+ If True, X will be copied; else, it may be overwritten.
1858
+
1859
+ positive : bool, default=False
1860
+ Restrict coefficients to be >= 0. Be aware that you might want to
1861
+ remove fit_intercept which is set True by default.
1862
+ Under the positive restriction the model coefficients do not converge
1863
+ to the ordinary-least-squares solution for small values of alpha.
1864
+ Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
1865
+ 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
1866
+ algorithm are typically in congruence with the solution of the
1867
+ coordinate descent Lasso estimator.
1868
+ As a consequence using LassoLarsCV only makes sense for problems where
1869
+ a sparse solution is expected and/or reached.
1870
+
1871
+ Attributes
1872
+ ----------
1873
+ coef_ : array-like of shape (n_features,)
1874
+ parameter vector (w in the formulation formula)
1875
+
1876
+ intercept_ : float
1877
+ independent term in decision function.
1878
+
1879
+ coef_path_ : array-like of shape (n_features, n_alphas)
1880
+ the varying values of the coefficients along the path
1881
+
1882
+ alpha_ : float
1883
+ the estimated regularization parameter alpha
1884
+
1885
+ alphas_ : array-like of shape (n_alphas,)
1886
+ the different values of alpha along the path
1887
+
1888
+ cv_alphas_ : array-like of shape (n_cv_alphas,)
1889
+ all the values of alpha along the path for the different folds
1890
+
1891
+ mse_path_ : array-like of shape (n_cv_alphas, n_folds)
1892
+ the mean square error on left-out for each fold along the path
1893
+ (alpha values given by ``cv_alphas``)
1894
+
1895
+ n_iter_ : array-like or int
1896
+ the number of iterations run by Lars with the optimal alpha.
1897
+
1898
+ active_ : list of int
1899
+ Indices of active variables at the end of the path.
1900
+
1901
+ n_features_in_ : int
1902
+ Number of features seen during :term:`fit`.
1903
+
1904
+ .. versionadded:: 0.24
1905
+
1906
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1907
+ Names of features seen during :term:`fit`. Defined only when `X`
1908
+ has feature names that are all strings.
1909
+
1910
+ .. versionadded:: 1.0
1911
+
1912
+ See Also
1913
+ --------
1914
+ lars_path : Compute Least Angle Regression or Lasso
1915
+ path using LARS algorithm.
1916
+ lasso_path : Compute Lasso path with coordinate descent.
1917
+ Lasso : Linear Model trained with L1 prior as
1918
+ regularizer (aka the Lasso).
1919
+ LassoCV : Lasso linear model with iterative fitting
1920
+ along a regularization path.
1921
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
1922
+ LassoLarsIC : Lasso model fit with Lars using BIC
1923
+ or AIC for model selection.
1924
+ sklearn.decomposition.sparse_encode : Sparse coding.
1925
+
1926
+ Notes
1927
+ -----
1928
+ The object solves the same problem as the
1929
+ :class:`~sklearn.linear_model.LassoCV` object. However, unlike the
1930
+ :class:`~sklearn.linear_model.LassoCV`, it finds the relevant alpha values
1931
+ by itself. In general, because of this property, it will be more stable.
1932
+ However, it is more fragile to heavily multicollinear datasets.
1933
+
1934
+ It is more efficient than the :class:`~sklearn.linear_model.LassoCV` if
1935
+ only a small number of features are selected compared to the total number,
1936
+ for instance if there are very few samples compared to the number of
1937
+ features.
1938
+
1939
+ In `fit`, once the best parameter `alpha` is found through
1940
+ cross-validation, the model is fit again using the entire training set.
1941
+
1942
+ Examples
1943
+ --------
1944
+ >>> from sklearn.linear_model import LassoLarsCV
1945
+ >>> from sklearn.datasets import make_regression
1946
+ >>> X, y = make_regression(noise=4.0, random_state=0)
1947
+ >>> reg = LassoLarsCV(cv=5).fit(X, y)
1948
+ >>> reg.score(X, y)
1949
+ 0.9993...
1950
+ >>> reg.alpha_
1951
+ 0.3972...
1952
+ >>> reg.predict(X[:1,])
1953
+ array([-78.4831...])
1954
+ """
1955
+
1956
+ _parameter_constraints = {
1957
+ **LarsCV._parameter_constraints,
1958
+ "positive": ["boolean"],
1959
+ }
1960
+
1961
+ method = "lasso"
1962
+
1963
+ def __init__(
1964
+ self,
1965
+ *,
1966
+ fit_intercept=True,
1967
+ verbose=False,
1968
+ max_iter=500,
1969
+ precompute="auto",
1970
+ cv=None,
1971
+ max_n_alphas=1000,
1972
+ n_jobs=None,
1973
+ eps=np.finfo(float).eps,
1974
+ copy_X=True,
1975
+ positive=False,
1976
+ ):
1977
+ self.fit_intercept = fit_intercept
1978
+ self.verbose = verbose
1979
+ self.max_iter = max_iter
1980
+ self.precompute = precompute
1981
+ self.cv = cv
1982
+ self.max_n_alphas = max_n_alphas
1983
+ self.n_jobs = n_jobs
1984
+ self.eps = eps
1985
+ self.copy_X = copy_X
1986
+ self.positive = positive
1987
+ # XXX : we don't use super().__init__
1988
+ # to avoid setting n_nonzero_coefs
1989
+
1990
+
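A short illustrative sketch of `LassoLarsCV` (arbitrary sizes and seed); in contrast to `LassoCV`, the candidate alphas are taken from the per-fold LARS paths rather than specified up front.

from sklearn.datasets import make_regression
from sklearn.linear_model import LassoLarsCV

X, y = make_regression(n_samples=100, n_features=30, noise=4.0, random_state=0)
reg = LassoLarsCV(cv=5).fit(X, y)
# alpha_ is the cross-validated choice; cv_alphas_ collects the alpha values
# encountered on the folds' paths.
print(reg.alpha_, len(reg.cv_alphas_), round(reg.score(X, y), 3))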
1991
+ class LassoLarsIC(LassoLars):
1992
+ """Lasso model fit with Lars using BIC or AIC for model selection.
1993
+
1994
+ The optimization objective for Lasso is::
1995
+
1996
+ (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
1997
+
1998
+ AIC is the Akaike information criterion [2]_ and BIC is the Bayes
1999
+ Information criterion [3]_. Such criteria are useful to select the value
2000
+ of the regularization parameter by making a trade-off between the
2001
+ goodness of fit and the complexity of the model. A good model should
2002
+ explain the data well while being simple.
2003
+
2004
+ Read more in the :ref:`User Guide <lasso_lars_ic>`.
2005
+
2006
+ Parameters
2007
+ ----------
2008
+ criterion : {'aic', 'bic'}, default='aic'
2009
+ The type of criterion to use.
2010
+
2011
+ fit_intercept : bool, default=True
2012
+ Whether to calculate the intercept for this model. If set
2013
+ to false, no intercept will be used in calculations
2014
+ (i.e. data is expected to be centered).
2015
+
2016
+ verbose : bool or int, default=False
2017
+ Sets the verbosity amount.
2018
+
2019
+ precompute : bool, 'auto' or array-like, default='auto'
2020
+ Whether to use a precomputed Gram matrix to speed up
2021
+ calculations. If set to ``'auto'`` let us decide. The Gram
2022
+ matrix can also be passed as argument.
2023
+
2024
+ max_iter : int, default=500
2025
+ Maximum number of iterations to perform. Can be used for
2026
+ early stopping.
2027
+
2028
+ eps : float, default=np.finfo(float).eps
2029
+ The machine-precision regularization in the computation of the
2030
+ Cholesky diagonal factors. Increase this for very ill-conditioned
2031
+ systems. Unlike the ``tol`` parameter in some iterative
2032
+ optimization-based algorithms, this parameter does not control
2033
+ the tolerance of the optimization.
2034
+
2035
+ copy_X : bool, default=True
2036
+ If True, X will be copied; else, it may be overwritten.
2037
+
2038
+ positive : bool, default=False
2039
+ Restrict coefficients to be >= 0. Be aware that you might want to
2040
+ remove fit_intercept which is set True by default.
2041
+ Under the positive restriction the model coefficients do not converge
2042
+ to the ordinary-least-squares solution for small values of alpha.
2043
+ Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
2044
+ 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
2045
+ algorithm are typically in congruence with the solution of the
2046
+ coordinate descent Lasso estimator.
2047
+ As a consequence using LassoLarsIC only makes sense for problems where
2048
+ a sparse solution is expected and/or reached.
2049
+
2050
+ noise_variance : float, default=None
2051
+ The estimated noise variance of the data. If `None`, an unbiased
2052
+ estimate is computed by an OLS model. However, it is only possible
2053
+ in the case where `n_samples > n_features + fit_intercept`.
2054
+
2055
+ .. versionadded:: 1.1
2056
+
2057
+ Attributes
2058
+ ----------
2059
+ coef_ : array-like of shape (n_features,)
2060
+ parameter vector (w in the formulation formula)
2061
+
2062
+ intercept_ : float
2063
+ independent term in decision function.
2064
+
2065
+ alpha_ : float
2066
+ the alpha parameter chosen by the information criterion
2067
+
2068
+ alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
2069
+ Maximum of covariances (in absolute value) at each iteration.
2070
+ ``n_alphas`` is either ``max_iter``, ``n_features`` or the
2071
+ number of nodes in the path with ``alpha >= alpha_min``, whichever
2072
+ is smaller. If a list, it will be of length `n_targets`.
2073
+
2074
+ n_iter_ : int
2075
+ number of iterations run by lars_path to find the grid of
2076
+ alphas.
2077
+
2078
+ criterion_ : array-like of shape (n_alphas,)
2079
+ The value of the information criteria ('aic', 'bic') across all
2080
+ alphas. The alpha which has the smallest information criterion is
2081
+ chosen, as specified in [1]_.
2082
+
2083
+ noise_variance_ : float
2084
+ The estimated noise variance from the data used to compute the
2085
+ criterion.
2086
+
2087
+ .. versionadded:: 1.1
2088
+
2089
+ n_features_in_ : int
2090
+ Number of features seen during :term:`fit`.
2091
+
2092
+ .. versionadded:: 0.24
2093
+
2094
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
2095
+ Names of features seen during :term:`fit`. Defined only when `X`
2096
+ has feature names that are all strings.
2097
+
2098
+ .. versionadded:: 1.0
2099
+
2100
+ See Also
2101
+ --------
2102
+ lars_path : Compute Least Angle Regression or Lasso
2103
+ path using LARS algorithm.
2104
+ lasso_path : Compute Lasso path with coordinate descent.
2105
+ Lasso : Linear Model trained with L1 prior as
2106
+ regularizer (aka the Lasso).
2107
+ LassoCV : Lasso linear model with iterative fitting
2108
+ along a regularization path.
2109
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
2110
+ LassoLarsCV: Cross-validated Lasso, using the LARS algorithm.
2111
+ sklearn.decomposition.sparse_encode : Sparse coding.
2112
+
2113
+ Notes
2114
+ -----
2115
+ The number of degrees of freedom is computed as in [1]_.
2116
+
2117
+ To have more details regarding the mathematical formulation of the
2118
+ AIC and BIC criteria, please refer to :ref:`User Guide <lasso_lars_ic>`.
2119
+
2120
+ References
2121
+ ----------
2122
+ .. [1] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani.
2123
+ "On the degrees of freedom of the lasso."
2124
+ The Annals of Statistics 35.5 (2007): 2173-2192.
2125
+ <0712.0881>`
2126
+
2127
+ .. [2] `Wikipedia entry on the Akaike information criterion
2128
+ <https://en.wikipedia.org/wiki/Akaike_information_criterion>`_
2129
+
2130
+ .. [3] `Wikipedia entry on the Bayesian information criterion
2131
+ <https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_
2132
+
2133
+ Examples
2134
+ --------
2135
+ >>> from sklearn import linear_model
2136
+ >>> reg = linear_model.LassoLarsIC(criterion='bic')
2137
+ >>> X = [[-2, 2], [-1, 1], [0, 0], [1, 1], [2, 2]]
2138
+ >>> y = [-2.2222, -1.1111, 0, -1.1111, -2.2222]
2139
+ >>> reg.fit(X, y)
2140
+ LassoLarsIC(criterion='bic')
2141
+ >>> print(reg.coef_)
2142
+ [ 0. -1.11...]
2143
+ """
2144
+
2145
+ _parameter_constraints: dict = {
2146
+ **LassoLars._parameter_constraints,
2147
+ "criterion": [StrOptions({"aic", "bic"})],
2148
+ "noise_variance": [Interval(Real, 0, None, closed="left"), None],
2149
+ }
2150
+
2151
+ for parameter in ["jitter", "fit_path", "alpha", "random_state"]:
2152
+ _parameter_constraints.pop(parameter)
2153
+
2154
+ def __init__(
2155
+ self,
2156
+ criterion="aic",
2157
+ *,
2158
+ fit_intercept=True,
2159
+ verbose=False,
2160
+ precompute="auto",
2161
+ max_iter=500,
2162
+ eps=np.finfo(float).eps,
2163
+ copy_X=True,
2164
+ positive=False,
2165
+ noise_variance=None,
2166
+ ):
2167
+ self.criterion = criterion
2168
+ self.fit_intercept = fit_intercept
2169
+ self.positive = positive
2170
+ self.max_iter = max_iter
2171
+ self.verbose = verbose
2172
+ self.copy_X = copy_X
2173
+ self.precompute = precompute
2174
+ self.eps = eps
2175
+ self.fit_path = True
2176
+ self.noise_variance = noise_variance
2177
+
2178
+ def _more_tags(self):
2179
+ return {"multioutput": False}
2180
+
2181
+ @_fit_context(prefer_skip_nested_validation=True)
2182
+ def fit(self, X, y, copy_X=None):
2183
+ """Fit the model using X, y as training data.
2184
+
2185
+ Parameters
2186
+ ----------
2187
+ X : array-like of shape (n_samples, n_features)
2188
+ Training data.
2189
+
2190
+ y : array-like of shape (n_samples,)
2191
+ Target values. Will be cast to X's dtype if necessary.
2192
+
2193
+ copy_X : bool, default=None
2194
+ If provided, this parameter will override the choice
2195
+ of copy_X made at instance creation.
2196
+ If ``True``, X will be copied; else, it may be overwritten.
2197
+
2198
+ Returns
2199
+ -------
2200
+ self : object
2201
+ Returns an instance of self.
2202
+ """
2203
+ if copy_X is None:
2204
+ copy_X = self.copy_X
2205
+ X, y = self._validate_data(X, y, y_numeric=True)
2206
+
2207
+ X, y, Xmean, ymean, Xstd = _preprocess_data(
2208
+ X, y, fit_intercept=self.fit_intercept, copy=copy_X
2209
+ )
2210
+
2211
+ Gram = self.precompute
2212
+
2213
+ alphas_, _, coef_path_, self.n_iter_ = lars_path(
2214
+ X,
2215
+ y,
2216
+ Gram=Gram,
2217
+ copy_X=copy_X,
2218
+ copy_Gram=True,
2219
+ alpha_min=0.0,
2220
+ method="lasso",
2221
+ verbose=self.verbose,
2222
+ max_iter=self.max_iter,
2223
+ eps=self.eps,
2224
+ return_n_iter=True,
2225
+ positive=self.positive,
2226
+ )
2227
+
2228
+ n_samples = X.shape[0]
2229
+
2230
+ if self.criterion == "aic":
2231
+ criterion_factor = 2
2232
+ elif self.criterion == "bic":
2233
+ criterion_factor = log(n_samples)
2234
+ else:
2235
+ raise ValueError(
2236
+ f"criterion should be either bic or aic, got {self.criterion!r}"
2237
+ )
2238
+
2239
+ residuals = y[:, np.newaxis] - np.dot(X, coef_path_)
2240
+ residuals_sum_squares = np.sum(residuals**2, axis=0)
2241
+ degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int)
2242
+ for k, coef in enumerate(coef_path_.T):
2243
+ mask = np.abs(coef) > np.finfo(coef.dtype).eps
2244
+ if not np.any(mask):
2245
+ continue
2246
+ # get the number of degrees of freedom equal to:
2247
+ # Xc = X[:, mask]
2248
+ # Trace(Xc @ inv(Xc.T @ Xc) @ Xc.T), i.e. the number of non-zero coefs
2249
+ degrees_of_freedom[k] = np.sum(mask)
2250
+
2251
+ self.alphas_ = alphas_
2252
+
2253
+ if self.noise_variance is None:
2254
+ self.noise_variance_ = self._estimate_noise_variance(
2255
+ X, y, positive=self.positive
2256
+ )
2257
+ else:
2258
+ self.noise_variance_ = self.noise_variance
2259
+
2260
+ self.criterion_ = (
2261
+ n_samples * np.log(2 * np.pi * self.noise_variance_)
2262
+ + residuals_sum_squares / self.noise_variance_
2263
+ + criterion_factor * degrees_of_freedom
2264
+ )
2265
+ n_best = np.argmin(self.criterion_)
2266
+
2267
+ self.alpha_ = alphas_[n_best]
2268
+ self.coef_ = coef_path_[:, n_best]
2269
+ self._set_intercept(Xmean, ymean, Xstd)
2270
+ return self
2271
+
2272
+ def _estimate_noise_variance(self, X, y, positive):
2273
+ """Compute an estimate of the variance with an OLS model.
2274
+
2275
+ Parameters
2276
+ ----------
2277
+ X : ndarray of shape (n_samples, n_features)
2278
+ Data to be fitted by the OLS model. We expect the data to be
2279
+ centered.
2280
+
2281
+ y : ndarray of shape (n_samples,)
2282
+ Associated target.
2283
+
2284
+ positive : bool, default=False
2285
+ Restrict coefficients to be >= 0. This should be in line with
2286
+ the `positive` parameter from `LassoLarsIC`.
2287
+
2288
+ Returns
2289
+ -------
2290
+ noise_variance : float
2291
+ An estimator of the noise variance of an OLS model.
2292
+ """
2293
+ if X.shape[0] <= X.shape[1] + self.fit_intercept:
2294
+ raise ValueError(
2295
+ f"You are using {self.__class__.__name__} in the case where the number "
2296
+ "of samples is smaller than the number of features. In this setting, "
2297
+ "getting a good estimate for the variance of the noise is not "
2298
+ "possible. Provide an estimate of the noise variance in the "
2299
+ "constructor."
2300
+ )
2301
+ # X and y are already centered and we don't need to fit with an intercept
2302
+ ols_model = LinearRegression(positive=positive, fit_intercept=False)
2303
+ y_pred = ols_model.fit(X, y).predict(X)
2304
+ return np.sum((y - y_pred) ** 2) / (
2305
+ X.shape[0] - X.shape[1] - self.fit_intercept
2306
+ )
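To round off the least-angle regression part of the diff, a hedged sketch (illustrative sizes and seed) of how the AIC/BIC selection performed in `LassoLarsIC.fit` can be inspected:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LassoLarsIC

X, y = make_regression(n_samples=100, n_features=20, noise=4.0, random_state=0)
aic = LassoLarsIC(criterion="aic").fit(X, y)
bic = LassoLarsIC(criterion="bic").fit(X, y)
# criterion_ holds the information criterion along the path, alpha_ is the
# value minimizing it, and the heavier BIC penalty tends to pick a model that
# is at least as sparse as the AIC one.
print(aic.alpha_, bic.alpha_, aic.noise_variance_)
print(np.sum(aic.coef_ != 0), np.sum(bic.coef_ != 0))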
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py ADDED
@@ -0,0 +1,671 @@
1
+ """
2
+ Loss functions for linear models with raw_prediction = X @ coef
3
+ """
4
+ import numpy as np
5
+ from scipy import sparse
6
+
7
+ from ..utils.extmath import squared_norm
8
+
9
+
10
+ class LinearModelLoss:
11
+ """General class for loss functions with raw_prediction = X @ coef + intercept.
12
+
13
+ Note that raw_prediction is also known as linear predictor.
14
+
15
+ The loss is the average of per sample losses and includes a term for L2
16
+ regularization::
17
+
18
+ loss = 1 / s_sum * sum_i s_i loss(y_i, X_i @ coef + intercept)
19
+ + 1/2 * l2_reg_strength * ||coef||_2^2
20
+
21
+ with sample weights s_i=1 if sample_weight=None and s_sum=sum_i s_i.
22
+
23
+ Gradient and hessian, for simplicity without intercept, are::
24
+
25
+ gradient = 1 / s_sum * X.T @ loss.gradient + l2_reg_strength * coef
26
+ hessian = 1 / s_sum * X.T @ diag(loss.hessian) @ X
27
+ + l2_reg_strength * identity
28
+
29
+ Conventions:
30
+ if fit_intercept:
31
+ n_dof = n_features + 1
32
+ else:
33
+ n_dof = n_features
34
+
35
+ if base_loss.is_multiclass:
36
+ coef.shape = (n_classes, n_dof) or ravelled (n_classes * n_dof,)
37
+ else:
38
+ coef.shape = (n_dof,)
39
+
40
+ The intercept term is at the end of the coef array:
41
+ if base_loss.is_multiclass:
42
+ if coef.shape (n_classes, n_dof):
43
+ intercept = coef[:, -1]
44
+ if coef.shape (n_classes * n_dof,)
45
+ intercept = coef[n_features::n_dof] = coef[(n_dof-1)::n_dof]
46
+ intercept.shape = (n_classes,)
47
+ else:
48
+ intercept = coef[-1]
49
+
50
+ Note: If coef has shape (n_classes * n_dof,), the 2d-array can be reconstructed as
51
+
52
+ coef.reshape((n_classes, -1), order="F")
53
+
54
+ The option order="F" makes coef[:, i] contiguous. This, in turn, makes the
55
+ coefficients without intercept, coef[:, :-1], contiguous and speeds up
56
+ matrix-vector computations.
57
+
58
+ Note: If the average loss per sample is wanted instead of the sum of the loss per
59
+ sample, one can simply use a rescaled sample_weight such that
60
+ sum(sample_weight) = 1.
61
+
62
+ Parameters
63
+ ----------
64
+ base_loss : instance of class BaseLoss from sklearn._loss.
65
+ fit_intercept : bool
66
+ """
67
+
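As an illustration of the coefficient layout described in the docstring above (the shapes are arbitrary), the order="F" ravel keeps the classes of one feature contiguous, the matching reshape recovers the 2d view, and the intercepts end up in the last n_classes entries:

import numpy as np

n_classes, n_features = 3, 4
n_dof = n_features + 1  # with an intercept column
coef_2d = np.arange(n_classes * n_dof, dtype=float).reshape(n_classes, n_dof)
coef_1d = coef_2d.ravel(order="F")
# Flattening in Fortran order groups the n_classes values of each feature
# together; reshaping with order="F" reconstructs the (n_classes, n_dof) array.
assert np.array_equal(coef_1d.reshape((n_classes, -1), order="F"), coef_2d)
assert np.array_equal(coef_1d[-n_classes:], coef_2d[:, -1])  # intercept terms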
68
+ def __init__(self, base_loss, fit_intercept):
69
+ self.base_loss = base_loss
70
+ self.fit_intercept = fit_intercept
71
+
72
+ def init_zero_coef(self, X, dtype=None):
73
+ """Allocate coef of correct shape with zeros.
74
+
75
+ Parameters
76
+ ----------
77
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
78
+ Training data.
79
+ dtype : data-type, default=None
80
+ Overrides the data type of coef. With dtype=None, coef will have the same
81
+ dtype as X.
82
+
83
+ Returns
84
+ -------
85
+ coef : ndarray of shape (n_dof,) or (n_classes, n_dof)
86
+ Coefficients of a linear model.
87
+ """
88
+ n_features = X.shape[1]
89
+ n_classes = self.base_loss.n_classes
90
+ if self.fit_intercept:
91
+ n_dof = n_features + 1
92
+ else:
93
+ n_dof = n_features
94
+ if self.base_loss.is_multiclass:
95
+ coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order="F")
96
+ else:
97
+ coef = np.zeros_like(X, shape=n_dof, dtype=dtype)
98
+ return coef
99
+
100
+ def weight_intercept(self, coef):
101
+ """Helper function to get coefficients and intercept.
102
+
103
+ Parameters
104
+ ----------
105
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
106
+ Coefficients of a linear model.
107
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
108
+ i.e. one reconstructs the 2d-array via
109
+ coef.reshape((n_classes, -1), order="F").
110
+
111
+ Returns
112
+ -------
113
+ weights : ndarray of shape (n_features,) or (n_classes, n_features)
114
+ Coefficients without intercept term.
115
+ intercept : float or ndarray of shape (n_classes,)
116
+ Intercept terms.
117
+ """
118
+ if not self.base_loss.is_multiclass:
119
+ if self.fit_intercept:
120
+ intercept = coef[-1]
121
+ weights = coef[:-1]
122
+ else:
123
+ intercept = 0.0
124
+ weights = coef
125
+ else:
126
+ # reshape to (n_classes, n_dof)
127
+ if coef.ndim == 1:
128
+ weights = coef.reshape((self.base_loss.n_classes, -1), order="F")
129
+ else:
130
+ weights = coef
131
+ if self.fit_intercept:
132
+ intercept = weights[:, -1]
133
+ weights = weights[:, :-1]
134
+ else:
135
+ intercept = 0.0
136
+
137
+ return weights, intercept
138
+
139
+ def weight_intercept_raw(self, coef, X):
140
+ """Helper function to get coefficients, intercept and raw_prediction.
141
+
142
+ Parameters
143
+ ----------
144
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
145
+ Coefficients of a linear model.
146
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
147
+ i.e. one reconstructs the 2d-array via
148
+ coef.reshape((n_classes, -1), order="F").
149
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
150
+ Training data.
151
+
152
+ Returns
153
+ -------
154
+ weights : ndarray of shape (n_features,) or (n_classes, n_features)
155
+ Coefficients without intercept term.
156
+ intercept : float or ndarray of shape (n_classes,)
157
+ Intercept terms.
158
+ raw_prediction : ndarray of shape (n_samples,) or \
159
+ (n_samples, n_classes)
160
+ """
161
+ weights, intercept = self.weight_intercept(coef)
162
+
163
+ if not self.base_loss.is_multiclass:
164
+ raw_prediction = X @ weights + intercept
165
+ else:
166
+ # weights has shape (n_classes, n_dof)
167
+ raw_prediction = X @ weights.T + intercept # ndarray, likely C-contiguous
168
+
169
+ return weights, intercept, raw_prediction
170
+
171
+ def l2_penalty(self, weights, l2_reg_strength):
172
+ """Compute L2 penalty term l2_reg_strength/2 *||w||_2^2."""
173
+ norm2_w = weights @ weights if weights.ndim == 1 else squared_norm(weights)
174
+ return 0.5 * l2_reg_strength * norm2_w
175
+
176
+ def loss(
177
+ self,
178
+ coef,
179
+ X,
180
+ y,
181
+ sample_weight=None,
182
+ l2_reg_strength=0.0,
183
+ n_threads=1,
184
+ raw_prediction=None,
185
+ ):
186
+ """Compute the loss as weighted average over point-wise losses.
187
+
188
+ Parameters
189
+ ----------
190
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
191
+ Coefficients of a linear model.
192
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
193
+ i.e. one reconstructs the 2d-array via
194
+ coef.reshape((n_classes, -1), order="F").
195
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
196
+ Training data.
197
+ y : contiguous array of shape (n_samples,)
198
+ Observed, true target values.
199
+ sample_weight : None or contiguous array of shape (n_samples,), default=None
200
+ Sample weights.
201
+ l2_reg_strength : float, default=0.0
202
+ L2 regularization strength
203
+ n_threads : int, default=1
204
+ Number of OpenMP threads to use.
205
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
206
+ shape (n_samples, n_classes)
207
+ Raw prediction values (in link space). If provided, these are used. If
208
+ None, then raw_prediction = X @ coef + intercept is calculated.
209
+
210
+ Returns
211
+ -------
212
+ loss : float
213
+ Weighted average of losses per sample, plus penalty.
214
+ """
215
+ if raw_prediction is None:
216
+ weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
217
+ else:
218
+ weights, intercept = self.weight_intercept(coef)
219
+
220
+ loss = self.base_loss.loss(
221
+ y_true=y,
222
+ raw_prediction=raw_prediction,
223
+ sample_weight=None,
224
+ n_threads=n_threads,
225
+ )
226
+ loss = np.average(loss, weights=sample_weight)
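+ # np.average computes the weighted mean 1 / s_sum * sum_i s_i * loss_i from
+ # the class docstring; with sample_weight=None it reduces to a plain mean.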
227
+
228
+ return loss + self.l2_penalty(weights, l2_reg_strength)
229
+
230
+ def loss_gradient(
231
+ self,
232
+ coef,
233
+ X,
234
+ y,
235
+ sample_weight=None,
236
+ l2_reg_strength=0.0,
237
+ n_threads=1,
238
+ raw_prediction=None,
239
+ ):
240
+ """Computes the sum of loss and gradient w.r.t. coef.
241
+
242
+ Parameters
243
+ ----------
244
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
245
+ Coefficients of a linear model.
246
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
247
+ i.e. one reconstructs the 2d-array via
248
+ coef.reshape((n_classes, -1), order="F").
249
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
250
+ Training data.
251
+ y : contiguous array of shape (n_samples,)
252
+ Observed, true target values.
253
+ sample_weight : None or contiguous array of shape (n_samples,), default=None
254
+ Sample weights.
255
+ l2_reg_strength : float, default=0.0
256
+ L2 regularization strength
257
+ n_threads : int, default=1
258
+ Number of OpenMP threads to use.
259
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
260
+ shape (n_samples, n_classes)
261
+ Raw prediction values (in link space). If provided, these are used. If
262
+ None, then raw_prediction = X @ coef + intercept is calculated.
263
+
264
+ Returns
265
+ -------
266
+ loss : float
267
+ Weighted average of losses per sample, plus penalty.
268
+
269
+ gradient : ndarray of shape coef.shape
270
+ The gradient of the loss.
271
+ """
272
+ (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
273
+ n_dof = n_features + int(self.fit_intercept)
274
+
275
+ if raw_prediction is None:
276
+ weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
277
+ else:
278
+ weights, intercept = self.weight_intercept(coef)
279
+
280
+ loss, grad_pointwise = self.base_loss.loss_gradient(
281
+ y_true=y,
282
+ raw_prediction=raw_prediction,
283
+ sample_weight=sample_weight,
284
+ n_threads=n_threads,
285
+ )
286
+ sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
287
+ loss = loss.sum() / sw_sum
288
+ loss += self.l2_penalty(weights, l2_reg_strength)
289
+
290
+ grad_pointwise /= sw_sum
291
+
292
+ if not self.base_loss.is_multiclass:
293
+ grad = np.empty_like(coef, dtype=weights.dtype)
294
+ grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
295
+ if self.fit_intercept:
296
+ grad[-1] = grad_pointwise.sum()
297
+ else:
298
+ grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
299
+ # grad_pointwise.shape = (n_samples, n_classes)
300
+ grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
301
+ if self.fit_intercept:
302
+ grad[:, -1] = grad_pointwise.sum(axis=0)
303
+ if coef.ndim == 1:
304
+ grad = grad.ravel(order="F")
305
+
306
+ return loss, grad
307
+
308
+ def gradient(
309
+ self,
310
+ coef,
311
+ X,
312
+ y,
313
+ sample_weight=None,
314
+ l2_reg_strength=0.0,
315
+ n_threads=1,
316
+ raw_prediction=None,
317
+ ):
318
+ """Computes the gradient w.r.t. coef.
319
+
320
+ Parameters
321
+ ----------
322
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
323
+ Coefficients of a linear model.
324
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
325
+ i.e. one reconstructs the 2d-array via
326
+ coef.reshape((n_classes, -1), order="F").
327
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
328
+ Training data.
329
+ y : contiguous array of shape (n_samples,)
330
+ Observed, true target values.
331
+ sample_weight : None or contiguous array of shape (n_samples,), default=None
332
+ Sample weights.
333
+ l2_reg_strength : float, default=0.0
334
+ L2 regularization strength
335
+ n_threads : int, default=1
336
+ Number of OpenMP threads to use.
337
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
338
+ shape (n_samples, n_classes)
339
+ Raw prediction values (in link space). If provided, these are used. If
340
+ None, then raw_prediction = X @ coef + intercept is calculated.
341
+
342
+ Returns
343
+ -------
344
+ gradient : ndarray of shape coef.shape
345
+ The gradient of the loss.
346
+ """
347
+ (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
348
+ n_dof = n_features + int(self.fit_intercept)
349
+
350
+ if raw_prediction is None:
351
+ weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
352
+ else:
353
+ weights, intercept = self.weight_intercept(coef)
354
+
355
+ grad_pointwise = self.base_loss.gradient(
356
+ y_true=y,
357
+ raw_prediction=raw_prediction,
358
+ sample_weight=sample_weight,
359
+ n_threads=n_threads,
360
+ )
361
+ sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
362
+ grad_pointwise /= sw_sum
363
+
364
+ if not self.base_loss.is_multiclass:
365
+ grad = np.empty_like(coef, dtype=weights.dtype)
366
+ grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
367
+ if self.fit_intercept:
368
+ grad[-1] = grad_pointwise.sum()
369
+ return grad
370
+ else:
371
+ grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
372
+ # grad_pointwise.shape = (n_samples, n_classes)
373
+ grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
374
+ if self.fit_intercept:
375
+ grad[:, -1] = grad_pointwise.sum(axis=0)
376
+ if coef.ndim == 1:
377
+ return grad.ravel(order="F")
378
+ else:
379
+ return grad
380
+
381
+ def gradient_hessian(
382
+ self,
383
+ coef,
384
+ X,
385
+ y,
386
+ sample_weight=None,
387
+ l2_reg_strength=0.0,
388
+ n_threads=1,
389
+ gradient_out=None,
390
+ hessian_out=None,
391
+ raw_prediction=None,
392
+ ):
393
+ """Computes gradient and hessian w.r.t. coef.
394
+
395
+ Parameters
396
+ ----------
397
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
398
+ Coefficients of a linear model.
399
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
400
+ i.e. one reconstructs the 2d-array via
401
+ coef.reshape((n_classes, -1), order="F").
402
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
403
+ Training data.
404
+ y : contiguous array of shape (n_samples,)
405
+ Observed, true target values.
406
+ sample_weight : None or contiguous array of shape (n_samples,), default=None
407
+ Sample weights.
408
+ l2_reg_strength : float, default=0.0
409
+ L2 regularization strength
410
+ n_threads : int, default=1
411
+ Number of OpenMP threads to use.
412
+ gradient_out : None or ndarray of shape coef.shape
413
+ A location into which the gradient is stored. If None, a new array
414
+ might be created.
415
+ hessian_out : None or ndarray
416
+ A location into which the hessian is stored. If None, a new array
417
+ might be created.
418
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
419
+ shape (n_samples, n_classes)
420
+ Raw prediction values (in link space). If provided, these are used. If
421
+ None, then raw_prediction = X @ coef + intercept is calculated.
422
+
423
+ Returns
424
+ -------
425
+ gradient : ndarray of shape coef.shape
426
+ The gradient of the loss.
427
+
428
+ hessian : ndarray
429
+ Hessian matrix.
430
+
431
+ hessian_warning : bool
432
+ True if more than 25% of the pointwise hessian elements are non-positive.
433
+ """
434
+ n_samples, n_features = X.shape
435
+ n_dof = n_features + int(self.fit_intercept)
436
+
437
+ if raw_prediction is None:
438
+ weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
439
+ else:
440
+ weights, intercept = self.weight_intercept(coef)
441
+
442
+ grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
443
+ y_true=y,
444
+ raw_prediction=raw_prediction,
445
+ sample_weight=sample_weight,
446
+ n_threads=n_threads,
447
+ )
448
+ sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
449
+ grad_pointwise /= sw_sum
450
+ hess_pointwise /= sw_sum
451
+
452
+ # For non-canonical link functions and far away from the optimum, the pointwise
453
+ # hessian can be negative. We take care that 75% of the hessian entries are
454
+ # positive.
455
+ hessian_warning = np.mean(hess_pointwise <= 0) > 0.25
456
+ hess_pointwise = np.abs(hess_pointwise)
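+ # Replacing negative entries by their absolute values keeps the sandwich
+ # product X.T @ diag(hess_pointwise) @ X computed below positive
+ # semidefinite.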
457
+
458
+ if not self.base_loss.is_multiclass:
459
+ # gradient
460
+ if gradient_out is None:
461
+ grad = np.empty_like(coef, dtype=weights.dtype)
462
+ else:
463
+ grad = gradient_out
464
+ grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
465
+ if self.fit_intercept:
466
+ grad[-1] = grad_pointwise.sum()
467
+
468
+ # hessian
469
+ if hessian_out is None:
470
+ hess = np.empty(shape=(n_dof, n_dof), dtype=weights.dtype)
471
+ else:
472
+ hess = hessian_out
473
+
474
+ if hessian_warning:
475
+ # Exit early without computing the hessian.
476
+ return grad, hess, hessian_warning
477
+
478
+ # TODO: This "sandwich product", X' diag(W) X, is the main computational
479
+ # bottleneck for solvers. A dedicated Cython routine might improve it
480
+ # exploiting the symmetry (as opposed to, e.g., BLAS gemm).
481
+ if sparse.issparse(X):
482
+ hess[:n_features, :n_features] = (
483
+ X.T
484
+ @ sparse.dia_matrix(
485
+ (hess_pointwise, 0), shape=(n_samples, n_samples)
486
+ )
487
+ @ X
488
+ ).toarray()
489
+ else:
490
+ # np.einsum may use less memory but the following, using BLAS matrix
491
+ # multiplication (gemm), is by far faster.
492
+ WX = hess_pointwise[:, None] * X
493
+ hess[:n_features, :n_features] = np.dot(X.T, WX)
494
+
495
+ if l2_reg_strength > 0:
496
+ # The L2 penalty enters the Hessian on the diagonal only. To add those
497
+ # terms, we use a flattened view on the array.
498
+ hess.reshape(-1)[
499
+ : (n_features * n_dof) : (n_dof + 1)
500
+ ] += l2_reg_strength
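+ # For example, with n_features=2 and fit_intercept=True (n_dof=3), the
+ # strided slice hits flat indices 0 and 4, i.e. hess[0, 0] and hess[1, 1],
+ # while the intercept entry hess[2, 2] stays unpenalized.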
501
+
502
+ if self.fit_intercept:
503
+ # With intercept included as added column to X, the hessian becomes
504
+ # hess = (X, 1)' @ diag(h) @ (X, 1)
505
+ # = (X' @ diag(h) @ X, X' @ h)
506
+ # ( h @ X, sum(h))
507
+ # The left upper part has already been filled, it remains to compute
508
+ # the last row and the last column.
509
+ Xh = X.T @ hess_pointwise
510
+ hess[:-1, -1] = Xh
511
+ hess[-1, :-1] = Xh
512
+ hess[-1, -1] = hess_pointwise.sum()
513
+ else:
514
+ # Here we may safely assume HalfMultinomialLoss aka categorical
515
+ # cross-entropy.
516
+ raise NotImplementedError
517
+
518
+ return grad, hess, hessian_warning
519
+
520
+ def gradient_hessian_product(
521
+ self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1
522
+ ):
523
+ """Computes gradient and hessp (hessian product function) w.r.t. coef.
524
+
525
+ Parameters
526
+ ----------
527
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
528
+ Coefficients of a linear model.
529
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
530
+ i.e. one reconstructs the 2d-array via
531
+ coef.reshape((n_classes, -1), order="F").
532
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
533
+ Training data.
534
+ y : contiguous array of shape (n_samples,)
535
+ Observed, true target values.
536
+ sample_weight : None or contiguous array of shape (n_samples,), default=None
537
+ Sample weights.
538
+ l2_reg_strength : float, default=0.0
539
+ L2 regularization strength
540
+ n_threads : int, default=1
541
+ Number of OpenMP threads to use.
542
+
543
+ Returns
544
+ -------
545
+ gradient : ndarray of shape coef.shape
546
+ The gradient of the loss.
547
+
548
+ hessp : callable
549
+ Function that takes a vector input of the same shape as the gradient and
550
+ returns the matrix-vector product with the hessian.
551
+ """
552
+ (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
553
+ n_dof = n_features + int(self.fit_intercept)
554
+ weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
555
+ sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
556
+
557
+ if not self.base_loss.is_multiclass:
558
+ grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
559
+ y_true=y,
560
+ raw_prediction=raw_prediction,
561
+ sample_weight=sample_weight,
562
+ n_threads=n_threads,
563
+ )
564
+ grad_pointwise /= sw_sum
565
+ hess_pointwise /= sw_sum
566
+ grad = np.empty_like(coef, dtype=weights.dtype)
567
+ grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
568
+ if self.fit_intercept:
569
+ grad[-1] = grad_pointwise.sum()
570
+
571
+ # Precompute as much as possible: hX, hX_sum and hessian_sum
572
+ hessian_sum = hess_pointwise.sum()
573
+ if sparse.issparse(X):
574
+ hX = (
575
+ sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples))
576
+ @ X
577
+ )
578
+ else:
579
+ hX = hess_pointwise[:, np.newaxis] * X
580
+
581
+ if self.fit_intercept:
582
+ # Calculate the double derivative with respect to intercept.
583
+ # Note: In case hX is sparse, hX.sum is a matrix object.
584
+ hX_sum = np.squeeze(np.asarray(hX.sum(axis=0)))
585
+ # prevent squeezing to zero-dim array if n_features == 1
586
+ hX_sum = np.atleast_1d(hX_sum)
587
+
588
+ # With intercept included and l2_reg_strength = 0, hessp returns
589
+ # res = (X, 1)' @ diag(h) @ (X, 1) @ s
590
+ # = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1])
591
+ # res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1]
592
+ # res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1]
593
+ def hessp(s):
594
+ ret = np.empty_like(s)
595
+ if sparse.issparse(X):
596
+ ret[:n_features] = X.T @ (hX @ s[:n_features])
597
+ else:
598
+ ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]])
599
+ ret[:n_features] += l2_reg_strength * s[:n_features]
600
+
601
+ if self.fit_intercept:
602
+ ret[:n_features] += s[-1] * hX_sum
603
+ ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1]
604
+ return ret
605
+
606
+ else:
607
+ # Here we may safely assume HalfMultinomialLoss aka categorical
608
+ # cross-entropy.
609
+ # HalfMultinomialLoss computes only the diagonal part of the hessian, i.e.
610
+ # diagonal in the classes. Here, we want the matrix-vector product of the
611
+ # full hessian. Therefore, we call gradient_proba.
612
+ grad_pointwise, proba = self.base_loss.gradient_proba(
613
+ y_true=y,
614
+ raw_prediction=raw_prediction,
615
+ sample_weight=sample_weight,
616
+ n_threads=n_threads,
617
+ )
618
+ grad_pointwise /= sw_sum
619
+ grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
620
+ grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
621
+ if self.fit_intercept:
622
+ grad[:, -1] = grad_pointwise.sum(axis=0)
623
+
624
+ # Full hessian-vector product, i.e. not only the diagonal part of the
625
+ # hessian. Derivation with some index battle for input vector s:
626
+ # - sample index i
627
+ # - feature indices j, m
628
+ # - class indices k, l
629
+ # - 1_{k=l} is one if k=l else 0
630
+ # - p_i_k is the (predicted) probability that sample i belongs to class k
631
+ # for all i: sum_k p_i_k = 1
632
+ # - s_l_m is input vector for class l and feature m
633
+ # - X' = X transposed
634
+ #
635
+ # Note: Hessian with dropping most indices is just:
636
+ # X' @ p_k (1(k=l) - p_l) @ X
637
+ #
638
+ # result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m
639
+ # = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l)
640
+ # * X_{im} s_l_m
641
+ # = sum_{i, m} (X')_{ji} * p_i_k
642
+ # * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m)
643
+ #
644
+ # See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411 # noqa
645
+ def hessp(s):
646
+ s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof)
647
+ if self.fit_intercept:
648
+ s_intercept = s[:, -1]
649
+ s = s[:, :-1] # shape = (n_classes, n_features)
650
+ else:
651
+ s_intercept = 0
652
+ tmp = X @ s.T + s_intercept # X_{im} * s_k_m
653
+ tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l ..
654
+ tmp *= proba # * p_i_k
655
+ if sample_weight is not None:
656
+ tmp *= sample_weight[:, np.newaxis]
657
+ # hess_prod = empty_like(grad), but we ravel grad below and this
658
+ # function is run after that.
659
+ hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
660
+ hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s
661
+ if self.fit_intercept:
662
+ hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum
663
+ if coef.ndim == 1:
664
+ return hess_prod.ravel(order="F")
665
+ else:
666
+ return hess_prod
667
+
668
+ if coef.ndim == 1:
669
+ return grad.ravel(order="F"), hessp
670
+
671
+ return grad, hessp
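+
+ # Editorial sketch (not part of the upstream file): LinearModelLoss is meant
+ # to be driven by a generic optimizer. A minimal binary example, assuming
+ # arrays `X` of shape (n_samples, n_features) and `y` with values in {0, 1}:
+ #
+ #   >>> import numpy as np
+ #   >>> from scipy import optimize
+ #   >>> from sklearn._loss.loss import HalfBinomialLoss
+ #   >>> lml = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=True)
+ #   >>> w0 = lml.init_zero_coef(X)
+ #   >>> res = optimize.minimize(
+ #   ...     lml.loss_gradient, w0, jac=True, method="L-BFGS-B",
+ #   ...     args=(X, y.astype(X.dtype), None, 1e-3, 1))
+ #
+ # The args tuple maps to (X, y, sample_weight, l2_reg_strength, n_threads),
+ # which mirrors how the lbfgs path in _logistic.py below calls loss_gradient.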
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py ADDED
@@ -0,0 +1,2190 @@
1
+ """
2
+ Logistic Regression
3
+ """
4
+
5
+ # Author: Gael Varoquaux <[email protected]>
6
+ # Fabian Pedregosa <[email protected]>
7
+ # Alexandre Gramfort <[email protected]>
8
+ # Manoj Kumar <[email protected]>
9
+ # Lars Buitinck
10
+ # Simon Wu <[email protected]>
11
+ # Arthur Mensch <[email protected]>
12
+
13
+ import numbers
14
+ import warnings
15
+ from numbers import Integral, Real
16
+
17
+ import numpy as np
18
+ from joblib import effective_n_jobs
19
+ from scipy import optimize
20
+
21
+ from sklearn.metrics import get_scorer_names
22
+
23
+ from .._loss.loss import HalfBinomialLoss, HalfMultinomialLoss
24
+ from ..base import _fit_context
25
+ from ..metrics import get_scorer
26
+ from ..model_selection import check_cv
27
+ from ..preprocessing import LabelBinarizer, LabelEncoder
28
+ from ..svm._base import _fit_liblinear
29
+ from ..utils import (
30
+ Bunch,
31
+ check_array,
32
+ check_consistent_length,
33
+ check_random_state,
34
+ compute_class_weight,
35
+ )
36
+ from ..utils._param_validation import Interval, StrOptions
37
+ from ..utils.extmath import row_norms, softmax
38
+ from ..utils.metadata_routing import (
39
+ MetadataRouter,
40
+ MethodMapping,
41
+ _raise_for_params,
42
+ _routing_enabled,
43
+ process_routing,
44
+ )
45
+ from ..utils.multiclass import check_classification_targets
46
+ from ..utils.optimize import _check_optimize_result, _newton_cg
47
+ from ..utils.parallel import Parallel, delayed
48
+ from ..utils.validation import (
49
+ _check_method_params,
50
+ _check_sample_weight,
51
+ check_is_fitted,
52
+ )
53
+ from ._base import BaseEstimator, LinearClassifierMixin, SparseCoefMixin
54
+ from ._glm.glm import NewtonCholeskySolver
55
+ from ._linear_loss import LinearModelLoss
56
+ from ._sag import sag_solver
57
+
58
+ _LOGISTIC_SOLVER_CONVERGENCE_MSG = (
59
+ "Please also refer to the documentation for alternative solver options:\n"
60
+ " https://scikit-learn.org/stable/modules/linear_model.html"
61
+ "#logistic-regression"
62
+ )
63
+
64
+
65
+ def _check_solver(solver, penalty, dual):
66
+ if solver not in ["liblinear", "saga"] and penalty not in ("l2", None):
67
+ raise ValueError(
68
+ f"Solver {solver} supports only 'l2' or None penalties, got {penalty} "
69
+ "penalty."
70
+ )
71
+ if solver != "liblinear" and dual:
72
+ raise ValueError(f"Solver {solver} supports only dual=False, got dual={dual}")
73
+
74
+ if penalty == "elasticnet" and solver != "saga":
75
+ raise ValueError(
76
+ f"Only 'saga' solver supports elasticnet penalty, got solver={solver}."
77
+ )
78
+
79
+ if solver == "liblinear" and penalty is None:
80
+ raise ValueError("penalty=None is not supported for the liblinear solver")
81
+
82
+ return solver
83
+
84
+
85
+ def _check_multi_class(multi_class, solver, n_classes):
86
+ """Computes the multi class type, either "multinomial" or "ovr".
87
+
88
+ For `n_classes` > 2 and a solver that supports it, returns "multinomial".
89
+ For all other cases, in particular binary classification, return "ovr".
90
+ """
91
+ if multi_class == "auto":
92
+ if solver in ("liblinear", "newton-cholesky"):
93
+ multi_class = "ovr"
94
+ elif n_classes > 2:
95
+ multi_class = "multinomial"
96
+ else:
97
+ multi_class = "ovr"
98
+ if multi_class == "multinomial" and solver in ("liblinear", "newton-cholesky"):
99
+ raise ValueError("Solver %s does not support a multinomial backend." % solver)
100
+ return multi_class
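+ # Editorial note (not part of the upstream file): for instance, with
+ # multi_class="auto" this returns "multinomial" for solver="lbfgs" and
+ # n_classes=3, but "ovr" for solver="liblinear" or for a binary problem.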
101
+
102
+
103
+ def _logistic_regression_path(
104
+ X,
105
+ y,
106
+ pos_class=None,
107
+ Cs=10,
108
+ fit_intercept=True,
109
+ max_iter=100,
110
+ tol=1e-4,
111
+ verbose=0,
112
+ solver="lbfgs",
113
+ coef=None,
114
+ class_weight=None,
115
+ dual=False,
116
+ penalty="l2",
117
+ intercept_scaling=1.0,
118
+ multi_class="auto",
119
+ random_state=None,
120
+ check_input=True,
121
+ max_squared_sum=None,
122
+ sample_weight=None,
123
+ l1_ratio=None,
124
+ n_threads=1,
125
+ ):
126
+ """Compute a Logistic Regression model for a list of regularization
127
+ parameters.
128
+
129
+ This is an implementation that uses the result of the previous model
130
+ to speed up computations along the set of solutions, making it faster
131
+ than sequentially calling LogisticRegression for the different parameters.
132
+ Note that there will be no speedup with liblinear solver, since it does
133
+ not handle warm-starting.
134
+
135
+ Read more in the :ref:`User Guide <logistic_regression>`.
136
+
137
+ Parameters
138
+ ----------
139
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
140
+ Input data.
141
+
142
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
143
+ Input data, target values.
144
+
145
+ pos_class : int, default=None
146
+ The class with respect to which we perform a one-vs-all fit.
147
+ If None, then it is assumed that the given problem is binary.
148
+
149
+ Cs : int or array-like of shape (n_cs,), default=10
150
+ List of values for the regularization parameter or integer specifying
151
+ the number of regularization parameters that should be used. In this
152
+ case, the parameters will be chosen in a logarithmic scale between
153
+ 1e-4 and 1e4.
154
+
155
+ fit_intercept : bool, default=True
156
+ Whether to fit an intercept for the model. In this case the shape of
157
+ the returned array is (n_cs, n_features + 1).
158
+
159
+ max_iter : int, default=100
160
+ Maximum number of iterations for the solver.
161
+
162
+ tol : float, default=1e-4
163
+ Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
164
+ will stop when ``max{|g_i | i = 1, ..., n} <= tol``
165
+ where ``g_i`` is the i-th component of the gradient.
166
+
167
+ verbose : int, default=0
168
+ For the liblinear and lbfgs solvers set verbose to any positive
169
+ number for verbosity.
170
+
171
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
172
+ default='lbfgs'
173
+ Numerical solver to use.
174
+
175
+ coef : array-like of shape (n_features,), default=None
176
+ Initialization value for coefficients of logistic regression.
177
+ Useless for liblinear solver.
178
+
179
+ class_weight : dict or 'balanced', default=None
180
+ Weights associated with classes in the form ``{class_label: weight}``.
181
+ If not given, all classes are supposed to have weight one.
182
+
183
+ The "balanced" mode uses the values of y to automatically adjust
184
+ weights inversely proportional to class frequencies in the input data
185
+ as ``n_samples / (n_classes * np.bincount(y))``.
186
+
187
+ Note that these weights will be multiplied with sample_weight (passed
188
+ through the fit method) if sample_weight is specified.
189
+
190
+ dual : bool, default=False
191
+ Dual or primal formulation. Dual formulation is only implemented for
192
+ l2 penalty with liblinear solver. Prefer dual=False when
193
+ n_samples > n_features.
194
+
195
+ penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
196
+ Used to specify the norm used in the penalization. The 'newton-cg',
197
+ 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
198
+ only supported by the 'saga' solver.
199
+
200
+ intercept_scaling : float, default=1.
201
+ Useful only when the solver 'liblinear' is used
202
+ and self.fit_intercept is set to True. In this case, x becomes
203
+ [x, self.intercept_scaling],
204
+ i.e. a "synthetic" feature with constant value equal to
205
+ intercept_scaling is appended to the instance vector.
206
+ The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
207
+
208
+ Note! the synthetic feature weight is subject to l1/l2 regularization
209
+ as all other features.
210
+ To lessen the effect of regularization on synthetic feature weight
211
+ (and therefore on the intercept) intercept_scaling has to be increased.
212
+
213
+ multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
214
+ If the option chosen is 'ovr', then a binary problem is fit for each
215
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
216
+ across the entire probability distribution, *even when the data is
217
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
218
+ 'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
219
+ and otherwise selects 'multinomial'.
220
+
221
+ .. versionadded:: 0.18
222
+ Stochastic Average Gradient descent solver for 'multinomial' case.
223
+ .. versionchanged:: 0.22
224
+ Default changed from 'ovr' to 'auto' in 0.22.
225
+
226
+ random_state : int, RandomState instance, default=None
227
+ Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
228
+ data. See :term:`Glossary <random_state>` for details.
229
+
230
+ check_input : bool, default=True
231
+ If False, the input arrays X and y will not be checked.
232
+
233
+ max_squared_sum : float, default=None
234
+ Maximum squared sum of X over samples. Used only in SAG solver.
235
+ If None, it will be computed, going through all the samples.
236
+ The value should be precomputed to speed up cross validation.
237
+
238
+ sample_weight : array-like of shape(n_samples,), default=None
239
+ Array of weights that are assigned to individual samples.
240
+ If not provided, then each sample is given unit weight.
241
+
242
+ l1_ratio : float, default=None
243
+ The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
244
+ used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
245
+ to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
246
+ to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
247
+ combination of L1 and L2.
248
+
249
+ n_threads : int, default=1
250
+ Number of OpenMP threads to use.
251
+
252
+ Returns
253
+ -------
254
+ coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
255
+ List of coefficients for the Logistic Regression model. If
256
+ fit_intercept is set to True then the second dimension will be
257
+ n_features + 1, where the last item represents the intercept. For
258
+ ``multiclass='multinomial'``, the shape is (n_classes, n_cs,
259
+ n_features) or (n_classes, n_cs, n_features + 1).
260
+
261
+ Cs : ndarray
262
+ Grid of Cs used for cross-validation.
263
+
264
+ n_iter : array of shape (n_cs,)
265
+ Actual number of iteration for each Cs.
266
+
267
+ Notes
268
+ -----
269
+ You might get slightly different results with the solver liblinear than
270
+ with the others since this uses LIBLINEAR which penalizes the intercept.
271
+
272
+ .. versionchanged:: 0.19
273
+ The "copy" parameter was removed.
274
+ """
275
+ if isinstance(Cs, numbers.Integral):
276
+ Cs = np.logspace(-4, 4, Cs)
277
+
278
+ solver = _check_solver(solver, penalty, dual)
279
+
280
+ # Preprocessing.
281
+ if check_input:
282
+ X = check_array(
283
+ X,
284
+ accept_sparse="csr",
285
+ dtype=np.float64,
286
+ accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
287
+ )
288
+ y = check_array(y, ensure_2d=False, dtype=None)
289
+ check_consistent_length(X, y)
290
+ n_samples, n_features = X.shape
291
+
292
+ classes = np.unique(y)
293
+ random_state = check_random_state(random_state)
294
+
295
+ multi_class = _check_multi_class(multi_class, solver, len(classes))
296
+ if pos_class is None and multi_class != "multinomial":
297
+ if classes.size > 2:
298
+ raise ValueError("To fit OvR, use the pos_class argument")
299
+ # np.unique(y) gives labels in sorted order.
300
+ pos_class = classes[1]
301
+
302
+ if sample_weight is not None or class_weight is not None:
303
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True)
304
+
305
+ # If class_weights is a dict (provided by the user), the weights
306
+ # are assigned to the original labels. If it is "balanced", then
307
+ # the class_weights are assigned after masking the labels with a OvR.
308
+ le = LabelEncoder()
309
+ if isinstance(class_weight, dict) or (
310
+ multi_class == "multinomial" and class_weight is not None
311
+ ):
312
+ class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
313
+ sample_weight *= class_weight_[le.fit_transform(y)]
314
+
315
+ # For doing a ovr, we need to mask the labels first. For the
316
+ # multinomial case this is not necessary.
317
+ if multi_class == "ovr":
318
+ w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
319
+ mask = y == pos_class
320
+ y_bin = np.ones(y.shape, dtype=X.dtype)
321
+ if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
322
+ # HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead
323
+ # of in [-1, 1].
324
+ mask_classes = np.array([0, 1])
325
+ y_bin[~mask] = 0.0
326
+ else:
327
+ mask_classes = np.array([-1, 1])
328
+ y_bin[~mask] = -1.0
329
+
330
+ # for compute_class_weight
331
+ if class_weight == "balanced":
332
+ class_weight_ = compute_class_weight(
333
+ class_weight, classes=mask_classes, y=y_bin
334
+ )
335
+ sample_weight *= class_weight_[le.fit_transform(y_bin)]
336
+
337
+ else:
338
+ if solver in ["sag", "saga", "lbfgs", "newton-cg"]:
339
+ # SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder,
340
+ # not LabelBinarizer, i.e. y as a 1d-array of integers.
341
+ # LabelEncoder also saves memory compared to LabelBinarizer, especially
342
+ # when n_classes is large.
343
+ le = LabelEncoder()
344
+ Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
345
+ else:
346
+ # For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded.
347
+ lbin = LabelBinarizer()
348
+ Y_multi = lbin.fit_transform(y)
349
+ if Y_multi.shape[1] == 1:
350
+ Y_multi = np.hstack([1 - Y_multi, Y_multi])
351
+
352
+ w0 = np.zeros(
353
+ (classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype
354
+ )
355
+
356
+ # IMPORTANT NOTE:
357
+ # All solvers relying on LinearModelLoss need to scale the penalty with n_samples
358
+ # or the sum of sample weights because the implemented logistic regression
359
+ # objective here is (unfortunately)
360
+ # C * sum(pointwise_loss) + penalty
361
+ # instead of (as LinearModelLoss does)
362
+ # mean(pointwise_loss) + 1/C * penalty
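+ # Concretely (for the L2 case, penalty = 1/2 * ||coef||_2^2), passing
+ # l2_reg_strength = 1 / (C * sw_sum) to LinearModelLoss turns its objective
+ # into 1/sw_sum * sum(pointwise_loss) + 1/(2 * C * sw_sum) * ||coef||_2^2,
+ # which is the objective above divided by the constant C * sw_sum, so both
+ # formulations share the same minimizer.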
363
+ if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
364
+ # This needs to be calculated after sample_weight is multiplied by
365
+ # class_weight. It is even tested that passing class_weight is equivalent to
366
+ # passing sample_weights according to class_weight.
367
+ sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
368
+
369
+ if coef is not None:
370
+ # it must work both giving the bias term and not
371
+ if multi_class == "ovr":
372
+ if coef.size not in (n_features, w0.size):
373
+ raise ValueError(
374
+ "Initialization coef is of shape %d, expected shape %d or %d"
375
+ % (coef.size, n_features, w0.size)
376
+ )
377
+ w0[: coef.size] = coef
378
+ else:
379
+ # For binary problems coef.shape[0] should be 1, otherwise it
380
+ # should be classes.size.
381
+ n_classes = classes.size
382
+ if n_classes == 2:
383
+ n_classes = 1
384
+
385
+ if coef.shape[0] != n_classes or coef.shape[1] not in (
386
+ n_features,
387
+ n_features + 1,
388
+ ):
389
+ raise ValueError(
390
+ "Initialization coef is of shape (%d, %d), expected "
391
+ "shape (%d, %d) or (%d, %d)"
392
+ % (
393
+ coef.shape[0],
394
+ coef.shape[1],
395
+ classes.size,
396
+ n_features,
397
+ classes.size,
398
+ n_features + 1,
399
+ )
400
+ )
401
+
402
+ if n_classes == 1:
403
+ w0[0, : coef.shape[1]] = -coef
404
+ w0[1, : coef.shape[1]] = coef
405
+ else:
406
+ w0[:, : coef.shape[1]] = coef
407
+
408
+ if multi_class == "multinomial":
409
+ if solver in ["lbfgs", "newton-cg"]:
410
+ # scipy.optimize.minimize and newton-cg accept only ravelled parameters,
411
+ # i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and
412
+ # reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F").
413
+ # As w0 is F-contiguous, ravel(order="F") also avoids a copy.
414
+ w0 = w0.ravel(order="F")
415
+ loss = LinearModelLoss(
416
+ base_loss=HalfMultinomialLoss(n_classes=classes.size),
417
+ fit_intercept=fit_intercept,
418
+ )
419
+ target = Y_multi
420
+ if solver == "lbfgs":
421
+ func = loss.loss_gradient
422
+ elif solver == "newton-cg":
423
+ func = loss.loss
424
+ grad = loss.gradient
425
+ hess = loss.gradient_hessian_product # hess = [gradient, hessp]
426
+ warm_start_sag = {"coef": w0.T}
427
+ else:
428
+ target = y_bin
429
+ if solver == "lbfgs":
430
+ loss = LinearModelLoss(
431
+ base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
432
+ )
433
+ func = loss.loss_gradient
434
+ elif solver == "newton-cg":
435
+ loss = LinearModelLoss(
436
+ base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
437
+ )
438
+ func = loss.loss
439
+ grad = loss.gradient
440
+ hess = loss.gradient_hessian_product # hess = [gradient, hessp]
441
+ elif solver == "newton-cholesky":
442
+ loss = LinearModelLoss(
443
+ base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
444
+ )
445
+ warm_start_sag = {"coef": np.expand_dims(w0, axis=1)}
446
+
447
+ coefs = list()
448
+ n_iter = np.zeros(len(Cs), dtype=np.int32)
449
+ for i, C in enumerate(Cs):
450
+ if solver == "lbfgs":
451
+ l2_reg_strength = 1.0 / (C * sw_sum)
452
+ iprint = [-1, 50, 1, 100, 101][
453
+ np.searchsorted(np.array([0, 1, 2, 3]), verbose)
454
+ ]
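+ # verbose = 0, 1, 2, 3 and >= 4 map to iprint = -1, 50, 1, 100 and 101.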
455
+ opt_res = optimize.minimize(
456
+ func,
457
+ w0,
458
+ method="L-BFGS-B",
459
+ jac=True,
460
+ args=(X, target, sample_weight, l2_reg_strength, n_threads),
461
+ options={
462
+ "maxiter": max_iter,
463
+ "maxls": 50, # default is 20
464
+ "iprint": iprint,
465
+ "gtol": tol,
466
+ "ftol": 64 * np.finfo(float).eps,
467
+ },
468
+ )
469
+ n_iter_i = _check_optimize_result(
470
+ solver,
471
+ opt_res,
472
+ max_iter,
473
+ extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
474
+ )
475
+ w0, loss = opt_res.x, opt_res.fun
476
+ elif solver == "newton-cg":
477
+ l2_reg_strength = 1.0 / (C * sw_sum)
478
+ args = (X, target, sample_weight, l2_reg_strength, n_threads)
479
+ w0, n_iter_i = _newton_cg(
480
+ hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol
481
+ )
482
+ elif solver == "newton-cholesky":
483
+ l2_reg_strength = 1.0 / (C * sw_sum)
484
+ sol = NewtonCholeskySolver(
485
+ coef=w0,
486
+ linear_loss=loss,
487
+ l2_reg_strength=l2_reg_strength,
488
+ tol=tol,
489
+ max_iter=max_iter,
490
+ n_threads=n_threads,
491
+ verbose=verbose,
492
+ )
493
+ w0 = sol.solve(X=X, y=target, sample_weight=sample_weight)
494
+ n_iter_i = sol.iteration
495
+ elif solver == "liblinear":
496
+ (
497
+ coef_,
498
+ intercept_,
499
+ n_iter_i,
500
+ ) = _fit_liblinear(
501
+ X,
502
+ target,
503
+ C,
504
+ fit_intercept,
505
+ intercept_scaling,
506
+ None,
507
+ penalty,
508
+ dual,
509
+ verbose,
510
+ max_iter,
511
+ tol,
512
+ random_state,
513
+ sample_weight=sample_weight,
514
+ )
515
+ if fit_intercept:
516
+ w0 = np.concatenate([coef_.ravel(), intercept_])
517
+ else:
518
+ w0 = coef_.ravel()
519
+ # n_iter_i is an array for each class. However, `target` is always encoded
520
+ # in {-1, 1}, so we only take the first element of n_iter_i.
521
+ n_iter_i = n_iter_i.item()
522
+
523
+ elif solver in ["sag", "saga"]:
524
+ if multi_class == "multinomial":
525
+ target = target.astype(X.dtype, copy=False)
526
+ loss = "multinomial"
527
+ else:
528
+ loss = "log"
529
+ # alpha is for L2-norm, beta is for L1-norm
530
+ if penalty == "l1":
531
+ alpha = 0.0
532
+ beta = 1.0 / C
533
+ elif penalty == "l2":
534
+ alpha = 1.0 / C
535
+ beta = 0.0
536
+ else: # Elastic-Net penalty
537
+ alpha = (1.0 / C) * (1 - l1_ratio)
538
+ beta = (1.0 / C) * l1_ratio
539
+
540
+ w0, n_iter_i, warm_start_sag = sag_solver(
541
+ X,
542
+ target,
543
+ sample_weight,
544
+ loss,
545
+ alpha,
546
+ beta,
547
+ max_iter,
548
+ tol,
549
+ verbose,
550
+ random_state,
551
+ False,
552
+ max_squared_sum,
553
+ warm_start_sag,
554
+ is_saga=(solver == "saga"),
555
+ )
556
+
557
+ else:
558
+ raise ValueError(
559
+ "solver must be one of {'liblinear', 'lbfgs', "
560
+ "'newton-cg', 'sag'}, got '%s' instead" % solver
561
+ )
562
+
563
+ if multi_class == "multinomial":
564
+ n_classes = max(2, classes.size)
565
+ if solver in ["lbfgs", "newton-cg"]:
566
+ multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
567
+ else:
568
+ multi_w0 = w0
569
+ if n_classes == 2:
570
+ multi_w0 = multi_w0[1][np.newaxis, :]
571
+ coefs.append(multi_w0.copy())
572
+ else:
573
+ coefs.append(w0.copy())
574
+
575
+ n_iter[i] = n_iter_i
576
+
577
+ return np.array(coefs), np.array(Cs), n_iter
578
+
579
+
580
+ # helper function for LogisticCV
581
+ def _log_reg_scoring_path(
582
+ X,
583
+ y,
584
+ train,
585
+ test,
586
+ *,
587
+ pos_class,
588
+ Cs,
589
+ scoring,
590
+ fit_intercept,
591
+ max_iter,
592
+ tol,
593
+ class_weight,
594
+ verbose,
595
+ solver,
596
+ penalty,
597
+ dual,
598
+ intercept_scaling,
599
+ multi_class,
600
+ random_state,
601
+ max_squared_sum,
602
+ sample_weight,
603
+ l1_ratio,
604
+ score_params,
605
+ ):
606
+ """Computes scores across logistic_regression_path
607
+
608
+ Parameters
609
+ ----------
610
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
611
+ Training data.
612
+
613
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
614
+ Target labels.
615
+
616
+ train : list of indices
617
+ The indices of the train set.
618
+
619
+ test : list of indices
620
+ The indices of the test set.
621
+
622
+ pos_class : int
623
+ The class with respect to which we perform a one-vs-all fit.
624
+ If None, then it is assumed that the given problem is binary.
625
+
626
+ Cs : int or list of floats
627
+ Each of the values in Cs describes the inverse of
628
+ regularization strength. If Cs is as an int, then a grid of Cs
629
+ values are chosen in a logarithmic scale between 1e-4 and 1e4.
630
+
631
+ scoring : callable
632
+ A string (see model evaluation documentation) or
633
+ a scorer callable object / function with signature
634
+ ``scorer(estimator, X, y)``. For a list of scoring functions
635
+ that can be used, look at :mod:`sklearn.metrics`.
636
+
637
+ fit_intercept : bool
638
+ If False, then the bias term is set to zero. Else the last
639
+ term of each coef_ gives us the intercept.
640
+
641
+ max_iter : int
642
+ Maximum number of iterations for the solver.
643
+
644
+ tol : float
645
+ Tolerance for stopping criteria.
646
+
647
+ class_weight : dict or 'balanced'
648
+ Weights associated with classes in the form ``{class_label: weight}``.
649
+ If not given, all classes are supposed to have weight one.
650
+
651
+ The "balanced" mode uses the values of y to automatically adjust
652
+ weights inversely proportional to class frequencies in the input data
653
+ as ``n_samples / (n_classes * np.bincount(y))``
654
+
655
+ Note that these weights will be multiplied with sample_weight (passed
656
+ through the fit method) if sample_weight is specified.
657
+
658
+ verbose : int
659
+ For the liblinear and lbfgs solvers set verbose to any positive
660
+ number for verbosity.
661
+
662
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}
663
+ Decides which solver to use.
664
+
665
+ penalty : {'l1', 'l2', 'elasticnet'}
666
+ Used to specify the norm used in the penalization. The 'newton-cg',
667
+ 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
668
+ only supported by the 'saga' solver.
669
+
670
+ dual : bool
671
+ Dual or primal formulation. Dual formulation is only implemented for
672
+ l2 penalty with liblinear solver. Prefer dual=False when
673
+ n_samples > n_features.
674
+
675
+ intercept_scaling : float
676
+ Useful only when the solver 'liblinear' is used
677
+ and self.fit_intercept is set to True. In this case, x becomes
678
+ [x, self.intercept_scaling],
679
+ i.e. a "synthetic" feature with constant value equals to
680
+ intercept_scaling is appended to the instance vector.
681
+ The intercept becomes intercept_scaling * synthetic feature weight
682
+ Note! the synthetic feature weight is subject to l1/l2 regularization
683
+ as all other features.
684
+ To lessen the effect of regularization on synthetic feature weight
685
+ (and therefore on the intercept) intercept_scaling has to be increased.
686
+
687
+ multi_class : {'auto', 'ovr', 'multinomial'}
688
+ If the option chosen is 'ovr', then a binary problem is fit for each
689
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
690
+ across the entire probability distribution, *even when the data is
691
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
692
+
693
+ random_state : int, RandomState instance
694
+ Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
695
+ data. See :term:`Glossary <random_state>` for details.
696
+
697
+ max_squared_sum : float
698
+ Maximum squared sum of X over samples. Used only in SAG solver.
699
+ If None, it will be computed, going through all the samples.
700
+ The value should be precomputed to speed up cross validation.
701
+
702
+ sample_weight : array-like of shape(n_samples,)
703
+ Array of weights that are assigned to individual samples.
704
+ If not provided, then each sample is given unit weight.
705
+
706
+ l1_ratio : float
707
+ The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
708
+ used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
709
+ to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
710
+ to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
711
+ combination of L1 and L2.
712
+
713
+ score_params : dict
714
+ Parameters to pass to the `score` method of the underlying scorer.
715
+
716
+ Returns
717
+ -------
718
+ coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
719
+ List of coefficients for the Logistic Regression model. If
720
+ fit_intercept is set to True then the second dimension will be
721
+ n_features + 1, where the last item represents the intercept.
722
+
723
+ Cs : ndarray
724
+ Grid of Cs used for cross-validation.
725
+
726
+ scores : ndarray of shape (n_cs,)
727
+ Scores obtained for each Cs.
728
+
729
+ n_iter : ndarray of shape(n_cs,)
730
+ Actual number of iteration for each Cs.
731
+ """
732
+ X_train = X[train]
733
+ X_test = X[test]
734
+ y_train = y[train]
735
+ y_test = y[test]
736
+
737
+ if sample_weight is not None:
738
+ sample_weight = _check_sample_weight(sample_weight, X)
739
+ sample_weight = sample_weight[train]
740
+
741
+ coefs, Cs, n_iter = _logistic_regression_path(
742
+ X_train,
743
+ y_train,
744
+ Cs=Cs,
745
+ l1_ratio=l1_ratio,
746
+ fit_intercept=fit_intercept,
747
+ solver=solver,
748
+ max_iter=max_iter,
749
+ class_weight=class_weight,
750
+ pos_class=pos_class,
751
+ multi_class=multi_class,
752
+ tol=tol,
753
+ verbose=verbose,
754
+ dual=dual,
755
+ penalty=penalty,
756
+ intercept_scaling=intercept_scaling,
757
+ random_state=random_state,
758
+ check_input=False,
759
+ max_squared_sum=max_squared_sum,
760
+ sample_weight=sample_weight,
761
+ )
762
+
763
+ log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
764
+
765
+ # The score method of Logistic Regression has a classes_ attribute.
766
+ if multi_class == "ovr":
767
+ log_reg.classes_ = np.array([-1, 1])
768
+ elif multi_class == "multinomial":
769
+ log_reg.classes_ = np.unique(y_train)
770
+ else:
771
+ raise ValueError(
772
+ "multi_class should be either multinomial or ovr, got %d" % multi_class
773
+ )
774
+
775
+ if pos_class is not None:
776
+ mask = y_test == pos_class
777
+ y_test = np.ones(y_test.shape, dtype=np.float64)
778
+ y_test[~mask] = -1.0
779
+
780
+ scores = list()
781
+
782
+ scoring = get_scorer(scoring)
783
+ for w in coefs:
784
+ if multi_class == "ovr":
785
+ w = w[np.newaxis, :]
786
+ if fit_intercept:
787
+ log_reg.coef_ = w[:, :-1]
788
+ log_reg.intercept_ = w[:, -1]
789
+ else:
790
+ log_reg.coef_ = w
791
+ log_reg.intercept_ = 0.0
792
+
793
+ if scoring is None:
794
+ scores.append(log_reg.score(X_test, y_test))
795
+ else:
796
+ score_params = score_params or {}
797
+ score_params = _check_method_params(X=X, params=score_params, indices=test)
798
+ scores.append(scoring(log_reg, X_test, y_test, **score_params))
799
+
800
+ return coefs, Cs, np.array(scores), n_iter
801
+
802
+
803
+ class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
804
+ """
805
+ Logistic Regression (aka logit, MaxEnt) classifier.
806
+
807
+ In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
808
+ scheme if the 'multi_class' option is set to 'ovr', and uses the
809
+ cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
810
+ (Currently the 'multinomial' option is supported only by the 'lbfgs',
811
+ 'sag', 'saga' and 'newton-cg' solvers.)
812
+
813
+ This class implements regularized logistic regression using the
814
+ 'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
815
+ that regularization is applied by default**. It can handle both dense
816
+ and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
817
+ floats for optimal performance; any other input format will be converted
818
+ (and copied).
819
+
820
+ The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
821
+ with primal formulation, or no regularization. The 'liblinear' solver
822
+ supports both L1 and L2 regularization, with a dual formulation only for
823
+ the L2 penalty. The Elastic-Net regularization is only supported by the
824
+ 'saga' solver.
825
+
826
+ Read more in the :ref:`User Guide <logistic_regression>`.
827
+
828
+ Parameters
829
+ ----------
830
+ penalty : {'l1', 'l2', 'elasticnet', None}, default='l2'
831
+ Specify the norm of the penalty:
832
+
833
+ - `None`: no penalty is added;
834
+ - `'l2'`: add an L2 penalty term and it is the default choice;
836
+ - `'l1'`: add an L1 penalty term;
836
+ - `'elasticnet'`: both L1 and L2 penalty terms are added.
837
+
838
+ .. warning::
839
+ Some penalties may not work with some solvers. See the parameter
840
+ `solver` below, to know the compatibility between the penalty and
841
+ solver.
842
+
843
+ .. versionadded:: 0.19
844
+ l1 penalty with SAGA solver (allowing 'multinomial' + L1)
845
+
846
+ dual : bool, default=False
847
+ Dual (constrained) or primal (regularized, see also
848
+ :ref:`this equation <regularized-logistic-loss>`) formulation. Dual formulation
849
+ is only implemented for l2 penalty with liblinear solver. Prefer dual=False when
850
+ n_samples > n_features.
851
+
852
+ tol : float, default=1e-4
853
+ Tolerance for stopping criteria.
854
+
855
+ C : float, default=1.0
856
+ Inverse of regularization strength; must be a positive float.
857
+ Like in support vector machines, smaller values specify stronger
858
+ regularization.
859
+
860
+ fit_intercept : bool, default=True
861
+ Specifies if a constant (a.k.a. bias or intercept) should be
862
+ added to the decision function.
863
+
864
+ intercept_scaling : float, default=1
865
+ Useful only when the solver 'liblinear' is used
866
+ and self.fit_intercept is set to True. In this case, x becomes
867
+ [x, self.intercept_scaling],
868
+ i.e. a "synthetic" feature with constant value equal to
869
+ intercept_scaling is appended to the instance vector.
870
+ The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
871
+
872
+ Note! The synthetic feature weight is subject to l1/l2 regularization
873
+ like all other features.
874
+ To lessen the effect of regularization on synthetic feature weight
875
+ (and therefore on the intercept) intercept_scaling has to be increased.
876
+
877
+ class_weight : dict or 'balanced', default=None
878
+ Weights associated with classes in the form ``{class_label: weight}``.
879
+ If not given, all classes are supposed to have weight one.
880
+
881
+ The "balanced" mode uses the values of y to automatically adjust
882
+ weights inversely proportional to class frequencies in the input data
883
+ as ``n_samples / (n_classes * np.bincount(y))``.
884
+
885
+ Note that these weights will be multiplied with sample_weight (passed
886
+ through the fit method) if sample_weight is specified.
887
+
888
+ .. versionadded:: 0.17
889
+ *class_weight='balanced'*
890
+
891
+ random_state : int, RandomState instance, default=None
892
+ Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
893
+ data. See :term:`Glossary <random_state>` for details.
894
+
895
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
896
+ default='lbfgs'
897
+
898
+ Algorithm to use in the optimization problem. Default is 'lbfgs'.
899
+ To choose a solver, you might want to consider the following aspects:
900
+
901
+ - For small datasets, 'liblinear' is a good choice, whereas 'sag'
902
+ and 'saga' are faster for large ones;
903
+ - For multiclass problems, only 'newton-cg', 'sag', 'saga' and
904
+ 'lbfgs' handle multinomial loss;
905
+ - 'liblinear' is limited to one-versus-rest schemes.
906
+ - 'newton-cholesky' is a good choice for `n_samples` >> `n_features`,
907
+ especially with one-hot encoded categorical features with rare
908
+ categories. Note that it is limited to binary classification and the
909
+ one-versus-rest reduction for multiclass classification. Be aware that
910
+ the memory usage of this solver has a quadratic dependency on
911
+ `n_features` because it explicitly computes the Hessian matrix.
912
+
913
+ .. warning::
914
+ The choice of the algorithm depends on the penalty chosen.
915
+ Supported penalties by solver:
916
+
917
+ - 'lbfgs' - ['l2', None]
918
+ - 'liblinear' - ['l1', 'l2']
919
+ - 'newton-cg' - ['l2', None]
920
+ - 'newton-cholesky' - ['l2', None]
921
+ - 'sag' - ['l2', None]
922
+ - 'saga' - ['elasticnet', 'l1', 'l2', None]
923
+
924
+ .. note::
925
+ 'sag' and 'saga' fast convergence is only guaranteed on features
926
+ with approximately the same scale. You can preprocess the data with
927
+ a scaler from :mod:`sklearn.preprocessing`.
928
+
929
+ .. seealso::
930
+ Refer to the User Guide for more information regarding
931
+ :class:`LogisticRegression` and more specifically the
932
+ :ref:`Table <Logistic_regression>`
933
+ summarizing solver/penalty supports.
934
+
935
+ .. versionadded:: 0.17
936
+ Stochastic Average Gradient descent solver.
937
+ .. versionadded:: 0.19
938
+ SAGA solver.
939
+ .. versionchanged:: 0.22
940
+ The default solver changed from 'liblinear' to 'lbfgs' in 0.22.
941
+ .. versionadded:: 1.2
942
+ newton-cholesky solver.
943
+
944
+ max_iter : int, default=100
945
+ Maximum number of iterations taken for the solvers to converge.
946
+
947
+ multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
948
+ If the option chosen is 'ovr', then a binary problem is fit for each
949
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
950
+ across the entire probability distribution, *even when the data is
951
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
952
+ 'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
953
+ and otherwise selects 'multinomial'.
954
+
955
+ .. versionadded:: 0.18
956
+ Stochastic Average Gradient descent solver for 'multinomial' case.
957
+ .. versionchanged:: 0.22
958
+ Default changed from 'ovr' to 'auto' in 0.22.
959
+
960
+ verbose : int, default=0
961
+ For the liblinear and lbfgs solvers set verbose to any positive
962
+ number for verbosity.
963
+
964
+ warm_start : bool, default=False
965
+ When set to True, reuse the solution of the previous call to fit as
966
+ initialization, otherwise, just erase the previous solution.
967
+ Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
968
+
969
+ .. versionadded:: 0.17
970
+ *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
971
+
972
+ n_jobs : int, default=None
973
+ Number of CPU cores used when parallelizing over classes if
974
+ multi_class='ovr'". This parameter is ignored when the ``solver`` is
975
+ set to 'liblinear' regardless of whether 'multi_class' is specified or
976
+ not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
977
+ context. ``-1`` means using all processors.
978
+ See :term:`Glossary <n_jobs>` for more details.
979
+
980
+ l1_ratio : float, default=None
981
+ The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
982
+ used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
983
+ to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
984
+ to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
985
+ combination of L1 and L2.
986
+
987
+ Attributes
988
+ ----------
989
+
990
+ classes_ : ndarray of shape (n_classes, )
991
+ A list of class labels known to the classifier.
992
+
993
+ coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
994
+ Coefficient of the features in the decision function.
995
+
996
+ `coef_` is of shape (1, n_features) when the given problem is binary.
997
+ In particular, when `multi_class='multinomial'`, `coef_` corresponds
998
+ to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
999
+
1000
+ intercept_ : ndarray of shape (1,) or (n_classes,)
1001
+ Intercept (a.k.a. bias) added to the decision function.
1002
+
1003
+ If `fit_intercept` is set to False, the intercept is set to zero.
1004
+ `intercept_` is of shape (1,) when the given problem is binary.
1005
+ In particular, when `multi_class='multinomial'`, `intercept_`
1006
+ corresponds to outcome 1 (True) and `-intercept_` corresponds to
1007
+ outcome 0 (False).
1008
+
1009
+ n_features_in_ : int
1010
+ Number of features seen during :term:`fit`.
1011
+
1012
+ .. versionadded:: 0.24
1013
+
1014
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1015
+ Names of features seen during :term:`fit`. Defined only when `X`
1016
+ has feature names that are all strings.
1017
+
1018
+ .. versionadded:: 1.0
1019
+
1020
+ n_iter_ : ndarray of shape (n_classes,) or (1, )
1021
+ Actual number of iterations for all classes. If binary or multinomial,
1022
+ it returns only 1 element. For liblinear solver, only the maximum
1023
+ number of iteration across all classes is given.
1024
+
1025
+ .. versionchanged:: 0.20
1026
+
1027
+ In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
1028
+ ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
1029
+
1030
+ See Also
1031
+ --------
1032
+ SGDClassifier : Incrementally trained logistic regression (when given
1033
+ the parameter ``loss="log_loss"``).
1034
+ LogisticRegressionCV : Logistic regression with built-in cross validation.
1035
+
1036
+ Notes
1037
+ -----
1038
+ The underlying C implementation uses a random number generator to
1039
+ select features when fitting the model. It is thus not uncommon,
1040
+ to have slightly different results for the same input data. If
1041
+ that happens, try with a smaller tol parameter.
1042
+
1043
+ Predict output may not match that of standalone liblinear in certain
1044
+ cases. See :ref:`differences from liblinear <liblinear_differences>`
1045
+ in the narrative documentation.
1046
+
1047
+ References
1048
+ ----------
1049
+
1050
+ L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
1051
+ Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
1052
+ http://users.iems.northwestern.edu/~nocedal/lbfgsb.html
1053
+
1054
+ LIBLINEAR -- A Library for Large Linear Classification
1055
+ https://www.csie.ntu.edu.tw/~cjlin/liblinear/
1056
+
1057
+ SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
1058
+ Minimizing Finite Sums with the Stochastic Average Gradient
1059
+ https://hal.inria.fr/hal-00860051/document
1060
+
1061
+ SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
1062
+ :arxiv:`"SAGA: A Fast Incremental Gradient Method With Support
1063
+ for Non-Strongly Convex Composite Objectives" <1407.0202>`
1064
+
1065
+ Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
1066
+ methods for logistic regression and maximum entropy models.
1067
+ Machine Learning 85(1-2):41-75.
1068
+ https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
1069
+
1070
+ Examples
1071
+ --------
1072
+ >>> from sklearn.datasets import load_iris
1073
+ >>> from sklearn.linear_model import LogisticRegression
1074
+ >>> X, y = load_iris(return_X_y=True)
1075
+ >>> clf = LogisticRegression(random_state=0).fit(X, y)
1076
+ >>> clf.predict(X[:2, :])
1077
+ array([0, 0])
1078
+ >>> clf.predict_proba(X[:2, :])
1079
+ array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
1080
+ [9.7...e-01, 2.8...e-02, ...e-08]])
1081
+ >>> clf.score(X, y)
1082
+ 0.97...
1083
+ """
1084
+
1085
+ _parameter_constraints: dict = {
1086
+ "penalty": [StrOptions({"l1", "l2", "elasticnet"}), None],
1087
+ "dual": ["boolean"],
1088
+ "tol": [Interval(Real, 0, None, closed="left")],
1089
+ "C": [Interval(Real, 0, None, closed="right")],
1090
+ "fit_intercept": ["boolean"],
1091
+ "intercept_scaling": [Interval(Real, 0, None, closed="neither")],
1092
+ "class_weight": [dict, StrOptions({"balanced"}), None],
1093
+ "random_state": ["random_state"],
1094
+ "solver": [
1095
+ StrOptions(
1096
+ {"lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga"}
1097
+ )
1098
+ ],
1099
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
1100
+ "multi_class": [StrOptions({"auto", "ovr", "multinomial"})],
1101
+ "verbose": ["verbose"],
1102
+ "warm_start": ["boolean"],
1103
+ "n_jobs": [None, Integral],
1104
+ "l1_ratio": [Interval(Real, 0, 1, closed="both"), None],
1105
+ }
1106
+
1107
+ def __init__(
1108
+ self,
1109
+ penalty="l2",
1110
+ *,
1111
+ dual=False,
1112
+ tol=1e-4,
1113
+ C=1.0,
1114
+ fit_intercept=True,
1115
+ intercept_scaling=1,
1116
+ class_weight=None,
1117
+ random_state=None,
1118
+ solver="lbfgs",
1119
+ max_iter=100,
1120
+ multi_class="auto",
1121
+ verbose=0,
1122
+ warm_start=False,
1123
+ n_jobs=None,
1124
+ l1_ratio=None,
1125
+ ):
1126
+ self.penalty = penalty
1127
+ self.dual = dual
1128
+ self.tol = tol
1129
+ self.C = C
1130
+ self.fit_intercept = fit_intercept
1131
+ self.intercept_scaling = intercept_scaling
1132
+ self.class_weight = class_weight
1133
+ self.random_state = random_state
1134
+ self.solver = solver
1135
+ self.max_iter = max_iter
1136
+ self.multi_class = multi_class
1137
+ self.verbose = verbose
1138
+ self.warm_start = warm_start
1139
+ self.n_jobs = n_jobs
1140
+ self.l1_ratio = l1_ratio
1141
+
1142
+ @_fit_context(prefer_skip_nested_validation=True)
1143
+ def fit(self, X, y, sample_weight=None):
1144
+ """
1145
+ Fit the model according to the given training data.
1146
+
1147
+ Parameters
1148
+ ----------
1149
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1150
+ Training vector, where `n_samples` is the number of samples and
1151
+ `n_features` is the number of features.
1152
+
1153
+ y : array-like of shape (n_samples,)
1154
+ Target vector relative to X.
1155
+
1156
+ sample_weight : array-like of shape (n_samples,) default=None
1157
+ Array of weights that are assigned to individual samples.
1158
+ If not provided, then each sample is given unit weight.
1159
+
1160
+ .. versionadded:: 0.17
1161
+ *sample_weight* support to LogisticRegression.
1162
+
1163
+ Returns
1164
+ -------
1165
+ self
1166
+ Fitted estimator.
1167
+
1168
+ Notes
1169
+ -----
1170
+ The SAGA solver supports both float64 and float32 arrays.
1171
+ """
1172
+ solver = _check_solver(self.solver, self.penalty, self.dual)
1173
+
1174
+ if self.penalty != "elasticnet" and self.l1_ratio is not None:
1175
+ warnings.warn(
1176
+ "l1_ratio parameter is only used when penalty is "
1177
+ "'elasticnet'. Got "
1178
+ "(penalty={})".format(self.penalty)
1179
+ )
1180
+
1181
+ if self.penalty == "elasticnet" and self.l1_ratio is None:
1182
+ raise ValueError("l1_ratio must be specified when penalty is elasticnet.")
1183
+
1184
+ if self.penalty is None:
1185
+ if self.C != 1.0: # default values
1186
+ warnings.warn(
1187
+ "Setting penalty=None will ignore the C and l1_ratio parameters"
1188
+ )
1189
+ # Note that check for l1_ratio is done right above
1190
+ C_ = np.inf
1191
+ penalty = "l2"
1192
+ else:
1193
+ C_ = self.C
1194
+ penalty = self.penalty
1195
+
1196
+ if solver == "lbfgs":
1197
+ _dtype = np.float64
1198
+ else:
1199
+ _dtype = [np.float64, np.float32]
1200
+
1201
+ X, y = self._validate_data(
1202
+ X,
1203
+ y,
1204
+ accept_sparse="csr",
1205
+ dtype=_dtype,
1206
+ order="C",
1207
+ accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
1208
+ )
1209
+ check_classification_targets(y)
1210
+ self.classes_ = np.unique(y)
1211
+
1212
+ multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_))
1213
+
1214
+ if solver == "liblinear":
1215
+ if effective_n_jobs(self.n_jobs) != 1:
1216
+ warnings.warn(
1217
+ "'n_jobs' > 1 does not have any effect when"
1218
+ " 'solver' is set to 'liblinear'. Got 'n_jobs'"
1219
+ " = {}.".format(effective_n_jobs(self.n_jobs))
1220
+ )
1221
+ self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
1222
+ X,
1223
+ y,
1224
+ self.C,
1225
+ self.fit_intercept,
1226
+ self.intercept_scaling,
1227
+ self.class_weight,
1228
+ self.penalty,
1229
+ self.dual,
1230
+ self.verbose,
1231
+ self.max_iter,
1232
+ self.tol,
1233
+ self.random_state,
1234
+ sample_weight=sample_weight,
1235
+ )
1236
+ return self
1237
+
1238
+ if solver in ["sag", "saga"]:
1239
+ max_squared_sum = row_norms(X, squared=True).max()
1240
+ else:
1241
+ max_squared_sum = None
1242
+
1243
+ n_classes = len(self.classes_)
1244
+ classes_ = self.classes_
1245
+ if n_classes < 2:
1246
+ raise ValueError(
1247
+ "This solver needs samples of at least 2 classes"
1248
+ " in the data, but the data contains only one"
1249
+ " class: %r"
1250
+ % classes_[0]
1251
+ )
1252
+
1253
+ if len(self.classes_) == 2:
1254
+ n_classes = 1
1255
+ classes_ = classes_[1:]
1256
+
1257
+ if self.warm_start:
1258
+ warm_start_coef = getattr(self, "coef_", None)
1259
+ else:
1260
+ warm_start_coef = None
1261
+ if warm_start_coef is not None and self.fit_intercept:
1262
+ warm_start_coef = np.append(
1263
+ warm_start_coef, self.intercept_[:, np.newaxis], axis=1
1264
+ )
1265
+
1266
+ # Hack so that we iterate only once for the multinomial case.
1267
+ if multi_class == "multinomial":
1268
+ classes_ = [None]
1269
+ warm_start_coef = [warm_start_coef]
1270
+ if warm_start_coef is None:
1271
+ warm_start_coef = [None] * n_classes
1272
+
1273
+ path_func = delayed(_logistic_regression_path)
1274
+
1275
+ # The SAG solver releases the GIL so it's more efficient to use
1276
+ # threads for this solver.
1277
+ if solver in ["sag", "saga"]:
1278
+ prefer = "threads"
1279
+ else:
1280
+ prefer = "processes"
1281
+
1282
+ # TODO: Refactor this to avoid joblib parallelism entirely when doing binary
1283
+ # and multinomial multiclass classification and use joblib only for the
1284
+ # one-vs-rest multiclass case.
1285
+ if (
1286
+ solver in ["lbfgs", "newton-cg", "newton-cholesky"]
1287
+ and len(classes_) == 1
1288
+ and effective_n_jobs(self.n_jobs) == 1
1289
+ ):
1290
+ # In the future, we would like n_threads = _openmp_effective_n_threads()
1291
+ # For the time being, we just do
1292
+ n_threads = 1
1293
+ else:
1294
+ n_threads = 1
1295
+
1296
+ fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
1297
+ path_func(
1298
+ X,
1299
+ y,
1300
+ pos_class=class_,
1301
+ Cs=[C_],
1302
+ l1_ratio=self.l1_ratio,
1303
+ fit_intercept=self.fit_intercept,
1304
+ tol=self.tol,
1305
+ verbose=self.verbose,
1306
+ solver=solver,
1307
+ multi_class=multi_class,
1308
+ max_iter=self.max_iter,
1309
+ class_weight=self.class_weight,
1310
+ check_input=False,
1311
+ random_state=self.random_state,
1312
+ coef=warm_start_coef_,
1313
+ penalty=penalty,
1314
+ max_squared_sum=max_squared_sum,
1315
+ sample_weight=sample_weight,
1316
+ n_threads=n_threads,
1317
+ )
1318
+ for class_, warm_start_coef_ in zip(classes_, warm_start_coef)
1319
+ )
1320
+
1321
+ fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
1322
+ self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
1323
+
1324
+ n_features = X.shape[1]
1325
+ if multi_class == "multinomial":
1326
+ self.coef_ = fold_coefs_[0][0]
1327
+ else:
1328
+ self.coef_ = np.asarray(fold_coefs_)
1329
+ self.coef_ = self.coef_.reshape(
1330
+ n_classes, n_features + int(self.fit_intercept)
1331
+ )
1332
+
1333
+ if self.fit_intercept:
1334
+ self.intercept_ = self.coef_[:, -1]
1335
+ self.coef_ = self.coef_[:, :-1]
1336
+ else:
1337
+ self.intercept_ = np.zeros(n_classes)
1338
+
1339
+ return self
1340
+
1341
+ def predict_proba(self, X):
1342
+ """
1343
+ Probability estimates.
1344
+
1345
+ The returned estimates for all classes are ordered by the
1346
+ label of classes.
1347
+
1348
+ For a multi_class problem, if multi_class is set to be "multinomial"
1349
+ the softmax function is used to find the predicted probability of
1350
+ each class.
1351
+ Else use a one-vs-rest approach, i.e. calculate the probability
1352
+ of each class assuming it to be positive using the logistic function,
1353
+ and normalize these values across all the classes.
1354
+
1355
+ Parameters
1356
+ ----------
1357
+ X : array-like of shape (n_samples, n_features)
1358
+ Vector to be scored, where `n_samples` is the number of samples and
1359
+ `n_features` is the number of features.
1360
+
1361
+ Returns
1362
+ -------
1363
+ T : array-like of shape (n_samples, n_classes)
1364
+ Returns the probability of the sample for each class in the model,
1365
+ where classes are ordered as they are in ``self.classes_``.
1366
+ """
1367
+ check_is_fitted(self)
1368
+
1369
+ ovr = self.multi_class in ["ovr", "warn"] or (
1370
+ self.multi_class == "auto"
1371
+ and (
1372
+ self.classes_.size <= 2
1373
+ or self.solver in ("liblinear", "newton-cholesky")
1374
+ )
1375
+ )
1376
+ if ovr:
1377
+ return super()._predict_proba_lr(X)
1378
+ else:
1379
+ decision = self.decision_function(X)
1380
+ if decision.ndim == 1:
1381
+ # Workaround for multi_class="multinomial" and binary outcomes
1382
+ # which requires softmax prediction with only a 1D decision.
1383
+ decision_2d = np.c_[-decision, decision]
1384
+ else:
1385
+ decision_2d = decision
1386
+ return softmax(decision_2d, copy=False)
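+ # Illustrative sketch (assumption, not upstream code): when the multinomial
+ # path is taken above, `predict_proba` is the softmax of `decision_function`.
+ # For an already fitted multinomial model `clf` and multiclass data `X`:
+ #
+ #     import numpy as np
+ #     scores = clf.decision_function(X)  # shape (n_samples, n_classes)
+ #     probs = np.exp(scores - scores.max(axis=1, keepdims=True))
+ #     probs /= probs.sum(axis=1, keepdims=True)
+ #     assert np.allclose(probs, clf.predict_proba(X))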
1387
+
1388
+ def predict_log_proba(self, X):
1389
+ """
1390
+ Predict logarithm of probability estimates.
1391
+
1392
+ The returned estimates for all classes are ordered by the
1393
+ label of classes.
1394
+
1395
+ Parameters
1396
+ ----------
1397
+ X : array-like of shape (n_samples, n_features)
1398
+ Vector to be scored, where `n_samples` is the number of samples and
1399
+ `n_features` is the number of features.
1400
+
1401
+ Returns
1402
+ -------
1403
+ T : array-like of shape (n_samples, n_classes)
1404
+ Returns the log-probability of the sample for each class in the
1405
+ model, where classes are ordered as they are in ``self.classes_``.
1406
+ """
1407
+ return np.log(self.predict_proba(X))
1408
+
1409
+
1410
+ class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstimator):
1411
+ """Logistic Regression CV (aka logit, MaxEnt) classifier.
1412
+
1413
+ See glossary entry for :term:`cross-validation estimator`.
1414
+
1415
+ This class implements logistic regression using liblinear, newton-cg, sag
1416
+ or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
1417
+ regularization with primal formulation. The liblinear solver supports both
1418
+ L1 and L2 regularization, with a dual formulation only for the L2 penalty.
1419
+ Elastic-Net penalty is only supported by the saga solver.
1420
+
1421
+ For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter
1422
+ is selected by the cross-validator
1423
+ :class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed
1424
+ using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs'
1425
+ solvers can warm-start the coefficients (see :term:`Glossary<warm_start>`).
1426
+
1427
+ Read more in the :ref:`User Guide <logistic_regression>`.
1428
+
1429
+ Parameters
1430
+ ----------
1431
+ Cs : int or list of floats, default=10
1432
+ Each of the values in Cs describes the inverse of regularization
1433
+ strength. If Cs is an int, then a grid of Cs values is chosen
1434
+ in a logarithmic scale between 1e-4 and 1e4.
1435
+ Like in support vector machines, smaller values specify stronger
1436
+ regularization.
1437
+
1438
+ fit_intercept : bool, default=True
1439
+ Specifies if a constant (a.k.a. bias or intercept) should be
1440
+ added to the decision function.
1441
+
1442
+ cv : int or cross-validation generator, default=None
1443
+ The default cross-validation generator used is Stratified K-Folds.
1444
+ If an integer is provided, then it is the number of folds used.
1445
+ See the module :mod:`sklearn.model_selection` module for the
1446
+ list of possible cross-validation objects.
1447
+
1448
+ .. versionchanged:: 0.22
1449
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1450
+
1451
+ dual : bool, default=False
1452
+ Dual (constrained) or primal (regularized, see also
1453
+ :ref:`this equation <regularized-logistic-loss>`) formulation. Dual formulation
1454
+ is only implemented for l2 penalty with liblinear solver. Prefer dual=False when
1455
+ n_samples > n_features.
1456
+
1457
+ penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
1458
+ Specify the norm of the penalty:
1459
+
1460
+ - `'l2'`: add an L2 penalty term (used by default);
1460
+ - `'l1'`: add an L1 penalty term;
1462
+ - `'elasticnet'`: both L1 and L2 penalty terms are added.
1463
+
1464
+ .. warning::
1465
+ Some penalties may not work with some solvers. See the parameter
1466
+ `solver` below, to know the compatibility between the penalty and
1467
+ solver.
1468
+
1469
+ scoring : str or callable, default=None
1470
+ A string (see model evaluation documentation) or
1471
+ a scorer callable object / function with signature
1472
+ ``scorer(estimator, X, y)``. For a list of scoring functions
1473
+ that can be used, look at :mod:`sklearn.metrics`. The
1474
+ default scoring option used is 'accuracy'.
1475
+
1476
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
1477
+ default='lbfgs'
1478
+
1479
+ Algorithm to use in the optimization problem. Default is 'lbfgs'.
1480
+ To choose a solver, you might want to consider the following aspects:
1481
+
1482
+ - For small datasets, 'liblinear' is a good choice, whereas 'sag'
1483
+ and 'saga' are faster for large ones;
1484
+ - For multiclass problems, only 'newton-cg', 'sag', 'saga' and
1485
+ 'lbfgs' handle multinomial loss;
1486
+ - 'liblinear' might be slower in :class:`LogisticRegressionCV`
1487
+ because it does not handle warm-starting. 'liblinear' is
1488
+ limited to one-versus-rest schemes.
1489
+ - 'newton-cholesky' is a good choice for `n_samples` >> `n_features`,
1490
+ especially with one-hot encoded categorical features with rare
1491
+ categories. Note that it is limited to binary classification and the
1492
+ one-versus-rest reduction for multiclass classification. Be aware that
1493
+ the memory usage of this solver has a quadratic dependency on
1494
+ `n_features` because it explicitly computes the Hessian matrix.
1495
+
1496
+ .. warning::
1497
+ The choice of the algorithm depends on the penalty chosen.
1498
+ Supported penalties by solver:
1499
+
1500
+ - 'lbfgs' - ['l2']
1501
+ - 'liblinear' - ['l1', 'l2']
1502
+ - 'newton-cg' - ['l2']
1503
+ - 'newton-cholesky' - ['l2']
1504
+ - 'sag' - ['l2']
1505
+ - 'saga' - ['elasticnet', 'l1', 'l2']
1506
+
1507
+ .. note::
1508
+ 'sag' and 'saga' fast convergence is only guaranteed on features
1509
+ with approximately the same scale. You can preprocess the data with
1510
+ a scaler from :mod:`sklearn.preprocessing`.
1511
+
1512
+ .. versionadded:: 0.17
1513
+ Stochastic Average Gradient descent solver.
1514
+ .. versionadded:: 0.19
1515
+ SAGA solver.
1516
+ .. versionadded:: 1.2
1517
+ newton-cholesky solver.
1518
+
1519
+ tol : float, default=1e-4
1520
+ Tolerance for stopping criteria.
1521
+
1522
+ max_iter : int, default=100
1523
+ Maximum number of iterations of the optimization algorithm.
1524
+
1525
+ class_weight : dict or 'balanced', default=None
1526
+ Weights associated with classes in the form ``{class_label: weight}``.
1527
+ If not given, all classes are supposed to have weight one.
1528
+
1529
+ The "balanced" mode uses the values of y to automatically adjust
1530
+ weights inversely proportional to class frequencies in the input data
1531
+ as ``n_samples / (n_classes * np.bincount(y))``.
1532
+
1533
+ Note that these weights will be multiplied with sample_weight (passed
1534
+ through the fit method) if sample_weight is specified.
1535
+
1536
+ .. versionadded:: 0.17
1537
+ class_weight == 'balanced'
1538
+
1539
+ n_jobs : int, default=None
1540
+ Number of CPU cores used during the cross-validation loop.
1541
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1542
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1543
+ for more details.
1544
+
1545
+ verbose : int, default=0
1546
+ For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
1547
+ positive number for verbosity.
1548
+
1549
+ refit : bool, default=True
1550
+ If set to True, the scores are averaged across all folds, and the
1551
+ coefs and the C that corresponds to the best score is taken, and a
1552
+ final refit is done using these parameters.
1553
+ Otherwise the coefs, intercepts and C that correspond to the
1554
+ best scores across folds are averaged.
1555
+
1556
+ intercept_scaling : float, default=1
1557
+ Useful only when the solver 'liblinear' is used
1558
+ and self.fit_intercept is set to True. In this case, x becomes
1559
+ [x, self.intercept_scaling],
1560
+ i.e. a "synthetic" feature with constant value equal to
1561
+ intercept_scaling is appended to the instance vector.
1562
+ The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
1563
+
1564
+ Note! The synthetic feature weight is subject to l1/l2 regularization
1565
+ like all other features.
1566
+ To lessen the effect of regularization on synthetic feature weight
1567
+ (and therefore on the intercept) intercept_scaling has to be increased.
1568
+
1569
+ multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
1570
+ If the option chosen is 'ovr', then a binary problem is fit for each
1571
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
1572
+ across the entire probability distribution, *even when the data is
1573
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
1574
+ 'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
1575
+ and otherwise selects 'multinomial'.
1576
+
1577
+ .. versionadded:: 0.18
1578
+ Stochastic Average Gradient descent solver for 'multinomial' case.
1579
+ .. versionchanged:: 0.22
1580
+ Default changed from 'ovr' to 'auto' in 0.22.
1581
+
1582
+ random_state : int, RandomState instance, default=None
1583
+ Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data.
1584
+ Note that this only applies to the solver and not the cross-validation
1585
+ generator. See :term:`Glossary <random_state>` for details.
1586
+
1587
+ l1_ratios : list of float, default=None
1588
+ The list of Elastic-Net mixing parameters, with ``0 <= l1_ratio <= 1``.
1589
+ Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
1590
+ using ``penalty='l2'``, while 1 is equivalent to using
1591
+ ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a combination
1592
+ of L1 and L2.
1593
+
1594
+ Attributes
1595
+ ----------
1596
+ classes_ : ndarray of shape (n_classes, )
1597
+ A list of class labels known to the classifier.
1598
+
1599
+ coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
1600
+ Coefficient of the features in the decision function.
1601
+
1602
+ `coef_` is of shape (1, n_features) when the given problem
1603
+ is binary.
1604
+
1605
+ intercept_ : ndarray of shape (1,) or (n_classes,)
1606
+ Intercept (a.k.a. bias) added to the decision function.
1607
+
1608
+ If `fit_intercept` is set to False, the intercept is set to zero.
1609
+ `intercept_` is of shape (1,) when the problem is binary.
1610
+
1611
+ Cs_ : ndarray of shape (n_cs)
1612
+ Array of C i.e. inverse of regularization parameter values used
1613
+ for cross-validation.
1614
+
1615
+ l1_ratios_ : ndarray of shape (n_l1_ratios)
1616
+ Array of l1_ratios used for cross-validation. If no l1_ratio is used
1617
+ (i.e. penalty is not 'elasticnet'), this is set to ``[None]``
1618
+
1619
+ coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \
1620
+ (n_folds, n_cs, n_features + 1)
1621
+ dict with classes as the keys, and the path of coefficients obtained
1622
+ during cross-validating across each fold and then across each Cs
1623
+ after doing an OvR for the corresponding class as values.
1624
+ If the 'multi_class' option is set to 'multinomial', then
1625
+ the coefs_paths are the coefficients corresponding to each class.
1626
+ Each dict value has shape ``(n_folds, n_cs, n_features)`` or
1627
+ ``(n_folds, n_cs, n_features + 1)`` depending on whether the
1628
+ intercept is fit or not. If ``penalty='elasticnet'``, the shape is
1629
+ ``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
1630
+ ``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
1631
+
1632
+ scores_ : dict
1633
+ dict with classes as the keys, and the values as the
1634
+ grid of scores obtained during cross-validating each fold, after doing
1635
+ an OvR for the corresponding class. If the 'multi_class' option
1636
+ given is 'multinomial' then the same scores are repeated across
1637
+ all classes, since this is the multinomial class. Each dict value
1638
+ has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
1639
+ ``penalty='elasticnet'``.
1640
+
1641
+ C_ : ndarray of shape (n_classes,) or (n_classes - 1,)
1642
+ Array of C that maps to the best scores across every class. If refit is
1643
+ set to False, then for each class, the best C is the average of the
1644
+ C's that correspond to the best scores for each fold.
1645
+ `C_` is of shape (n_classes,) when the problem is binary.
1646
+
1647
+ l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,)
1648
+ Array of l1_ratio that maps to the best scores across every class. If
1649
+ refit is set to False, then for each class, the best l1_ratio is the
1650
+ average of the l1_ratio's that correspond to the best scores for each
1651
+ fold. `l1_ratio_` is of shape (n_classes,) when the problem is binary.
1652
+
1653
+ n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
1654
+ Actual number of iterations for all classes, folds and Cs.
1655
+ In the binary or multinomial cases, the first dimension is equal to 1.
1656
+ If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
1657
+ n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
1658
+
1659
+ n_features_in_ : int
1660
+ Number of features seen during :term:`fit`.
1661
+
1662
+ .. versionadded:: 0.24
1663
+
1664
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1665
+ Names of features seen during :term:`fit`. Defined only when `X`
1666
+ has feature names that are all strings.
1667
+
1668
+ .. versionadded:: 1.0
1669
+
1670
+ See Also
1671
+ --------
1672
+ LogisticRegression : Logistic regression without tuning the
1673
+ hyperparameter `C`.
1674
+
1675
+ Examples
1676
+ --------
1677
+ >>> from sklearn.datasets import load_iris
1678
+ >>> from sklearn.linear_model import LogisticRegressionCV
1679
+ >>> X, y = load_iris(return_X_y=True)
1680
+ >>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y)
1681
+ >>> clf.predict(X[:2, :])
1682
+ array([0, 0])
1683
+ >>> clf.predict_proba(X[:2, :]).shape
1684
+ (2, 3)
1685
+ >>> clf.score(X, y)
1686
+ 0.98...
1687
+ """
1688
+
1689
+ _parameter_constraints: dict = {**LogisticRegression._parameter_constraints}
1690
+
1691
+ for param in ["C", "warm_start", "l1_ratio"]:
1692
+ _parameter_constraints.pop(param)
1693
+
1694
+ _parameter_constraints.update(
1695
+ {
1696
+ "Cs": [Interval(Integral, 1, None, closed="left"), "array-like"],
1697
+ "cv": ["cv_object"],
1698
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
1699
+ "l1_ratios": ["array-like", None],
1700
+ "refit": ["boolean"],
1701
+ "penalty": [StrOptions({"l1", "l2", "elasticnet"})],
1702
+ }
1703
+ )
1704
+
1705
+ def __init__(
1706
+ self,
1707
+ *,
1708
+ Cs=10,
1709
+ fit_intercept=True,
1710
+ cv=None,
1711
+ dual=False,
1712
+ penalty="l2",
1713
+ scoring=None,
1714
+ solver="lbfgs",
1715
+ tol=1e-4,
1716
+ max_iter=100,
1717
+ class_weight=None,
1718
+ n_jobs=None,
1719
+ verbose=0,
1720
+ refit=True,
1721
+ intercept_scaling=1.0,
1722
+ multi_class="auto",
1723
+ random_state=None,
1724
+ l1_ratios=None,
1725
+ ):
1726
+ self.Cs = Cs
1727
+ self.fit_intercept = fit_intercept
1728
+ self.cv = cv
1729
+ self.dual = dual
1730
+ self.penalty = penalty
1731
+ self.scoring = scoring
1732
+ self.tol = tol
1733
+ self.max_iter = max_iter
1734
+ self.class_weight = class_weight
1735
+ self.n_jobs = n_jobs
1736
+ self.verbose = verbose
1737
+ self.solver = solver
1738
+ self.refit = refit
1739
+ self.intercept_scaling = intercept_scaling
1740
+ self.multi_class = multi_class
1741
+ self.random_state = random_state
1742
+ self.l1_ratios = l1_ratios
1743
+
1744
+ @_fit_context(prefer_skip_nested_validation=True)
1745
+ def fit(self, X, y, sample_weight=None, **params):
1746
+ """Fit the model according to the given training data.
1747
+
1748
+ Parameters
1749
+ ----------
1750
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1751
+ Training vector, where `n_samples` is the number of samples and
1752
+ `n_features` is the number of features.
1753
+
1754
+ y : array-like of shape (n_samples,)
1755
+ Target vector relative to X.
1756
+
1757
+ sample_weight : array-like of shape (n_samples,) default=None
1758
+ Array of weights that are assigned to individual samples.
1759
+ If not provided, then each sample is given unit weight.
1760
+
1761
+ **params : dict
1762
+ Parameters to pass to the underlying splitter and scorer.
1763
+
1764
+ .. versionadded:: 1.4
1765
+
1766
+ Returns
1767
+ -------
1768
+ self : object
1769
+ Fitted LogisticRegressionCV estimator.
1770
+ """
1771
+ _raise_for_params(params, self, "fit")
1772
+
1773
+ solver = _check_solver(self.solver, self.penalty, self.dual)
1774
+
1775
+ if self.penalty == "elasticnet":
1776
+ if (
1777
+ self.l1_ratios is None
1778
+ or len(self.l1_ratios) == 0
1779
+ or any(
1780
+ (
1781
+ not isinstance(l1_ratio, numbers.Number)
1782
+ or l1_ratio < 0
1783
+ or l1_ratio > 1
1784
+ )
1785
+ for l1_ratio in self.l1_ratios
1786
+ )
1787
+ ):
1788
+ raise ValueError(
1789
+ "l1_ratios must be a list of numbers between "
1790
+ "0 and 1; got (l1_ratios=%r)"
1791
+ % self.l1_ratios
1792
+ )
1793
+ l1_ratios_ = self.l1_ratios
1794
+ else:
1795
+ if self.l1_ratios is not None:
1796
+ warnings.warn(
1797
+ "l1_ratios parameter is only used when penalty "
1798
+ "is 'elasticnet'. Got (penalty={})".format(self.penalty)
1799
+ )
1800
+
1801
+ l1_ratios_ = [None]
1802
+
1803
+ X, y = self._validate_data(
1804
+ X,
1805
+ y,
1806
+ accept_sparse="csr",
1807
+ dtype=np.float64,
1808
+ order="C",
1809
+ accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
1810
+ )
1811
+ check_classification_targets(y)
1812
+
1813
+ class_weight = self.class_weight
1814
+
1815
+ # Encode for string labels
1816
+ label_encoder = LabelEncoder().fit(y)
1817
+ y = label_encoder.transform(y)
1818
+ if isinstance(class_weight, dict):
1819
+ class_weight = {
1820
+ label_encoder.transform([cls])[0]: v for cls, v in class_weight.items()
1821
+ }
1822
+
1823
+ # The original class labels
1824
+ classes = self.classes_ = label_encoder.classes_
1825
+ encoded_labels = label_encoder.transform(label_encoder.classes_)
1826
+
1827
+ multi_class = _check_multi_class(self.multi_class, solver, len(classes))
1828
+
1829
+ if solver in ["sag", "saga"]:
1830
+ max_squared_sum = row_norms(X, squared=True).max()
1831
+ else:
1832
+ max_squared_sum = None
1833
+
1834
+ if _routing_enabled():
1835
+ routed_params = process_routing(
1836
+ self,
1837
+ "fit",
1838
+ sample_weight=sample_weight,
1839
+ **params,
1840
+ )
1841
+ else:
1842
+ routed_params = Bunch()
1843
+ routed_params.splitter = Bunch(split={})
1844
+ routed_params.scorer = Bunch(score=params)
1845
+ if sample_weight is not None:
1846
+ routed_params.scorer.score["sample_weight"] = sample_weight
1847
+
1848
+ # init cross-validation generator
1849
+ cv = check_cv(self.cv, y, classifier=True)
1850
+ folds = list(cv.split(X, y, **routed_params.splitter.split))
1851
+
1852
+ # Use the label encoded classes
1853
+ n_classes = len(encoded_labels)
1854
+
1855
+ if n_classes < 2:
1856
+ raise ValueError(
1857
+ "This solver needs samples of at least 2 classes"
1858
+ " in the data, but the data contains only one"
1859
+ " class: %r"
1860
+ % classes[0]
1861
+ )
1862
+
1863
+ if n_classes == 2:
1864
+ # OvR in case of binary problems is as good as fitting
1865
+ # the higher label
1866
+ n_classes = 1
1867
+ encoded_labels = encoded_labels[1:]
1868
+ classes = classes[1:]
1869
+
1870
+ # We need this hack to iterate only once over labels, in the case of
1871
+ # multi_class = multinomial, without changing the value of the labels.
1872
+ if multi_class == "multinomial":
1873
+ iter_encoded_labels = iter_classes = [None]
1874
+ else:
1875
+ iter_encoded_labels = encoded_labels
1876
+ iter_classes = classes
1877
+
1878
+ # compute the class weights for the entire dataset y
1879
+ if class_weight == "balanced":
1880
+ class_weight = compute_class_weight(
1881
+ class_weight, classes=np.arange(len(self.classes_)), y=y
1882
+ )
1883
+ class_weight = dict(enumerate(class_weight))
1884
+
1885
+ path_func = delayed(_log_reg_scoring_path)
1886
+
1887
+ # The SAG solver releases the GIL so it's more efficient to use
1888
+ # threads for this solver.
1889
+ if self.solver in ["sag", "saga"]:
1890
+ prefer = "threads"
1891
+ else:
1892
+ prefer = "processes"
1893
+
1894
+ fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
1895
+ path_func(
1896
+ X,
1897
+ y,
1898
+ train,
1899
+ test,
1900
+ pos_class=label,
1901
+ Cs=self.Cs,
1902
+ fit_intercept=self.fit_intercept,
1903
+ penalty=self.penalty,
1904
+ dual=self.dual,
1905
+ solver=solver,
1906
+ tol=self.tol,
1907
+ max_iter=self.max_iter,
1908
+ verbose=self.verbose,
1909
+ class_weight=class_weight,
1910
+ scoring=self.scoring,
1911
+ multi_class=multi_class,
1912
+ intercept_scaling=self.intercept_scaling,
1913
+ random_state=self.random_state,
1914
+ max_squared_sum=max_squared_sum,
1915
+ sample_weight=sample_weight,
1916
+ l1_ratio=l1_ratio,
1917
+ score_params=routed_params.scorer.score,
1918
+ )
1919
+ for label in iter_encoded_labels
1920
+ for train, test in folds
1921
+ for l1_ratio in l1_ratios_
1922
+ )
1923
+
1924
+ # _log_reg_scoring_path will output different shapes depending on the
1925
+ # multi_class param, so we need to reshape the outputs accordingly.
1926
+ # Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
1927
+ # rows are equal, so we just take the first one.
1928
+ # After reshaping,
1929
+ # - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
1930
+ # - coefs_paths is of shape
1931
+ # (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
1932
+ # - n_iter is of shape
1933
+ # (n_classes, n_folds, n_Cs . n_l1_ratios) or
1934
+ # (1, n_folds, n_Cs . n_l1_ratios)
1935
+ coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
1936
+ self.Cs_ = Cs[0]
1937
+ if multi_class == "multinomial":
1938
+ coefs_paths = np.reshape(
1939
+ coefs_paths,
1940
+ (len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1),
1941
+ )
1942
+ # equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
1943
+ # (1, 2, 0, 3))
1944
+ coefs_paths = np.swapaxes(coefs_paths, 0, 1)
1945
+ coefs_paths = np.swapaxes(coefs_paths, 0, 2)
1946
+ self.n_iter_ = np.reshape(
1947
+ n_iter_, (1, len(folds), len(self.Cs_) * len(l1_ratios_))
1948
+ )
1949
+ # repeat same scores across all classes
1950
+ scores = np.tile(scores, (n_classes, 1, 1))
1951
+ else:
1952
+ coefs_paths = np.reshape(
1953
+ coefs_paths,
1954
+ (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_), -1),
1955
+ )
1956
+ self.n_iter_ = np.reshape(
1957
+ n_iter_, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
1958
+ )
1959
+ scores = np.reshape(scores, (n_classes, len(folds), -1))
1960
+ self.scores_ = dict(zip(classes, scores))
1961
+ self.coefs_paths_ = dict(zip(classes, coefs_paths))
1962
+
1963
+ self.C_ = list()
1964
+ self.l1_ratio_ = list()
1965
+ self.coef_ = np.empty((n_classes, X.shape[1]))
1966
+ self.intercept_ = np.zeros(n_classes)
1967
+ for index, (cls, encoded_label) in enumerate(
1968
+ zip(iter_classes, iter_encoded_labels)
1969
+ ):
1970
+ if multi_class == "ovr":
1971
+ scores = self.scores_[cls]
1972
+ coefs_paths = self.coefs_paths_[cls]
1973
+ else:
1974
+ # For multinomial, all scores are the same across classes
1975
+ scores = scores[0]
1976
+ # coefs_paths will keep its original shape because
1977
+ # logistic_regression_path expects it this way
1978
+
1979
+ if self.refit:
1980
+ # best_index is between 0 and (n_Cs . n_l1_ratios - 1)
1981
+ # for example, with n_cs=2 and n_l1_ratios=3
1982
+ # the layout of scores is
1983
+ # [c1, c2, c1, c2, c1, c2]
1984
+ # l1_1 , l1_2 , l1_3
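+ # Worked example (illustrative): with the layout above, best_index = 3
+ # selects C = Cs_[3 % n_cs] = Cs_[1] (i.e. c2) and
+ # l1_ratio = l1_ratios_[3 // n_cs] = l1_ratios_[1] (i.e. l1_2),
+ # which is exactly what the two decompositions below compute.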
1985
+ best_index = scores.sum(axis=0).argmax()
1986
+
1987
+ best_index_C = best_index % len(self.Cs_)
1988
+ C_ = self.Cs_[best_index_C]
1989
+ self.C_.append(C_)
1990
+
1991
+ best_index_l1 = best_index // len(self.Cs_)
1992
+ l1_ratio_ = l1_ratios_[best_index_l1]
1993
+ self.l1_ratio_.append(l1_ratio_)
1994
+
1995
+ if multi_class == "multinomial":
1996
+ coef_init = np.mean(coefs_paths[:, :, best_index, :], axis=1)
1997
+ else:
1998
+ coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
1999
+
2000
+ # Note that y is label encoded and hence pos_class must be
2001
+ # the encoded label / None (for 'multinomial')
2002
+ w, _, _ = _logistic_regression_path(
2003
+ X,
2004
+ y,
2005
+ pos_class=encoded_label,
2006
+ Cs=[C_],
2007
+ solver=solver,
2008
+ fit_intercept=self.fit_intercept,
2009
+ coef=coef_init,
2010
+ max_iter=self.max_iter,
2011
+ tol=self.tol,
2012
+ penalty=self.penalty,
2013
+ class_weight=class_weight,
2014
+ multi_class=multi_class,
2015
+ verbose=max(0, self.verbose - 1),
2016
+ random_state=self.random_state,
2017
+ check_input=False,
2018
+ max_squared_sum=max_squared_sum,
2019
+ sample_weight=sample_weight,
2020
+ l1_ratio=l1_ratio_,
2021
+ )
2022
+ w = w[0]
2023
+
2024
+ else:
2025
+ # Take the best scores across every fold and the average of
2026
+ # all coefficients corresponding to the best scores.
2027
+ best_indices = np.argmax(scores, axis=1)
2028
+ if multi_class == "ovr":
2029
+ w = np.mean(
2030
+ [coefs_paths[i, best_indices[i], :] for i in range(len(folds))],
2031
+ axis=0,
2032
+ )
2033
+ else:
2034
+ w = np.mean(
2035
+ [
2036
+ coefs_paths[:, i, best_indices[i], :]
2037
+ for i in range(len(folds))
2038
+ ],
2039
+ axis=0,
2040
+ )
2041
+
2042
+ best_indices_C = best_indices % len(self.Cs_)
2043
+ self.C_.append(np.mean(self.Cs_[best_indices_C]))
2044
+
2045
+ if self.penalty == "elasticnet":
2046
+ best_indices_l1 = best_indices // len(self.Cs_)
2047
+ self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
2048
+ else:
2049
+ self.l1_ratio_.append(None)
2050
+
2051
+ if multi_class == "multinomial":
2052
+ self.C_ = np.tile(self.C_, n_classes)
2053
+ self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
2054
+ self.coef_ = w[:, : X.shape[1]]
2055
+ if self.fit_intercept:
2056
+ self.intercept_ = w[:, -1]
2057
+ else:
2058
+ self.coef_[index] = w[: X.shape[1]]
2059
+ if self.fit_intercept:
2060
+ self.intercept_[index] = w[-1]
2061
+
2062
+ self.C_ = np.asarray(self.C_)
2063
+ self.l1_ratio_ = np.asarray(self.l1_ratio_)
2064
+ self.l1_ratios_ = np.asarray(l1_ratios_)
2065
+ # if elasticnet was used, add the l1_ratios dimension to some
2066
+ # attributes
2067
+ if self.l1_ratios is not None:
2068
+ # with n_cs=2 and n_l1_ratios=3
2069
+ # the layout of scores is
2070
+ # [c1, c2, c1, c2, c1, c2]
2071
+ # l1_1 , l1_2 , l1_3
2072
+ # To get a 2d array with the following layout
2073
+ # l1_1, l1_2, l1_3
2074
+ # c1 [[ . , . , . ],
2075
+ # c2 [ . , . , . ]]
2076
+ # We need to first reshape and then transpose.
2077
+ # The same goes for the other arrays
2078
+ for cls, coefs_path in self.coefs_paths_.items():
2079
+ self.coefs_paths_[cls] = coefs_path.reshape(
2080
+ (len(folds), self.l1_ratios_.size, self.Cs_.size, -1)
2081
+ )
2082
+ self.coefs_paths_[cls] = np.transpose(
2083
+ self.coefs_paths_[cls], (0, 2, 1, 3)
2084
+ )
2085
+ for cls, score in self.scores_.items():
2086
+ self.scores_[cls] = score.reshape(
2087
+ (len(folds), self.l1_ratios_.size, self.Cs_.size)
2088
+ )
2089
+ self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1))
2090
+
2091
+ self.n_iter_ = self.n_iter_.reshape(
2092
+ (-1, len(folds), self.l1_ratios_.size, self.Cs_.size)
2093
+ )
2094
+ self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2))
2095
+
2096
+ return self
2097
+
2098
+ def score(self, X, y, sample_weight=None, **score_params):
2099
+ """Score using the `scoring` option on the given test data and labels.
2100
+
2101
+ Parameters
2102
+ ----------
2103
+ X : array-like of shape (n_samples, n_features)
2104
+ Test samples.
2105
+
2106
+ y : array-like of shape (n_samples,)
2107
+ True labels for X.
2108
+
2109
+ sample_weight : array-like of shape (n_samples,), default=None
2110
+ Sample weights.
2111
+
2112
+ **score_params : dict
2113
+ Parameters to pass to the `score` method of the underlying scorer.
2114
+
2115
+ .. versionadded:: 1.4
2116
+
2117
+ Returns
2118
+ -------
2119
+ score : float
2120
+ Score of self.predict(X) w.r.t. y.
2121
+ """
2122
+ _raise_for_params(score_params, self, "score")
2123
+
2124
+ scoring = self._get_scorer()
2125
+ if _routing_enabled():
2126
+ routed_params = process_routing(
2127
+ self,
2128
+ "score",
2129
+ sample_weight=sample_weight,
2130
+ **score_params,
2131
+ )
2132
+ else:
2133
+ routed_params = Bunch()
2134
+ routed_params.scorer = Bunch(score={})
2135
+ if sample_weight is not None:
2136
+ routed_params.scorer.score["sample_weight"] = sample_weight
2137
+
2138
+ return scoring(
2139
+ self,
2140
+ X,
2141
+ y,
2142
+ **routed_params.scorer.score,
2143
+ )
2144
+
2145
+ def get_metadata_routing(self):
2146
+ """Get metadata routing of this object.
2147
+
2148
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
2149
+ mechanism works.
2150
+
2151
+ .. versionadded:: 1.4
2152
+
2153
+ Returns
2154
+ -------
2155
+ routing : MetadataRouter
2156
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
2157
+ routing information.
2158
+ """
2159
+
2160
+ router = (
2161
+ MetadataRouter(owner=self.__class__.__name__)
2162
+ .add_self_request(self)
2163
+ .add(
2164
+ splitter=self.cv,
2165
+ method_mapping=MethodMapping().add(callee="split", caller="fit"),
2166
+ )
2167
+ .add(
2168
+ scorer=self._get_scorer(),
2169
+ method_mapping=MethodMapping()
2170
+ .add(callee="score", caller="score")
2171
+ .add(callee="score", caller="fit"),
2172
+ )
2173
+ )
2174
+ return router
2175
+
2176
+ def _more_tags(self):
2177
+ return {
2178
+ "_xfail_checks": {
2179
+ "check_sample_weights_invariance": (
2180
+ "zero sample_weight is not equivalent to removing samples"
2181
+ ),
2182
+ }
2183
+ }
2184
+
2185
+ def _get_scorer(self):
2186
+ """Get the scorer based on the scoring method specified.
2187
+ The default scoring method is `accuracy`.
2188
+ """
2189
+ scoring = self.scoring or "accuracy"
2190
+ return get_scorer(scoring)
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py ADDED
@@ -0,0 +1,623 @@
1
+ # Author: Johannes Schönberger
2
+ #
3
+ # License: BSD 3 clause
4
+
5
+ import warnings
6
+ from numbers import Integral, Real
7
+
8
+ import numpy as np
9
+
10
+ from ..base import (
11
+ BaseEstimator,
12
+ MetaEstimatorMixin,
13
+ MultiOutputMixin,
14
+ RegressorMixin,
15
+ _fit_context,
16
+ clone,
17
+ )
18
+ from ..exceptions import ConvergenceWarning
19
+ from ..utils import check_consistent_length, check_random_state
20
+ from ..utils._param_validation import (
21
+ HasMethods,
22
+ Interval,
23
+ Options,
24
+ RealNotInt,
25
+ StrOptions,
26
+ )
27
+ from ..utils.metadata_routing import (
28
+ _raise_for_unsupported_routing,
29
+ _RoutingNotSupportedMixin,
30
+ )
31
+ from ..utils.random import sample_without_replacement
32
+ from ..utils.validation import _check_sample_weight, check_is_fitted, has_fit_parameter
33
+ from ._base import LinearRegression
34
+
35
+ _EPSILON = np.spacing(1)
36
+
37
+
38
+ def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
39
+ """Determine number trials such that at least one outlier-free subset is
40
+ sampled for the given inlier/outlier ratio.
41
+
42
+ Parameters
43
+ ----------
44
+ n_inliers : int
45
+ Number of inliers in the data.
46
+
47
+ n_samples : int
48
+ Total number of samples in the data.
49
+
50
+ min_samples : int
51
+ Minimum number of samples chosen randomly from original data.
52
+
53
+ probability : float
54
+ Probability (confidence) that one outlier-free sample is generated.
55
+
56
+ Returns
57
+ -------
58
+ trials : int
59
+ Number of trials.
60
+
61
+ """
62
+ inlier_ratio = n_inliers / float(n_samples)
63
+ nom = max(_EPSILON, 1 - probability)
64
+ denom = max(_EPSILON, 1 - inlier_ratio**min_samples)
65
+ if nom == 1:
66
+ return 0
67
+ if denom == 1:
68
+ return float("inf")
69
+ return abs(float(np.ceil(np.log(nom) / np.log(denom))))
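+ # Illustrative sanity check (not part of the upstream file) of the formula
+ # above: with a 50% inlier ratio, min_samples=2 and the default
+ # stop_probability of 0.99, about 17 trials are required:
+ #
+ #     >>> _dynamic_max_trials(n_inliers=50, n_samples=100, min_samples=2,
+ #     ...                     probability=0.99)
+ #     17.0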
70
+
71
+
72
+ class RANSACRegressor(
73
+ _RoutingNotSupportedMixin,
74
+ MetaEstimatorMixin,
75
+ RegressorMixin,
76
+ MultiOutputMixin,
77
+ BaseEstimator,
78
+ ):
79
+ """RANSAC (RANdom SAmple Consensus) algorithm.
80
+
81
+ RANSAC is an iterative algorithm for the robust estimation of parameters
82
+ from a subset of inliers from the complete data set.
83
+
84
+ Read more in the :ref:`User Guide <ransac_regression>`.
85
+
86
+ Parameters
87
+ ----------
88
+ estimator : object, default=None
89
+ Base estimator object which implements the following methods:
90
+
91
+ * `fit(X, y)`: Fit model to given training data and target values.
92
+ * `score(X, y)`: Returns the mean accuracy on the given test data,
93
+ which is used for the stop criterion defined by `stop_score`.
94
+ Additionally, the score is used to decide which of two equally
95
+ large consensus sets is chosen as the better one.
96
+ * `predict(X)`: Returns predicted values using the linear model,
97
+ which is used to compute residual error using loss function.
98
+
99
+ If `estimator` is None, then
100
+ :class:`~sklearn.linear_model.LinearRegression` is used for
101
+ target values of dtype float.
102
+
103
+ Note that the current implementation only supports regression
104
+ estimators.
105
+
106
+ min_samples : int (>= 1) or float ([0, 1]), default=None
107
+ Minimum number of samples chosen randomly from original data. Treated
108
+ as an absolute number of samples for `min_samples >= 1`, treated as a
109
+ relative number `ceil(min_samples * X.shape[0])` for
110
+ `min_samples < 1`. This is typically chosen as the minimal number of
111
+ samples necessary to estimate the given `estimator`. By default a
112
+ :class:`~sklearn.linear_model.LinearRegression` estimator is assumed and
113
+ `min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly
114
+ dependent upon the model, so if an `estimator` other than
115
+ :class:`~sklearn.linear_model.LinearRegression` is used, the user must
116
+ provide a value.
117
+
118
+ residual_threshold : float, default=None
119
+ Maximum residual for a data sample to be classified as an inlier.
120
+ By default the threshold is chosen as the MAD (median absolute
121
+ deviation) of the target values `y`. Points whose residuals are
122
+ strictly equal to the threshold are considered as inliers.
123
+
124
+ is_data_valid : callable, default=None
125
+ This function is called with the randomly selected data before the
126
+ model is fitted to it: `is_data_valid(X, y)`. If its return value is
127
+ False the current randomly chosen sub-sample is skipped.
128
+
129
+ is_model_valid : callable, default=None
130
+ This function is called with the estimated model and the randomly
131
+ selected data: `is_model_valid(model, X, y)`. If its return value is
132
+ False the current randomly chosen sub-sample is skipped.
133
+ Rejecting samples with this function is computationally costlier than
134
+ with `is_data_valid`. `is_model_valid` should therefore only be used if
135
+ the estimated model is needed for making the rejection decision.
136
+
137
+ max_trials : int, default=100
138
+ Maximum number of iterations for random sample selection.
139
+
140
+ max_skips : int, default=np.inf
141
+ Maximum number of iterations that can be skipped due to finding zero
142
+ inliers or invalid data defined by ``is_data_valid`` or invalid models
143
+ defined by ``is_model_valid``.
144
+
145
+ .. versionadded:: 0.19
146
+
147
+ stop_n_inliers : int, default=np.inf
148
+ Stop iteration if at least this number of inliers are found.
149
+
150
+ stop_score : float, default=np.inf
151
+ Stop iteration if the score is greater than or equal to this threshold.
152
+
153
+ stop_probability : float in range [0, 1], default=0.99
154
+ RANSAC iteration stops if at least one outlier-free set of the training
155
+ data is sampled in RANSAC. This requires to generate at least N
156
+ samples (iterations)::
157
+
158
+ N >= log(1 - probability) / log(1 - e**m)
159
+
160
+ where the probability (confidence) is typically set to high value such
161
+ as 0.99 (the default) and e is the current fraction of inliers w.r.t.
162
+ the total number of samples, and m is `min_samples`.
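+
+ For example, with ``stop_probability=0.99``, a current inlier fraction
+ ``e = 0.5`` and ``m = 2``, this gives ``N >= log(0.01) / log(0.75)``,
+ approximately 16, so at least 17 trials.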
163
+
164
+ loss : str, callable, default='absolute_error'
165
+ The string inputs 'absolute_error' and 'squared_error' are supported, which
166
+ compute the absolute error and squared error per sample, respectively.
167
+
168
+ If ``loss`` is a callable, then it should be a function that takes
169
+ two arrays as inputs, the true and predicted value and returns a 1-D
170
+ array with the i-th value of the array corresponding to the loss
171
+ on ``X[i]``.
172
+
173
+ If the loss on a sample is greater than the ``residual_threshold``,
174
+ then this sample is classified as an outlier.
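+
+ A minimal sketch of a custom callable loss (a hypothetical example,
+ assuming ``numpy`` is imported as ``np``)::
+
+     def huber_like_loss(y_true, y_pred, delta=1.0):
+         err = np.abs(y_true - y_pred)
+         # quadratic near zero, linear in the tails, one value per sample
+         return np.where(err <= delta,
+                         0.5 * err ** 2,
+                         delta * (err - 0.5 * delta))
+
+ Passing ``loss=huber_like_loss`` then flags a sample as an outlier when
+ this per-sample loss exceeds ``residual_threshold``.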
175
+
176
+ .. versionadded:: 0.18
177
+
178
+ random_state : int, RandomState instance, default=None
179
+ The generator used to select random sub-samples of the data.
180
+ Pass an int for reproducible output across multiple function calls.
181
+ See :term:`Glossary <random_state>`.
182
+
183
+ Attributes
184
+ ----------
185
+ estimator_ : object
186
+ Best fitted model (copy of the `estimator` object).
187
+
188
+ n_trials_ : int
189
+ Number of random selection trials until one of the stop criteria is
190
+ met. It is always ``<= max_trials``.
191
+
192
+ inlier_mask_ : bool ndarray of shape (n_samples,)
193
+ Boolean mask of inliers classified as ``True``.
194
+
195
+ n_skips_no_inliers_ : int
196
+ Number of iterations skipped due to finding zero inliers.
197
+
198
+ .. versionadded:: 0.19
199
+
200
+ n_skips_invalid_data_ : int
201
+ Number of iterations skipped due to invalid data defined by
202
+ ``is_data_valid``.
203
+
204
+ .. versionadded:: 0.19
205
+
206
+ n_skips_invalid_model_ : int
207
+ Number of iterations skipped due to an invalid model defined by
208
+ ``is_model_valid``.
209
+
210
+ .. versionadded:: 0.19
211
+
212
+ n_features_in_ : int
213
+ Number of features seen during :term:`fit`.
214
+
215
+ .. versionadded:: 0.24
216
+
217
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
218
+ Names of features seen during :term:`fit`. Defined only when `X`
219
+ has feature names that are all strings.
220
+
221
+ .. versionadded:: 1.0
222
+
223
+ See Also
224
+ --------
225
+ HuberRegressor : Linear regression model that is robust to outliers.
226
+ TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
227
+ SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
228
+
229
+ References
230
+ ----------
231
+ .. [1] https://en.wikipedia.org/wiki/RANSAC
232
+ .. [2] https://www.sri.com/wp-content/uploads/2021/12/ransac-publication.pdf
233
+ .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
234
+
235
+ Examples
236
+ --------
237
+ >>> from sklearn.linear_model import RANSACRegressor
238
+ >>> from sklearn.datasets import make_regression
239
+ >>> X, y = make_regression(
240
+ ... n_samples=200, n_features=2, noise=4.0, random_state=0)
241
+ >>> reg = RANSACRegressor(random_state=0).fit(X, y)
242
+ >>> reg.score(X, y)
243
+ 0.9885...
244
+ >>> reg.predict(X[:1,])
245
+ array([-31.9417...])
246
+ """ # noqa: E501
247
+
248
+ _parameter_constraints: dict = {
249
+ "estimator": [HasMethods(["fit", "score", "predict"]), None],
250
+ "min_samples": [
251
+ Interval(Integral, 1, None, closed="left"),
252
+ Interval(RealNotInt, 0, 1, closed="both"),
253
+ None,
254
+ ],
255
+ "residual_threshold": [Interval(Real, 0, None, closed="left"), None],
256
+ "is_data_valid": [callable, None],
257
+ "is_model_valid": [callable, None],
258
+ "max_trials": [
259
+ Interval(Integral, 0, None, closed="left"),
260
+ Options(Real, {np.inf}),
261
+ ],
262
+ "max_skips": [
263
+ Interval(Integral, 0, None, closed="left"),
264
+ Options(Real, {np.inf}),
265
+ ],
266
+ "stop_n_inliers": [
267
+ Interval(Integral, 0, None, closed="left"),
268
+ Options(Real, {np.inf}),
269
+ ],
270
+ "stop_score": [Interval(Real, None, None, closed="both")],
271
+ "stop_probability": [Interval(Real, 0, 1, closed="both")],
272
+ "loss": [StrOptions({"absolute_error", "squared_error"}), callable],
273
+ "random_state": ["random_state"],
274
+ }
275
+
276
+ def __init__(
277
+ self,
278
+ estimator=None,
279
+ *,
280
+ min_samples=None,
281
+ residual_threshold=None,
282
+ is_data_valid=None,
283
+ is_model_valid=None,
284
+ max_trials=100,
285
+ max_skips=np.inf,
286
+ stop_n_inliers=np.inf,
287
+ stop_score=np.inf,
288
+ stop_probability=0.99,
289
+ loss="absolute_error",
290
+ random_state=None,
291
+ ):
292
+ self.estimator = estimator
293
+ self.min_samples = min_samples
294
+ self.residual_threshold = residual_threshold
295
+ self.is_data_valid = is_data_valid
296
+ self.is_model_valid = is_model_valid
297
+ self.max_trials = max_trials
298
+ self.max_skips = max_skips
299
+ self.stop_n_inliers = stop_n_inliers
300
+ self.stop_score = stop_score
301
+ self.stop_probability = stop_probability
302
+ self.random_state = random_state
303
+ self.loss = loss
304
+
305
+ @_fit_context(
306
+ # RansacRegressor.estimator is not validated yet
307
+ prefer_skip_nested_validation=False
308
+ )
309
+ def fit(self, X, y, sample_weight=None):
310
+ """Fit estimator using RANSAC algorithm.
311
+
312
+ Parameters
313
+ ----------
314
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
315
+ Training data.
316
+
317
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
318
+ Target values.
319
+
320
+ sample_weight : array-like of shape (n_samples,), default=None
321
+ Individual weights for each sample. An error is raised if
322
+ `sample_weight` is passed and the estimator's `fit` method does
323
+ not support it.
324
+
325
+ .. versionadded:: 0.18
326
+
327
+ Returns
328
+ -------
329
+ self : object
330
+ Fitted `RANSACRegressor` estimator.
331
+
332
+ Raises
333
+ ------
334
+ ValueError
335
+ If no valid consensus set could be found. This occurs if
336
+ `is_data_valid` and `is_model_valid` return False for all
337
+ `max_trials` randomly chosen sub-samples.
338
+ """
339
+ _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
340
+ # Need to validate separately here. We can't pass multi_output=True
341
+ # because that would allow y to be csr. Delay expensive finiteness
342
+ # check to the estimator's own input validation.
343
+ check_X_params = dict(accept_sparse="csr", force_all_finite=False)
344
+ check_y_params = dict(ensure_2d=False)
345
+ X, y = self._validate_data(
346
+ X, y, validate_separately=(check_X_params, check_y_params)
347
+ )
348
+ check_consistent_length(X, y)
349
+
350
+ if self.estimator is not None:
351
+ estimator = clone(self.estimator)
352
+ else:
353
+ estimator = LinearRegression()
354
+
355
+ if self.min_samples is None:
356
+ if not isinstance(estimator, LinearRegression):
357
+ raise ValueError(
358
+ "`min_samples` needs to be explicitly set when estimator "
359
+ "is not a LinearRegression."
360
+ )
361
+ min_samples = X.shape[1] + 1
362
+ elif 0 < self.min_samples < 1:
363
+ min_samples = np.ceil(self.min_samples * X.shape[0])
364
+ elif self.min_samples >= 1:
365
+ min_samples = self.min_samples
366
+ if min_samples > X.shape[0]:
367
+ raise ValueError(
368
+ "`min_samples` may not be larger than number "
369
+ "of samples: n_samples = %d." % (X.shape[0])
370
+ )
371
+
372
+ if self.residual_threshold is None:
373
+ # MAD (median absolute deviation)
374
+ residual_threshold = np.median(np.abs(y - np.median(y)))
375
+ else:
376
+ residual_threshold = self.residual_threshold
377
+
378
+ if self.loss == "absolute_error":
379
+ if y.ndim == 1:
380
+ loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
381
+ else:
382
+ loss_function = lambda y_true, y_pred: np.sum(
383
+ np.abs(y_true - y_pred), axis=1
384
+ )
385
+ elif self.loss == "squared_error":
386
+ if y.ndim == 1:
387
+ loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
388
+ else:
389
+ loss_function = lambda y_true, y_pred: np.sum(
390
+ (y_true - y_pred) ** 2, axis=1
391
+ )
392
+
393
+ elif callable(self.loss):
394
+ loss_function = self.loss
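+ # Note: for multi-output targets the built-in losses sum the per-output
+ # errors of each sample, so `residual_threshold` applies to that sum.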
395
+
396
+ random_state = check_random_state(self.random_state)
397
+
398
+ try:  # Not all estimators accept a random_state
399
+ estimator.set_params(random_state=random_state)
400
+ except ValueError:
401
+ pass
402
+
403
+ estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight")
404
+ estimator_name = type(estimator).__name__
405
+ if sample_weight is not None and not estimator_fit_has_sample_weight:
406
+ raise ValueError(
407
+ "%s does not support sample_weight. Samples"
408
+ " weights are only used for the calibration"
409
+ " itself." % estimator_name
410
+ )
411
+ if sample_weight is not None:
412
+ sample_weight = _check_sample_weight(sample_weight, X)
413
+
414
+ n_inliers_best = 1
415
+ score_best = -np.inf
416
+ inlier_mask_best = None
417
+ X_inlier_best = None
418
+ y_inlier_best = None
419
+ inlier_best_idxs_subset = None
420
+ self.n_skips_no_inliers_ = 0
421
+ self.n_skips_invalid_data_ = 0
422
+ self.n_skips_invalid_model_ = 0
423
+
424
+ # number of data samples
425
+ n_samples = X.shape[0]
426
+ sample_idxs = np.arange(n_samples)
427
+
428
+ self.n_trials_ = 0
429
+ max_trials = self.max_trials
430
+ while self.n_trials_ < max_trials:
431
+ self.n_trials_ += 1
432
+
433
+ if (
434
+ self.n_skips_no_inliers_
435
+ + self.n_skips_invalid_data_
436
+ + self.n_skips_invalid_model_
437
+ ) > self.max_skips:
438
+ break
439
+
440
+ # choose random sample set
441
+ subset_idxs = sample_without_replacement(
442
+ n_samples, min_samples, random_state=random_state
443
+ )
444
+ X_subset = X[subset_idxs]
445
+ y_subset = y[subset_idxs]
446
+
447
+ # check if random sample set is valid
448
+ if self.is_data_valid is not None and not self.is_data_valid(
449
+ X_subset, y_subset
450
+ ):
451
+ self.n_skips_invalid_data_ += 1
452
+ continue
453
+
454
+ # fit model for current random sample set
455
+ if sample_weight is None:
456
+ estimator.fit(X_subset, y_subset)
457
+ else:
458
+ estimator.fit(
459
+ X_subset, y_subset, sample_weight=sample_weight[subset_idxs]
460
+ )
461
+
462
+ # check if estimated model is valid
463
+ if self.is_model_valid is not None and not self.is_model_valid(
464
+ estimator, X_subset, y_subset
465
+ ):
466
+ self.n_skips_invalid_model_ += 1
467
+ continue
468
+
469
+ # residuals of all data for current random sample model
470
+ y_pred = estimator.predict(X)
471
+ residuals_subset = loss_function(y, y_pred)
472
+
473
+ # classify data into inliers and outliers
474
+ inlier_mask_subset = residuals_subset <= residual_threshold
475
+ n_inliers_subset = np.sum(inlier_mask_subset)
476
+
477
+ # fewer inliers -> skip current random sample
478
+ if n_inliers_subset < n_inliers_best:
479
+ self.n_skips_no_inliers_ += 1
480
+ continue
481
+
482
+ # extract inlier data set
483
+ inlier_idxs_subset = sample_idxs[inlier_mask_subset]
484
+ X_inlier_subset = X[inlier_idxs_subset]
485
+ y_inlier_subset = y[inlier_idxs_subset]
486
+
487
+ # score of inlier data set
488
+ score_subset = estimator.score(X_inlier_subset, y_inlier_subset)
489
+
490
+ # same number of inliers but worse score -> skip current random
491
+ # sample
492
+ if n_inliers_subset == n_inliers_best and score_subset < score_best:
493
+ continue
494
+
495
+ # save current random sample as best sample
496
+ n_inliers_best = n_inliers_subset
497
+ score_best = score_subset
498
+ inlier_mask_best = inlier_mask_subset
499
+ X_inlier_best = X_inlier_subset
500
+ y_inlier_best = y_inlier_subset
501
+ inlier_best_idxs_subset = inlier_idxs_subset
502
+
503
+ max_trials = min(
504
+ max_trials,
505
+ _dynamic_max_trials(
506
+ n_inliers_best, n_samples, min_samples, self.stop_probability
507
+ ),
508
+ )
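+ # Note on the update above: `_dynamic_max_trials` re-evaluates the
+ # `stop_probability` criterion, N >= log(1 - probability) / log(1 - e**m),
+ # using the current inlier ratio e and m = min_samples, so the trial
+ # budget can only shrink as better consensus sets are found.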
509
+
510
+ # break if sufficient number of inliers or score is reached
511
+ if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score:
512
+ break
513
+
514
+ # if none of the iterations met the required criteria
515
+ if inlier_mask_best is None:
516
+ if (
517
+ self.n_skips_no_inliers_
518
+ + self.n_skips_invalid_data_
519
+ + self.n_skips_invalid_model_
520
+ ) > self.max_skips:
521
+ raise ValueError(
522
+ "RANSAC skipped more iterations than `max_skips` without"
523
+ " finding a valid consensus set. Iterations were skipped"
524
+ " because each randomly chosen sub-sample failed the"
525
+ " passing criteria. See estimator attributes for"
526
+ " diagnostics (n_skips*)."
527
+ )
528
+ else:
529
+ raise ValueError(
530
+ "RANSAC could not find a valid consensus set. All"
531
+ " `max_trials` iterations were skipped because each"
532
+ " randomly chosen sub-sample failed the passing criteria."
533
+ " See estimator attributes for diagnostics (n_skips*)."
534
+ )
535
+ else:
536
+ if (
537
+ self.n_skips_no_inliers_
538
+ + self.n_skips_invalid_data_
539
+ + self.n_skips_invalid_model_
540
+ ) > self.max_skips:
541
+ warnings.warn(
542
+ (
543
+ "RANSAC found a valid consensus set but exited"
544
+ " early due to skipping more iterations than"
545
+ " `max_skips`. See estimator attributes for"
546
+ " diagnostics (n_skips*)."
547
+ ),
548
+ ConvergenceWarning,
549
+ )
550
+
551
+ # estimate final model using all inliers
552
+ if sample_weight is None:
553
+ estimator.fit(X_inlier_best, y_inlier_best)
554
+ else:
555
+ estimator.fit(
556
+ X_inlier_best,
557
+ y_inlier_best,
558
+ sample_weight=sample_weight[inlier_best_idxs_subset],
559
+ )
560
+
561
+ self.estimator_ = estimator
562
+ self.inlier_mask_ = inlier_mask_best
563
+ return self
564
+
565
+ def predict(self, X):
566
+ """Predict using the estimated model.
567
+
568
+ This is a wrapper for `estimator_.predict(X)`.
569
+
570
+ Parameters
571
+ ----------
572
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
573
+ Input data.
574
+
575
+ Returns
576
+ -------
577
+ y : array of shape (n_samples,) or (n_samples, n_targets)
578
+ Returns predicted values.
579
+ """
580
+ check_is_fitted(self)
581
+ X = self._validate_data(
582
+ X,
583
+ force_all_finite=False,
584
+ accept_sparse=True,
585
+ reset=False,
586
+ )
587
+ return self.estimator_.predict(X)
588
+
589
+ def score(self, X, y):
590
+ """Return the score of the prediction.
591
+
592
+ This is a wrapper for `estimator_.score(X, y)`.
593
+
594
+ Parameters
595
+ ----------
596
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
597
+ Training data.
598
+
599
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
600
+ Target values.
601
+
602
+ Returns
603
+ -------
604
+ z : float
605
+ Score of the prediction.
606
+ """
607
+ check_is_fitted(self)
608
+ X = self._validate_data(
609
+ X,
610
+ force_all_finite=False,
611
+ accept_sparse=True,
612
+ reset=False,
613
+ )
614
+ return self.estimator_.score(X, y)
615
+
616
+ def _more_tags(self):
617
+ return {
618
+ "_xfail_checks": {
619
+ "check_sample_weights_invariance": (
620
+ "zero sample_weight is not equivalent to removing samples"
621
+ ),
622
+ }
623
+ }
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py ADDED
@@ -0,0 +1,2612 @@
1
+ """
2
+ Ridge regression
3
+ """
4
+
5
+ # Author: Mathieu Blondel <[email protected]>
6
+ # Reuben Fletcher-Costin <[email protected]>
7
+ # Fabian Pedregosa <[email protected]>
8
+ # Michael Eickenberg <[email protected]>
9
+ # License: BSD 3 clause
10
+
11
+
12
+ import numbers
13
+ import warnings
14
+ from abc import ABCMeta, abstractmethod
15
+ from functools import partial
16
+ from numbers import Integral, Real
17
+
18
+ import numpy as np
19
+ from scipy import linalg, optimize, sparse
20
+ from scipy.sparse import linalg as sp_linalg
21
+
22
+ from ..base import MultiOutputMixin, RegressorMixin, _fit_context, is_classifier
23
+ from ..exceptions import ConvergenceWarning
24
+ from ..metrics import check_scoring, get_scorer_names
25
+ from ..model_selection import GridSearchCV
26
+ from ..preprocessing import LabelBinarizer
27
+ from ..utils import (
28
+ check_array,
29
+ check_consistent_length,
30
+ check_scalar,
31
+ column_or_1d,
32
+ compute_sample_weight,
33
+ )
34
+ from ..utils._param_validation import Interval, StrOptions, validate_params
35
+ from ..utils.extmath import row_norms, safe_sparse_dot
36
+ from ..utils.fixes import _sparse_linalg_cg
37
+ from ..utils.metadata_routing import (
38
+ _raise_for_unsupported_routing,
39
+ _RoutingNotSupportedMixin,
40
+ )
41
+ from ..utils.sparsefuncs import mean_variance_axis
42
+ from ..utils.validation import _check_sample_weight, check_is_fitted
43
+ from ._base import LinearClassifierMixin, LinearModel, _preprocess_data, _rescale_data
44
+ from ._sag import sag_solver
45
+
46
+
47
+ def _get_rescaled_operator(X, X_offset, sample_weight_sqrt):
48
+ """Create LinearOperator for matrix products with implicit centering.
49
+
50
+ Matrix product `LinearOperator @ coef` returns `(X - X_offset) @ coef`.
51
+ """
52
+
53
+ def matvec(b):
54
+ return X.dot(b) - sample_weight_sqrt * b.dot(X_offset)
55
+
56
+ def rmatvec(b):
57
+ return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt)
58
+
59
+ X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
60
+ return X1
61
+
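+ # Illustrative check (editorial sketch, not part of the scikit-learn
+ # source): with unit sample weights the operator above acts like the
+ # explicitly centered matrix.
+ #
+ #     import numpy as np
+ #     from scipy import sparse
+ #     rng = np.random.RandomState(0)
+ #     X = sparse.random(20, 5, density=0.5, format="csr", random_state=rng)
+ #     X_offset = np.asarray(X.mean(axis=0)).ravel()
+ #     op = _get_rescaled_operator(X, X_offset, np.ones(X.shape[0]))
+ #     coef = rng.randn(5)
+ #     assert np.allclose(op.matvec(coef), (X.toarray() - X_offset) @ coef)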
62
+
63
+ def _solve_sparse_cg(
64
+ X,
65
+ y,
66
+ alpha,
67
+ max_iter=None,
68
+ tol=1e-4,
69
+ verbose=0,
70
+ X_offset=None,
71
+ X_scale=None,
72
+ sample_weight_sqrt=None,
73
+ ):
74
+ if sample_weight_sqrt is None:
75
+ sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
76
+
77
+ n_samples, n_features = X.shape
78
+
79
+ if X_offset is None or X_scale is None:
80
+ X1 = sp_linalg.aslinearoperator(X)
81
+ else:
82
+ X_offset_scale = X_offset / X_scale
83
+ X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt)
84
+
85
+ coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
86
+
87
+ if n_features > n_samples:
88
+
89
+ def create_mv(curr_alpha):
90
+ def _mv(x):
91
+ return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
92
+
93
+ return _mv
94
+
95
+ else:
96
+
97
+ def create_mv(curr_alpha):
98
+ def _mv(x):
99
+ return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
100
+
101
+ return _mv
102
+
103
+ for i in range(y.shape[1]):
104
+ y_column = y[:, i]
105
+
106
+ mv = create_mv(alpha[i])
107
+ if n_features > n_samples:
108
+ # kernel ridge
109
+ # w = X.T * inv(X X^t + alpha*Id) y
110
+ C = sp_linalg.LinearOperator(
111
+ (n_samples, n_samples), matvec=mv, dtype=X.dtype
112
+ )
113
+ coef, info = _sparse_linalg_cg(C, y_column, rtol=tol)
114
+ coefs[i] = X1.rmatvec(coef)
115
+ else:
116
+ # linear ridge
117
+ # w = inv(X^t X + alpha*Id) * X.T y
118
+ y_column = X1.rmatvec(y_column)
119
+ C = sp_linalg.LinearOperator(
120
+ (n_features, n_features), matvec=mv, dtype=X.dtype
121
+ )
122
+ coefs[i], info = _sparse_linalg_cg(C, y_column, maxiter=max_iter, rtol=tol)
123
+
124
+ if info < 0:
125
+ raise ValueError("Failed with error code %d" % info)
126
+
127
+ if max_iter is None and info > 0 and verbose:
128
+ warnings.warn(
129
+ "sparse_cg did not converge after %d iterations." % info,
130
+ ConvergenceWarning,
131
+ )
132
+
133
+ return coefs
134
+
135
+
136
+ def _solve_lsqr(
137
+ X,
138
+ y,
139
+ *,
140
+ alpha,
141
+ fit_intercept=True,
142
+ max_iter=None,
143
+ tol=1e-4,
144
+ X_offset=None,
145
+ X_scale=None,
146
+ sample_weight_sqrt=None,
147
+ ):
148
+ """Solve Ridge regression via LSQR.
149
+
150
+ We expect that y is always mean centered.
151
+ If X is dense, we expect it to be mean centered such that we can solve
152
+ ||y - Xw||_2^2 + alpha * ||w||_2^2
153
+
154
+ If X is sparse, we expect X_offset to be given such that we can solve
155
+ ||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2
156
+
157
+ With sample weights S=diag(sample_weight), this becomes
158
+ ||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2
159
+ and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In
160
+ this case, X_offset is the sample_weight weighted mean of X before scaling by
161
+ sqrt(S). The objective then reads
162
+ ||y - (X - sqrt(S) X_offset) w)||_2^2 + alpha * ||w||_2^2
163
+ """
164
+ if sample_weight_sqrt is None:
165
+ sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
166
+
167
+ if sparse.issparse(X) and fit_intercept:
168
+ X_offset_scale = X_offset / X_scale
169
+ X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt)
170
+ else:
171
+ # No need to touch anything
172
+ X1 = X
173
+
174
+ n_samples, n_features = X.shape
175
+ coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
176
+ n_iter = np.empty(y.shape[1], dtype=np.int32)
177
+
178
+ # According to the lsqr documentation, alpha = damp^2.
179
+ sqrt_alpha = np.sqrt(alpha)
180
+
181
+ for i in range(y.shape[1]):
182
+ y_column = y[:, i]
183
+ info = sp_linalg.lsqr(
184
+ X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter
185
+ )
186
+ coefs[i] = info[0]
187
+ n_iter[i] = info[2]
188
+
189
+ return coefs, n_iter
190
+
191
+
192
+ def _solve_cholesky(X, y, alpha):
193
+ # w = inv(X^t X + alpha*Id) * X.T y
194
+ n_features = X.shape[1]
195
+ n_targets = y.shape[1]
196
+
197
+ A = safe_sparse_dot(X.T, X, dense_output=True)
198
+ Xy = safe_sparse_dot(X.T, y, dense_output=True)
199
+
200
+ one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
201
+
202
+ if one_alpha:
203
+ A.flat[:: n_features + 1] += alpha[0]
204
+ return linalg.solve(A, Xy, assume_a="pos", overwrite_a=True).T
205
+ else:
206
+ coefs = np.empty([n_targets, n_features], dtype=X.dtype)
207
+ for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
208
+ A.flat[:: n_features + 1] += current_alpha
209
+ coef[:] = linalg.solve(A, target, assume_a="pos", overwrite_a=False).ravel()
210
+ A.flat[:: n_features + 1] -= current_alpha
211
+ return coefs
212
+
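+ # Sketch of the closed-form identity used above (illustrative, assuming a
+ # single shared penalty): w = (X^T X + alpha * I)^{-1} X^T y.
+ #
+ #     import numpy as np
+ #     rng = np.random.RandomState(0)
+ #     X, y = rng.randn(30, 4), rng.randn(30, 1)
+ #     w = _solve_cholesky(X, y, alpha=np.array([1.0]))
+ #     w_ref = np.linalg.solve(X.T @ X + np.eye(4), X.T @ y).T
+ #     assert np.allclose(w, w_ref)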
213
+
214
+ def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
215
+ # dual_coef = inv(X X^t + alpha*Id) y
216
+ n_samples = K.shape[0]
217
+ n_targets = y.shape[1]
218
+
219
+ if copy:
220
+ K = K.copy()
221
+
222
+ alpha = np.atleast_1d(alpha)
223
+ one_alpha = (alpha == alpha[0]).all()
224
+ has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None]
225
+
226
+ if has_sw:
227
+ # Unlike other solvers, we need to support sample_weight directly
228
+ # because K might be a pre-computed kernel.
229
+ sw = np.sqrt(np.atleast_1d(sample_weight))
230
+ y = y * sw[:, np.newaxis]
231
+ K *= np.outer(sw, sw)
232
+
233
+ if one_alpha:
234
+ # Only one penalty, we can solve multi-target problems in one time.
235
+ K.flat[:: n_samples + 1] += alpha[0]
236
+
237
+ try:
238
+ # Note: we must use overwrite_a=False in order to be able to
239
+ # use the fall-back solution below in case a LinAlgError
240
+ # is raised
241
+ dual_coef = linalg.solve(K, y, assume_a="pos", overwrite_a=False)
242
+ except np.linalg.LinAlgError:
243
+ warnings.warn(
244
+ "Singular matrix in solving dual problem. Using "
245
+ "least-squares solution instead."
246
+ )
247
+ dual_coef = linalg.lstsq(K, y)[0]
248
+
249
+ # K is expensive to compute and store in memory so change it back in
250
+ # case it was user-given.
251
+ K.flat[:: n_samples + 1] -= alpha[0]
252
+
253
+ if has_sw:
254
+ dual_coef *= sw[:, np.newaxis]
255
+
256
+ return dual_coef
257
+ else:
258
+ # One penalty per target. We need to solve each target separately.
259
+ dual_coefs = np.empty([n_targets, n_samples], K.dtype)
260
+
261
+ for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
262
+ K.flat[:: n_samples + 1] += current_alpha
263
+
264
+ dual_coef[:] = linalg.solve(
265
+ K, target, assume_a="pos", overwrite_a=False
266
+ ).ravel()
267
+
268
+ K.flat[:: n_samples + 1] -= current_alpha
269
+
270
+ if has_sw:
271
+ dual_coefs *= sw[np.newaxis, :]
272
+
273
+ return dual_coefs.T
274
+
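+ # Note (added for clarity): when K = X X^T, the dual coefficients computed
+ # above yield the primal ridge coefficients via w = X^T @ dual_coef, which
+ # is how the 'cholesky' path handles the n_features > n_samples case below.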
275
+
276
+ def _solve_svd(X, y, alpha):
277
+ U, s, Vt = linalg.svd(X, full_matrices=False)
278
+ idx = s > 1e-15 # same default value as scipy.linalg.pinv
279
+ s_nnz = s[idx][:, np.newaxis]
280
+ UTy = np.dot(U.T, y)
281
+ d = np.zeros((s.size, alpha.size), dtype=X.dtype)
282
+ d[idx] = s_nnz / (s_nnz**2 + alpha)
283
+ d_UT_y = d * UTy
284
+ return np.dot(Vt.T, d_UT_y).T
285
+
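+ # Illustrative note (not part of the library comments): with the SVD
+ # X = U diag(s) V^T, the ridge solution for penalty alpha is
+ #     w = V diag(s / (s**2 + alpha)) U^T y,
+ # which is what `d` and `d_UT_y` compute per penalty/target; singular
+ # values below 1e-15 are treated as zero, matching `scipy.linalg.pinv`.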
286
+
287
+ def _solve_lbfgs(
288
+ X,
289
+ y,
290
+ alpha,
291
+ positive=True,
292
+ max_iter=None,
293
+ tol=1e-4,
294
+ X_offset=None,
295
+ X_scale=None,
296
+ sample_weight_sqrt=None,
297
+ ):
298
+ """Solve ridge regression with LBFGS.
299
+
300
+ The main purpose is fitting with forcing coefficients to be positive.
301
+ For unconstrained ridge regression, there are faster dedicated solver methods.
302
+ Note that with positive bounds on the coefficients, LBFGS seems faster
303
+ than scipy.optimize.lsq_linear.
304
+ """
305
+ n_samples, n_features = X.shape
306
+
307
+ options = {}
308
+ if max_iter is not None:
309
+ options["maxiter"] = max_iter
310
+ config = {
311
+ "method": "L-BFGS-B",
312
+ "tol": tol,
313
+ "jac": True,
314
+ "options": options,
315
+ }
316
+ if positive:
317
+ config["bounds"] = [(0, np.inf)] * n_features
318
+
319
+ if X_offset is not None and X_scale is not None:
320
+ X_offset_scale = X_offset / X_scale
321
+ else:
322
+ X_offset_scale = None
323
+
324
+ if sample_weight_sqrt is None:
325
+ sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
326
+
327
+ coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
328
+
329
+ for i in range(y.shape[1]):
330
+ x0 = np.zeros((n_features,))
331
+ y_column = y[:, i]
332
+
333
+ def func(w):
334
+ residual = X.dot(w) - y_column
335
+ if X_offset_scale is not None:
336
+ residual -= sample_weight_sqrt * w.dot(X_offset_scale)
337
+ f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
338
+ grad = X.T @ residual + alpha[i] * w
339
+ if X_offset_scale is not None:
340
+ grad -= X_offset_scale * residual.dot(sample_weight_sqrt)
341
+
342
+ return f, grad
343
+
344
+ result = optimize.minimize(func, x0, **config)
345
+ if not result["success"]:
346
+ warnings.warn(
347
+ (
348
+ "The lbfgs solver did not converge. Try increasing max_iter "
349
+ f"or tol. Currently: max_iter={max_iter} and tol={tol}"
350
+ ),
351
+ ConvergenceWarning,
352
+ )
353
+ coefs[i] = result["x"]
354
+
355
+ return coefs
356
+
357
+
358
+ def _get_valid_accept_sparse(is_X_sparse, solver):
359
+ if is_X_sparse and solver in ["auto", "sag", "saga"]:
360
+ return "csr"
361
+ else:
362
+ return ["csr", "csc", "coo"]
363
+
364
+
365
+ @validate_params(
366
+ {
367
+ "X": ["array-like", "sparse matrix", sp_linalg.LinearOperator],
368
+ "y": ["array-like"],
369
+ "alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
370
+ "sample_weight": [
371
+ Interval(Real, None, None, closed="neither"),
372
+ "array-like",
373
+ None,
374
+ ],
375
+ "solver": [
376
+ StrOptions(
377
+ {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"}
378
+ )
379
+ ],
380
+ "max_iter": [Interval(Integral, 0, None, closed="left"), None],
381
+ "tol": [Interval(Real, 0, None, closed="left")],
382
+ "verbose": ["verbose"],
383
+ "positive": ["boolean"],
384
+ "random_state": ["random_state"],
385
+ "return_n_iter": ["boolean"],
386
+ "return_intercept": ["boolean"],
387
+ "check_input": ["boolean"],
388
+ },
389
+ prefer_skip_nested_validation=True,
390
+ )
391
+ def ridge_regression(
392
+ X,
393
+ y,
394
+ alpha,
395
+ *,
396
+ sample_weight=None,
397
+ solver="auto",
398
+ max_iter=None,
399
+ tol=1e-4,
400
+ verbose=0,
401
+ positive=False,
402
+ random_state=None,
403
+ return_n_iter=False,
404
+ return_intercept=False,
405
+ check_input=True,
406
+ ):
407
+ """Solve the ridge equation by the method of normal equations.
408
+
409
+ Read more in the :ref:`User Guide <ridge_regression>`.
410
+
411
+ Parameters
412
+ ----------
413
+ X : {array-like, sparse matrix, LinearOperator} of shape \
414
+ (n_samples, n_features)
415
+ Training data.
416
+
417
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
418
+ Target values.
419
+
420
+ alpha : float or array-like of shape (n_targets,)
421
+ Constant that multiplies the L2 term, controlling regularization
422
+ strength. `alpha` must be a non-negative float i.e. in `[0, inf)`.
423
+
424
+ When `alpha = 0`, the objective is equivalent to ordinary least
425
+ squares, solved by the :class:`LinearRegression` object. For numerical
426
+ reasons, using `alpha = 0` with the `Ridge` object is not advised.
427
+ Instead, you should use the :class:`LinearRegression` object.
428
+
429
+ If an array is passed, penalties are assumed to be specific to the
430
+ targets. Hence they must correspond in number.
431
+
432
+ sample_weight : float or array-like of shape (n_samples,), default=None
433
+ Individual weights for each sample. If given a float, every sample
434
+ will have the same weight. If sample_weight is not None and
435
+ solver='auto', the solver will be set to 'cholesky'.
436
+
437
+ .. versionadded:: 0.17
438
+
439
+ solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
440
+ 'sag', 'saga', 'lbfgs'}, default='auto'
441
+ Solver to use in the computational routines:
442
+
443
+ - 'auto' chooses the solver automatically based on the type of data.
444
+
445
+ - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
446
+ coefficients. It is the most stable solver, in particular more stable
447
+ for singular matrices than 'cholesky' at the cost of being slower.
448
+
449
+ - 'cholesky' uses the standard scipy.linalg.solve function to
450
+ obtain a closed-form solution via a Cholesky decomposition of
451
+ dot(X.T, X)
452
+
453
+ - 'sparse_cg' uses the conjugate gradient solver as found in
454
+ scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
455
+ more appropriate than 'cholesky' for large-scale data
456
+ (possibility to set `tol` and `max_iter`).
457
+
458
+ - 'lsqr' uses the dedicated regularized least-squares routine
459
+ scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
460
+ procedure.
461
+
462
+ - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
463
+ its improved, unbiased version named SAGA. Both methods also use an
464
+ iterative procedure, and are often faster than other solvers when
465
+ both n_samples and n_features are large. Note that 'sag' and
466
+ 'saga' fast convergence is only guaranteed on features with
467
+ approximately the same scale. You can preprocess the data with a
468
+ scaler from sklearn.preprocessing.
469
+
470
+ - 'lbfgs' uses L-BFGS-B algorithm implemented in
471
+ `scipy.optimize.minimize`. It can be used only when `positive`
472
+ is True.
473
+
474
+ All solvers except 'svd' support both dense and sparse data. However, only
475
+ 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when
476
+ `fit_intercept` is True.
477
+
478
+ .. versionadded:: 0.17
479
+ Stochastic Average Gradient descent solver.
480
+ .. versionadded:: 0.19
481
+ SAGA solver.
482
+
483
+ max_iter : int, default=None
484
+ Maximum number of iterations for conjugate gradient solver.
485
+ For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
486
+ by scipy.sparse.linalg. For 'sag' and saga solver, the default value is
487
+ 1000. For 'lbfgs' solver, the default value is 15000.
488
+
489
+ tol : float, default=1e-4
490
+ Precision of the solution. Note that `tol` has no effect for solvers 'svd' and
491
+ 'cholesky'.
492
+
493
+ .. versionchanged:: 1.2
494
+ Default value changed from 1e-3 to 1e-4 for consistency with other linear
495
+ models.
496
+
497
+ verbose : int, default=0
498
+ Verbosity level. Setting verbose > 0 will display additional
499
+ information depending on the solver used.
500
+
501
+ positive : bool, default=False
502
+ When set to ``True``, forces the coefficients to be positive.
503
+ Only 'lbfgs' solver is supported in this case.
504
+
505
+ random_state : int, RandomState instance, default=None
506
+ Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
507
+ See :term:`Glossary <random_state>` for details.
508
+
509
+ return_n_iter : bool, default=False
510
+ If True, the method also returns `n_iter`, the actual number of
511
+ iteration performed by the solver.
512
+
513
+ .. versionadded:: 0.17
514
+
515
+ return_intercept : bool, default=False
516
+ If True and if X is sparse, the method also returns the intercept,
517
+ and the solver is automatically changed to 'sag'. This is only a
518
+ temporary fix for fitting the intercept with sparse data. For dense
519
+ data, use sklearn.linear_model._preprocess_data before your regression.
520
+
521
+ .. versionadded:: 0.17
522
+
523
+ check_input : bool, default=True
524
+ If False, the input arrays X and y will not be checked.
525
+
526
+ .. versionadded:: 0.21
527
+
528
+ Returns
529
+ -------
530
+ coef : ndarray of shape (n_features,) or (n_targets, n_features)
531
+ Weight vector(s).
532
+
533
+ n_iter : int, optional
534
+ The actual number of iteration performed by the solver.
535
+ Only returned if `return_n_iter` is True.
536
+
537
+ intercept : float or ndarray of shape (n_targets,)
538
+ The intercept of the model. Only returned if `return_intercept`
539
+ is True and if X is a scipy sparse array.
540
+
541
+ Notes
542
+ -----
543
+ This function won't compute the intercept.
544
+
545
+ Regularization improves the conditioning of the problem and
546
+ reduces the variance of the estimates. Larger values specify stronger
547
+ regularization. Alpha corresponds to ``1 / (2C)`` in other linear
548
+ models such as :class:`~sklearn.linear_model.LogisticRegression` or
549
+ :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
550
+ assumed to be specific to the targets. Hence they must correspond in
551
+ number.
552
+
553
+ Examples
554
+ --------
555
+ >>> import numpy as np
556
+ >>> from sklearn.datasets import make_regression
557
+ >>> from sklearn.linear_model import ridge_regression
558
+ >>> rng = np.random.RandomState(0)
559
+ >>> X = rng.randn(100, 4)
560
+ >>> y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + 0.1 * rng.standard_normal(100)
561
+ >>> coef, intercept = ridge_regression(X, y, alpha=1.0, return_intercept=True)
562
+ >>> list(coef)
563
+ [1.9..., -1.0..., -0.0..., -0.0...]
564
+ >>> intercept
565
+ -0.0...
566
+ """
567
+ return _ridge_regression(
568
+ X,
569
+ y,
570
+ alpha,
571
+ sample_weight=sample_weight,
572
+ solver=solver,
573
+ max_iter=max_iter,
574
+ tol=tol,
575
+ verbose=verbose,
576
+ positive=positive,
577
+ random_state=random_state,
578
+ return_n_iter=return_n_iter,
579
+ return_intercept=return_intercept,
580
+ X_scale=None,
581
+ X_offset=None,
582
+ check_input=check_input,
583
+ )
584
+
585
+
586
+ def _ridge_regression(
587
+ X,
588
+ y,
589
+ alpha,
590
+ sample_weight=None,
591
+ solver="auto",
592
+ max_iter=None,
593
+ tol=1e-4,
594
+ verbose=0,
595
+ positive=False,
596
+ random_state=None,
597
+ return_n_iter=False,
598
+ return_intercept=False,
599
+ X_scale=None,
600
+ X_offset=None,
601
+ check_input=True,
602
+ fit_intercept=False,
603
+ ):
604
+ has_sw = sample_weight is not None
605
+
606
+ if solver == "auto":
607
+ if positive:
608
+ solver = "lbfgs"
609
+ elif return_intercept:
610
+ # sag supports fitting intercept directly
611
+ solver = "sag"
612
+ elif not sparse.issparse(X):
613
+ solver = "cholesky"
614
+ else:
615
+ solver = "sparse_cg"
616
+
617
+ if solver not in ("sparse_cg", "cholesky", "svd", "lsqr", "sag", "saga", "lbfgs"):
618
+ raise ValueError(
619
+ "Known solvers are 'sparse_cg', 'cholesky', 'svd'"
620
+ " 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver
621
+ )
622
+
623
+ if positive and solver != "lbfgs":
624
+ raise ValueError(
625
+ "When positive=True, only 'lbfgs' solver can be used. "
626
+ f"Please change solver {solver} to 'lbfgs' "
627
+ "or set positive=False."
628
+ )
629
+
630
+ if solver == "lbfgs" and not positive:
631
+ raise ValueError(
632
+ "'lbfgs' solver can be used only when positive=True. "
633
+ "Please use another solver."
634
+ )
635
+
636
+ if return_intercept and solver != "sag":
637
+ raise ValueError(
638
+ "In Ridge, only 'sag' solver can directly fit the "
639
+ "intercept. Please change solver to 'sag' or set "
640
+ "return_intercept=False."
641
+ )
642
+
643
+ if check_input:
644
+ _dtype = [np.float64, np.float32]
645
+ _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
646
+ X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C")
647
+ y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
648
+ check_consistent_length(X, y)
649
+
650
+ n_samples, n_features = X.shape
651
+
652
+ if y.ndim > 2:
653
+ raise ValueError("Target y has the wrong shape %s" % str(y.shape))
654
+
655
+ ravel = False
656
+ if y.ndim == 1:
657
+ y = y.reshape(-1, 1)
658
+ ravel = True
659
+
660
+ n_samples_, n_targets = y.shape
661
+
662
+ if n_samples != n_samples_:
663
+ raise ValueError(
664
+ "Number of samples in X and y does not correspond: %d != %d"
665
+ % (n_samples, n_samples_)
666
+ )
667
+
668
+ if has_sw:
669
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
670
+
671
+ if solver not in ["sag", "saga"]:
672
+ # SAG supports sample_weight directly. For other solvers,
673
+ # we implement sample_weight via a simple rescaling.
674
+ X, y, sample_weight_sqrt = _rescale_data(X, y, sample_weight)
675
+
676
+ # Some callers of this method might pass alpha as single
677
+ # element array which already has been validated.
678
+ if alpha is not None and not isinstance(alpha, np.ndarray):
679
+ alpha = check_scalar(
680
+ alpha,
681
+ "alpha",
682
+ target_type=numbers.Real,
683
+ min_val=0.0,
684
+ include_boundaries="left",
685
+ )
686
+
687
+ # There should be either 1 or n_targets penalties
688
+ alpha = np.asarray(alpha, dtype=X.dtype).ravel()
689
+ if alpha.size not in [1, n_targets]:
690
+ raise ValueError(
691
+ "Number of targets and number of penalties do not correspond: %d != %d"
692
+ % (alpha.size, n_targets)
693
+ )
694
+
695
+ if alpha.size == 1 and n_targets > 1:
696
+ alpha = np.repeat(alpha, n_targets)
697
+
698
+ n_iter = None
699
+ if solver == "sparse_cg":
700
+ coef = _solve_sparse_cg(
701
+ X,
702
+ y,
703
+ alpha,
704
+ max_iter=max_iter,
705
+ tol=tol,
706
+ verbose=verbose,
707
+ X_offset=X_offset,
708
+ X_scale=X_scale,
709
+ sample_weight_sqrt=sample_weight_sqrt if has_sw else None,
710
+ )
711
+
712
+ elif solver == "lsqr":
713
+ coef, n_iter = _solve_lsqr(
714
+ X,
715
+ y,
716
+ alpha=alpha,
717
+ fit_intercept=fit_intercept,
718
+ max_iter=max_iter,
719
+ tol=tol,
720
+ X_offset=X_offset,
721
+ X_scale=X_scale,
722
+ sample_weight_sqrt=sample_weight_sqrt if has_sw else None,
723
+ )
724
+
725
+ elif solver == "cholesky":
726
+ if n_features > n_samples:
727
+ K = safe_sparse_dot(X, X.T, dense_output=True)
728
+ try:
729
+ dual_coef = _solve_cholesky_kernel(K, y, alpha)
730
+
731
+ coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
732
+ except linalg.LinAlgError:
733
+ # use SVD solver if matrix is singular
734
+ solver = "svd"
735
+ else:
736
+ try:
737
+ coef = _solve_cholesky(X, y, alpha)
738
+ except linalg.LinAlgError:
739
+ # use SVD solver if matrix is singular
740
+ solver = "svd"
741
+
742
+ elif solver in ["sag", "saga"]:
743
+ # precompute max_squared_sum for all targets
744
+ max_squared_sum = row_norms(X, squared=True).max()
745
+
746
+ coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
747
+ n_iter = np.empty(y.shape[1], dtype=np.int32)
748
+ intercept = np.zeros((y.shape[1],), dtype=X.dtype)
749
+ for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
750
+ init = {
751
+ "coef": np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype)
752
+ }
753
+ coef_, n_iter_, _ = sag_solver(
754
+ X,
755
+ target.ravel(),
756
+ sample_weight,
757
+ "squared",
758
+ alpha_i,
759
+ 0,
760
+ max_iter,
761
+ tol,
762
+ verbose,
763
+ random_state,
764
+ False,
765
+ max_squared_sum,
766
+ init,
767
+ is_saga=solver == "saga",
768
+ )
769
+ if return_intercept:
770
+ coef[i] = coef_[:-1]
771
+ intercept[i] = coef_[-1]
772
+ else:
773
+ coef[i] = coef_
774
+ n_iter[i] = n_iter_
775
+
776
+ if intercept.shape[0] == 1:
777
+ intercept = intercept[0]
778
+ coef = np.asarray(coef)
779
+
780
+ elif solver == "lbfgs":
781
+ coef = _solve_lbfgs(
782
+ X,
783
+ y,
784
+ alpha,
785
+ positive=positive,
786
+ tol=tol,
787
+ max_iter=max_iter,
788
+ X_offset=X_offset,
789
+ X_scale=X_scale,
790
+ sample_weight_sqrt=sample_weight_sqrt if has_sw else None,
791
+ )
792
+
793
+ if solver == "svd":
794
+ if sparse.issparse(X):
795
+ raise TypeError("SVD solver does not support sparse inputs currently")
796
+ coef = _solve_svd(X, y, alpha)
797
+
798
+ if ravel:
799
+ # When y was passed as a 1d-array, we flatten the coefficients.
800
+ coef = coef.ravel()
801
+
802
+ if return_n_iter and return_intercept:
803
+ return coef, n_iter, intercept
804
+ elif return_intercept:
805
+ return coef, intercept
806
+ elif return_n_iter:
807
+ return coef, n_iter
808
+ else:
809
+ return coef
810
+
811
+
812
+ class _BaseRidge(LinearModel, metaclass=ABCMeta):
813
+ _parameter_constraints: dict = {
814
+ "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray],
815
+ "fit_intercept": ["boolean"],
816
+ "copy_X": ["boolean"],
817
+ "max_iter": [Interval(Integral, 1, None, closed="left"), None],
818
+ "tol": [Interval(Real, 0, None, closed="left")],
819
+ "solver": [
820
+ StrOptions(
821
+ {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"}
822
+ )
823
+ ],
824
+ "positive": ["boolean"],
825
+ "random_state": ["random_state"],
826
+ }
827
+
828
+ @abstractmethod
829
+ def __init__(
830
+ self,
831
+ alpha=1.0,
832
+ *,
833
+ fit_intercept=True,
834
+ copy_X=True,
835
+ max_iter=None,
836
+ tol=1e-4,
837
+ solver="auto",
838
+ positive=False,
839
+ random_state=None,
840
+ ):
841
+ self.alpha = alpha
842
+ self.fit_intercept = fit_intercept
843
+ self.copy_X = copy_X
844
+ self.max_iter = max_iter
845
+ self.tol = tol
846
+ self.solver = solver
847
+ self.positive = positive
848
+ self.random_state = random_state
849
+
850
+ def fit(self, X, y, sample_weight=None):
851
+ if self.solver == "lbfgs" and not self.positive:
852
+ raise ValueError(
853
+ "'lbfgs' solver can be used only when positive=True. "
854
+ "Please use another solver."
855
+ )
856
+
857
+ if self.positive:
858
+ if self.solver not in ["auto", "lbfgs"]:
859
+ raise ValueError(
860
+ f"solver='{self.solver}' does not support positive fitting. Please"
861
+ " set the solver to 'auto' or 'lbfgs', or set `positive=False`"
862
+ )
863
+ else:
864
+ solver = self.solver
865
+ elif sparse.issparse(X) and self.fit_intercept:
866
+ if self.solver not in ["auto", "lbfgs", "lsqr", "sag", "sparse_cg"]:
867
+ raise ValueError(
868
+ "solver='{}' does not support fitting the intercept "
869
+ "on sparse data. Please set the solver to 'auto' or "
870
+ "'lsqr', 'sparse_cg', 'sag', 'lbfgs' "
871
+ "or set `fit_intercept=False`".format(self.solver)
872
+ )
873
+ if self.solver in ["lsqr", "lbfgs"]:
874
+ solver = self.solver
875
+ elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4:
876
+ warnings.warn(
877
+ '"sag" solver requires many iterations to fit '
878
+ "an intercept with sparse inputs. Either set the "
879
+ 'solver to "auto" or "sparse_cg", or set a low '
880
+ '"tol" and a high "max_iter" (especially if inputs are '
881
+ "not standardized)."
882
+ )
883
+ solver = "sag"
884
+ else:
885
+ solver = "sparse_cg"
886
+ else:
887
+ solver = self.solver
888
+
889
+ if sample_weight is not None:
890
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
891
+
892
+ # when X is sparse we only remove offset from y
893
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
894
+ X,
895
+ y,
896
+ fit_intercept=self.fit_intercept,
897
+ copy=self.copy_X,
898
+ sample_weight=sample_weight,
899
+ )
900
+
901
+ if solver == "sag" and sparse.issparse(X) and self.fit_intercept:
902
+ self.coef_, self.n_iter_, self.intercept_ = _ridge_regression(
903
+ X,
904
+ y,
905
+ alpha=self.alpha,
906
+ sample_weight=sample_weight,
907
+ max_iter=self.max_iter,
908
+ tol=self.tol,
909
+ solver="sag",
910
+ positive=self.positive,
911
+ random_state=self.random_state,
912
+ return_n_iter=True,
913
+ return_intercept=True,
914
+ check_input=False,
915
+ )
916
+ # add the offset which was subtracted by _preprocess_data
917
+ self.intercept_ += y_offset
918
+
919
+ else:
920
+ if sparse.issparse(X) and self.fit_intercept:
921
+ # required to fit intercept with sparse_cg and lbfgs solver
922
+ params = {"X_offset": X_offset, "X_scale": X_scale}
923
+ else:
924
+ # for dense matrices or when intercept is set to 0
925
+ params = {}
926
+
927
+ self.coef_, self.n_iter_ = _ridge_regression(
928
+ X,
929
+ y,
930
+ alpha=self.alpha,
931
+ sample_weight=sample_weight,
932
+ max_iter=self.max_iter,
933
+ tol=self.tol,
934
+ solver=solver,
935
+ positive=self.positive,
936
+ random_state=self.random_state,
937
+ return_n_iter=True,
938
+ return_intercept=False,
939
+ check_input=False,
940
+ fit_intercept=self.fit_intercept,
941
+ **params,
942
+ )
943
+ self._set_intercept(X_offset, y_offset, X_scale)
944
+
945
+ return self
946
+
947
+
948
+ class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):
949
+ """Linear least squares with l2 regularization.
950
+
951
+ Minimizes the objective function::
952
+
953
+ ||y - Xw||^2_2 + alpha * ||w||^2_2
954
+
955
+ This model solves a regression model where the loss function is
956
+ the linear least squares function and regularization is given by
957
+ the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
958
+ This estimator has built-in support for multi-variate regression
959
+ (i.e., when y is a 2d-array of shape (n_samples, n_targets)).
960
+
961
+ Read more in the :ref:`User Guide <ridge_regression>`.
962
+
963
+ Parameters
964
+ ----------
965
+ alpha : {float, ndarray of shape (n_targets,)}, default=1.0
966
+ Constant that multiplies the L2 term, controlling regularization
967
+ strength. `alpha` must be a non-negative float i.e. in `[0, inf)`.
968
+
969
+ When `alpha = 0`, the objective is equivalent to ordinary least
970
+ squares, solved by the :class:`LinearRegression` object. For numerical
971
+ reasons, using `alpha = 0` with the `Ridge` object is not advised.
972
+ Instead, you should use the :class:`LinearRegression` object.
973
+
974
+ If an array is passed, penalties are assumed to be specific to the
975
+ targets. Hence they must correspond in number.
976
+
977
+ fit_intercept : bool, default=True
978
+ Whether to fit the intercept for this model. If set
979
+ to false, no intercept will be used in calculations
980
+ (i.e. ``X`` and ``y`` are expected to be centered).
981
+
982
+ copy_X : bool, default=True
983
+ If True, X will be copied; else, it may be overwritten.
984
+
985
+ max_iter : int, default=None
986
+ Maximum number of iterations for conjugate gradient solver.
987
+ For 'sparse_cg' and 'lsqr' solvers, the default value is determined
988
+ by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
989
+ For 'lbfgs' solver, the default value is 15000.
990
+
991
+ tol : float, default=1e-4
992
+ The precision of the solution (`coef_`) is determined by `tol` which
993
+ specifies a different convergence criterion for each solver:
994
+
995
+ - 'svd': `tol` has no impact.
996
+
997
+ - 'cholesky': `tol` has no impact.
998
+
999
+ - 'sparse_cg': norm of residuals smaller than `tol`.
1000
+
1001
+ - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr,
1002
+ which control the norm of the residual vector in terms of the norms of
1003
+ matrix and coefficients.
1004
+
1005
+ - 'sag' and 'saga': relative change of coef smaller than `tol`.
1006
+
1007
+ - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals|
1008
+ smaller than `tol`.
1009
+
1010
+ .. versionchanged:: 1.2
1011
+ Default value changed from 1e-3 to 1e-4 for consistency with other linear
1012
+ models.
1013
+
1014
+ solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
1015
+ 'sag', 'saga', 'lbfgs'}, default='auto'
1016
+ Solver to use in the computational routines:
1017
+
1018
+ - 'auto' chooses the solver automatically based on the type of data.
1019
+
1020
+ - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
1021
+ coefficients. It is the most stable solver, in particular more stable
1022
+ for singular matrices than 'cholesky' at the cost of being slower.
1023
+
1024
+ - 'cholesky' uses the standard scipy.linalg.solve function to
1025
+ obtain a closed-form solution.
1026
+
1027
+ - 'sparse_cg' uses the conjugate gradient solver as found in
1028
+ scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
1029
+ more appropriate than 'cholesky' for large-scale data
1030
+ (possibility to set `tol` and `max_iter`).
1031
+
1032
+ - 'lsqr' uses the dedicated regularized least-squares routine
1033
+ scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
1034
+ procedure.
1035
+
1036
+ - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
1037
+ its improved, unbiased version named SAGA. Both methods also use an
1038
+ iterative procedure, and are often faster than other solvers when
1039
+ both n_samples and n_features are large. Note that 'sag' and
1040
+ 'saga' fast convergence is only guaranteed on features with
1041
+ approximately the same scale. You can preprocess the data with a
1042
+ scaler from sklearn.preprocessing.
1043
+
1044
+ - 'lbfgs' uses L-BFGS-B algorithm implemented in
1045
+ `scipy.optimize.minimize`. It can be used only when `positive`
1046
+ is True.
1047
+
1048
+ All solvers except 'svd' support both dense and sparse data. However, only
1049
+ 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when
1050
+ `fit_intercept` is True.
1051
+
1052
+ .. versionadded:: 0.17
1053
+ Stochastic Average Gradient descent solver.
1054
+ .. versionadded:: 0.19
1055
+ SAGA solver.
1056
+
1057
+ positive : bool, default=False
1058
+ When set to ``True``, forces the coefficients to be positive.
1059
+ Only 'lbfgs' solver is supported in this case.
1060
+
1061
+ random_state : int, RandomState instance, default=None
1062
+ Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
1063
+ See :term:`Glossary <random_state>` for details.
1064
+
1065
+ .. versionadded:: 0.17
1066
+ `random_state` to support Stochastic Average Gradient.
1067
+
1068
+ Attributes
1069
+ ----------
1070
+ coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
1071
+ Weight vector(s).
1072
+
1073
+ intercept_ : float or ndarray of shape (n_targets,)
1074
+ Independent term in decision function. Set to 0.0 if
1075
+ ``fit_intercept = False``.
1076
+
1077
+ n_iter_ : None or ndarray of shape (n_targets,)
1078
+ Actual number of iterations for each target. Available only for
1079
+ sag and lsqr solvers. Other solvers will return None.
1080
+
1081
+ .. versionadded:: 0.17
1082
+
1083
+ n_features_in_ : int
1084
+ Number of features seen during :term:`fit`.
1085
+
1086
+ .. versionadded:: 0.24
1087
+
1088
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1089
+ Names of features seen during :term:`fit`. Defined only when `X`
1090
+ has feature names that are all strings.
1091
+
1092
+ .. versionadded:: 1.0
1093
+
1094
+ See Also
1095
+ --------
1096
+ RidgeClassifier : Ridge classifier.
1097
+ RidgeCV : Ridge regression with built-in cross validation.
1098
+ :class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
1099
+ combines ridge regression with the kernel trick.
1100
+
1101
+ Notes
1102
+ -----
1103
+ Regularization improves the conditioning of the problem and
1104
+ reduces the variance of the estimates. Larger values specify stronger
1105
+ regularization. Alpha corresponds to ``1 / (2C)`` in other linear
1106
+ models such as :class:`~sklearn.linear_model.LogisticRegression` or
1107
+ :class:`~sklearn.svm.LinearSVC`.
1108
+
1109
+ Examples
1110
+ --------
1111
+ >>> from sklearn.linear_model import Ridge
1112
+ >>> import numpy as np
1113
+ >>> n_samples, n_features = 10, 5
1114
+ >>> rng = np.random.RandomState(0)
1115
+ >>> y = rng.randn(n_samples)
1116
+ >>> X = rng.randn(n_samples, n_features)
1117
+ >>> clf = Ridge(alpha=1.0)
1118
+ >>> clf.fit(X, y)
1119
+ Ridge()
1120
+ """
1121
+
1122
+ def __init__(
1123
+ self,
1124
+ alpha=1.0,
1125
+ *,
1126
+ fit_intercept=True,
1127
+ copy_X=True,
1128
+ max_iter=None,
1129
+ tol=1e-4,
1130
+ solver="auto",
1131
+ positive=False,
1132
+ random_state=None,
1133
+ ):
1134
+ super().__init__(
1135
+ alpha=alpha,
1136
+ fit_intercept=fit_intercept,
1137
+ copy_X=copy_X,
1138
+ max_iter=max_iter,
1139
+ tol=tol,
1140
+ solver=solver,
1141
+ positive=positive,
1142
+ random_state=random_state,
1143
+ )
1144
+
1145
+ @_fit_context(prefer_skip_nested_validation=True)
1146
+ def fit(self, X, y, sample_weight=None):
1147
+ """Fit Ridge regression model.
1148
+
1149
+ Parameters
1150
+ ----------
1151
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
1152
+ Training data.
1153
+
1154
+ y : ndarray of shape (n_samples,) or (n_samples, n_targets)
1155
+ Target values.
1156
+
1157
+ sample_weight : float or ndarray of shape (n_samples,), default=None
1158
+ Individual weights for each sample. If given a float, every sample
1159
+ will have the same weight.
1160
+
1161
+ Returns
1162
+ -------
1163
+ self : object
1164
+ Fitted estimator.
1165
+ """
1166
+ _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver)
1167
+ X, y = self._validate_data(
1168
+ X,
1169
+ y,
1170
+ accept_sparse=_accept_sparse,
1171
+ dtype=[np.float64, np.float32],
1172
+ multi_output=True,
1173
+ y_numeric=True,
1174
+ )
1175
+ return super().fit(X, y, sample_weight=sample_weight)
1176
+
1177
+
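+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # The solver notes in the `Ridge` docstring above say that 'sag' and 'saga'
+ # converge quickly only when features share approximately the same scale.
+ # A minimal sketch of that guidance using public scikit-learn APIs; the data
+ # below is made up purely for illustration.
+ def _sketch_ridge_saga_with_scaling():
+     import numpy as np
+     from sklearn.linear_model import Ridge
+     from sklearn.pipeline import make_pipeline
+     from sklearn.preprocessing import StandardScaler
+
+     rng = np.random.RandomState(0)
+     X = rng.randn(50, 3) * [1.0, 100.0, 0.01]  # deliberately badly scaled
+     y = rng.randn(50)
+     # Scaling first keeps the 'saga' iterations well conditioned.
+     model = make_pipeline(StandardScaler(), Ridge(alpha=1.0, solver="saga"))
+     return model.fit(X, y)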
1178
+ class _RidgeClassifierMixin(LinearClassifierMixin):
1179
+ def _prepare_data(self, X, y, sample_weight, solver):
1180
+ """Validate `X` and `y` and binarize `y`.
1181
+
1182
+ Parameters
1183
+ ----------
1184
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
1185
+ Training data.
1186
+
1187
+ y : ndarray of shape (n_samples,)
1188
+ Target values.
1189
+
1190
+ sample_weight : float or ndarray of shape (n_samples,), default=None
1191
+ Individual weights for each sample. If given a float, every sample
1192
+ will have the same weight.
1193
+
1194
+ solver : str
1195
+ The solver used in `Ridge` to know which sparse format to support.
1196
+
1197
+ Returns
1198
+ -------
1199
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
1200
+ Validated training data.
1201
+
1202
+ y : ndarray of shape (n_samples,)
1203
+ Validated target values.
1204
+
1205
+ sample_weight : ndarray of shape (n_samples,)
1206
+ Validated sample weights.
1207
+
1208
+ Y : ndarray of shape (n_samples, n_classes)
1209
+ The binarized version of `y`.
1210
+ """
1211
+ accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
1212
+ X, y = self._validate_data(
1213
+ X,
1214
+ y,
1215
+ accept_sparse=accept_sparse,
1216
+ multi_output=True,
1217
+ y_numeric=False,
1218
+ )
1219
+
1220
+ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
1221
+ Y = self._label_binarizer.fit_transform(y)
1222
+ if not self._label_binarizer.y_type_.startswith("multilabel"):
1223
+ y = column_or_1d(y, warn=True)
1224
+
1225
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
1226
+ if self.class_weight:
1227
+ sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
1228
+ return X, y, sample_weight, Y
1229
+
1230
+ def predict(self, X):
1231
+ """Predict class labels for samples in `X`.
1232
+
1233
+ Parameters
1234
+ ----------
1235
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1236
+ The data matrix for which we want to predict the targets.
1237
+
1238
+ Returns
1239
+ -------
1240
+ y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
1241
+ Vector or matrix containing the predictions. In binary and
1242
+ multiclass problems, this is a vector containing `n_samples`. In
1243
+ a multilabel problem, it returns a matrix of shape
1244
+ `(n_samples, n_outputs)`.
1245
+ """
1246
+ check_is_fitted(self, attributes=["_label_binarizer"])
1247
+ if self._label_binarizer.y_type_.startswith("multilabel"):
1248
+ # Threshold such that the negative label is -1 and positive label
1249
+ # is 1 to use the inverse transform of the label binarizer fitted
1250
+ # during fit.
1251
+ scores = 2 * (self.decision_function(X) > 0) - 1
1252
+ return self._label_binarizer.inverse_transform(scores)
1253
+ return super().predict(X)
1254
+
1255
+ @property
1256
+ def classes_(self):
1257
+ """Classes labels."""
1258
+ return self._label_binarizer.classes_
1259
+
1260
+ def _more_tags(self):
1261
+ return {"multilabel": True}
1262
+
1263
+
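+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # `_RidgeClassifierMixin._prepare_data` above encodes the targets with
+ # LabelBinarizer(pos_label=1, neg_label=-1) before the ridge regression is
+ # fit, and predictions are decoded back to the original labels (argmax of the
+ # per-class scores, or `inverse_transform` in the multilabel case). A tiny
+ # round-trip sketch of that encoding; the labels below are invented.
+ def _sketch_pm1_label_binarization():
+     import numpy as np
+     from sklearn.preprocessing import LabelBinarizer
+
+     y = np.array(["cat", "dog", "cat", "bird"])
+     lb = LabelBinarizer(pos_label=1, neg_label=-1)
+     Y = lb.fit_transform(y)      # shape (4, 3), entries in {-1, 1}
+     scores = Y.astype(float)     # stand-in for per-class decision values
+     # Decoding picks the class whose score is largest.
+     return lb.inverse_transform(scores)  # ['cat', 'dog', 'cat', 'bird']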
1264
+ class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge):
1265
+ """Classifier using Ridge regression.
1266
+
1267
+ This classifier first converts the target values into ``{-1, 1}`` and
1268
+ then treats the problem as a regression task (multi-output regression in
1269
+ the multiclass case).
1270
+
1271
+ Read more in the :ref:`User Guide <ridge_regression>`.
1272
+
1273
+ Parameters
1274
+ ----------
1275
+ alpha : float, default=1.0
1276
+ Regularization strength; must be a positive float. Regularization
1277
+ improves the conditioning of the problem and reduces the variance of
1278
+ the estimates. Larger values specify stronger regularization.
1279
+ Alpha corresponds to ``1 / (2C)`` in other linear models such as
1280
+ :class:`~sklearn.linear_model.LogisticRegression` or
1281
+ :class:`~sklearn.svm.LinearSVC`.
1282
+
1283
+ fit_intercept : bool, default=True
1284
+ Whether to calculate the intercept for this model. If set to false, no
1285
+ intercept will be used in calculations (e.g. data is expected to be
1286
+ already centered).
1287
+
1288
+ copy_X : bool, default=True
1289
+ If True, X will be copied; else, it may be overwritten.
1290
+
1291
+ max_iter : int, default=None
1292
+ Maximum number of iterations for conjugate gradient solver.
1293
+ The default value is determined by scipy.sparse.linalg.
1294
+
1295
+ tol : float, default=1e-4
1296
+ The precision of the solution (`coef_`) is determined by `tol` which
1297
+ specifies a different convergence criterion for each solver:
1298
+
1299
+ - 'svd': `tol` has no impact.
1300
+
1301
+ - 'cholesky': `tol` has no impact.
1302
+
1303
+ - 'sparse_cg': norm of residuals smaller than `tol`.
1304
+
1305
+ - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr,
1306
+ which control the norm of the residual vector in terms of the norms of
1307
+ matrix and coefficients.
1308
+
1309
+ - 'sag' and 'saga': relative change of coef smaller than `tol`.
1310
+
1311
+ - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals|
1312
+ smaller than `tol`.
1313
+
1314
+ .. versionchanged:: 1.2
1315
+ Default value changed from 1e-3 to 1e-4 for consistency with other linear
1316
+ models.
1317
+
1318
+ class_weight : dict or 'balanced', default=None
1319
+ Weights associated with classes in the form ``{class_label: weight}``.
1320
+ If not given, all classes are supposed to have weight one.
1321
+
1322
+ The "balanced" mode uses the values of y to automatically adjust
1323
+ weights inversely proportional to class frequencies in the input data
1324
+ as ``n_samples / (n_classes * np.bincount(y))``.
1325
+
1326
+ solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
1327
+ 'sag', 'saga', 'lbfgs'}, default='auto'
1328
+ Solver to use in the computational routines:
1329
+
1330
+ - 'auto' chooses the solver automatically based on the type of data.
1331
+
1332
+ - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
1333
+ coefficients. It is the most stable solver, in particular more stable
1334
+ for singular matrices than 'cholesky' at the cost of being slower.
1335
+
1336
+ - 'cholesky' uses the standard scipy.linalg.solve function to
1337
+ obtain a closed-form solution.
1338
+
1339
+ - 'sparse_cg' uses the conjugate gradient solver as found in
1340
+ scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
1341
+ more appropriate than 'cholesky' for large-scale data
1342
+ (possibility to set `tol` and `max_iter`).
1343
+
1344
+ - 'lsqr' uses the dedicated regularized least-squares routine
1345
+ scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
1346
+ procedure.
1347
+
1348
+ - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
1349
+ its unbiased and more flexible version named SAGA. Both methods
1350
+ use an iterative procedure, and are often faster than other solvers
1351
+ when both n_samples and n_features are large. Note that fast convergence
1352
+ of 'sag' and 'saga' is only guaranteed on features with
1353
+ approximately the same scale. You can preprocess the data with a
1354
+ scaler from sklearn.preprocessing.
1355
+
1356
+ .. versionadded:: 0.17
1357
+ Stochastic Average Gradient descent solver.
1358
+ .. versionadded:: 0.19
1359
+ SAGA solver.
1360
+
1361
+ - 'lbfgs' uses L-BFGS-B algorithm implemented in
1362
+ `scipy.optimize.minimize`. It can be used only when `positive`
1363
+ is True.
1364
+
1365
+ positive : bool, default=False
1366
+ When set to ``True``, forces the coefficients to be positive.
1367
+ Only 'lbfgs' solver is supported in this case.
1368
+
1369
+ random_state : int, RandomState instance, default=None
1370
+ Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
1371
+ See :term:`Glossary <random_state>` for details.
1372
+
1373
+ Attributes
1374
+ ----------
1375
+ coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
1376
+ Coefficient of the features in the decision function.
1377
+
1378
+ ``coef_`` is of shape (1, n_features) when the given problem is binary.
1379
+
1380
+ intercept_ : float or ndarray of shape (n_targets,)
1381
+ Independent term in decision function. Set to 0.0 if
1382
+ ``fit_intercept = False``.
1383
+
1384
+ n_iter_ : None or ndarray of shape (n_targets,)
1385
+ Actual number of iterations for each target. Available only for
1386
+ sag and lsqr solvers. Other solvers will return None.
1387
+
1388
+ classes_ : ndarray of shape (n_classes,)
1389
+ The classes labels.
1390
+
1391
+ n_features_in_ : int
1392
+ Number of features seen during :term:`fit`.
1393
+
1394
+ .. versionadded:: 0.24
1395
+
1396
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1397
+ Names of features seen during :term:`fit`. Defined only when `X`
1398
+ has feature names that are all strings.
1399
+
1400
+ .. versionadded:: 1.0
1401
+
1402
+ See Also
1403
+ --------
1404
+ Ridge : Ridge regression.
1405
+ RidgeClassifierCV : Ridge classifier with built-in cross validation.
1406
+
1407
+ Notes
1408
+ -----
1409
+ For multi-class classification, n_class classifiers are trained in
1410
+ a one-versus-all approach. Concretely, this is implemented by taking
1411
+ advantage of the multi-variate response support in Ridge.
1412
+
1413
+ Examples
1414
+ --------
1415
+ >>> from sklearn.datasets import load_breast_cancer
1416
+ >>> from sklearn.linear_model import RidgeClassifier
1417
+ >>> X, y = load_breast_cancer(return_X_y=True)
1418
+ >>> clf = RidgeClassifier().fit(X, y)
1419
+ >>> clf.score(X, y)
1420
+ 0.9595...
1421
+ """
1422
+
1423
+ _parameter_constraints: dict = {
1424
+ **_BaseRidge._parameter_constraints,
1425
+ "class_weight": [dict, StrOptions({"balanced"}), None],
1426
+ }
1427
+
1428
+ def __init__(
1429
+ self,
1430
+ alpha=1.0,
1431
+ *,
1432
+ fit_intercept=True,
1433
+ copy_X=True,
1434
+ max_iter=None,
1435
+ tol=1e-4,
1436
+ class_weight=None,
1437
+ solver="auto",
1438
+ positive=False,
1439
+ random_state=None,
1440
+ ):
1441
+ super().__init__(
1442
+ alpha=alpha,
1443
+ fit_intercept=fit_intercept,
1444
+ copy_X=copy_X,
1445
+ max_iter=max_iter,
1446
+ tol=tol,
1447
+ solver=solver,
1448
+ positive=positive,
1449
+ random_state=random_state,
1450
+ )
1451
+ self.class_weight = class_weight
1452
+
1453
+ @_fit_context(prefer_skip_nested_validation=True)
1454
+ def fit(self, X, y, sample_weight=None):
1455
+ """Fit Ridge classifier model.
1456
+
1457
+ Parameters
1458
+ ----------
1459
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
1460
+ Training data.
1461
+
1462
+ y : ndarray of shape (n_samples,)
1463
+ Target values.
1464
+
1465
+ sample_weight : float or ndarray of shape (n_samples,), default=None
1466
+ Individual weights for each sample. If given a float, every sample
1467
+ will have the same weight.
1468
+
1469
+ .. versionadded:: 0.17
1470
+ *sample_weight* support to RidgeClassifier.
1471
+
1472
+ Returns
1473
+ -------
1474
+ self : object
1475
+ Instance of the estimator.
1476
+ """
1477
+ X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver)
1478
+
1479
+ super().fit(X, Y, sample_weight=sample_weight)
1480
+ return self
1481
+
1482
+
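+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # The `RidgeClassifier` notes above explain that multiclass problems are
+ # handled one-versus-all through Ridge's multi-output support, and that
+ # class_weight="balanced" reweights samples by
+ # n_samples / (n_classes * np.bincount(y)). A small sketch on the iris
+ # dataset; variable names are illustrative only.
+ def _sketch_ridge_classifier_multiclass():
+     import numpy as np
+     from sklearn.datasets import load_iris
+     from sklearn.linear_model import RidgeClassifier
+
+     X, y = load_iris(return_X_y=True)
+     clf = RidgeClassifier(class_weight="balanced").fit(X, y)
+     # One coefficient row per class (one-vs-all) and per-class scores.
+     assert clf.coef_.shape == (3, X.shape[1])
+     assert clf.decision_function(X[:5]).shape == (5, 3)
+     # The "balanced" per-class weights applied internally:
+     balanced = len(y) / (len(np.unique(y)) * np.bincount(y))
+     return clf, balanced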
1483
+ def _check_gcv_mode(X, gcv_mode):
1484
+ if gcv_mode in ["eigen", "svd"]:
1485
+ return gcv_mode
1486
+ # if X has more rows than columns, use decomposition of X^T.X,
1487
+ # otherwise X.X^T
1488
+ if X.shape[0] > X.shape[1]:
1489
+ return "svd"
1490
+ return "eigen"
1491
+
1492
+
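+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # `_check_gcv_mode` above resolves gcv_mode='auto' from the shape of X: with
+ # more samples than features the decomposition based on X^T.X (the 'svd'
+ # path) is cheaper, otherwise the eigendecomposition of X.X^T is used. A
+ # minimal sketch of that rule on made-up shapes:
+ def _sketch_gcv_mode_auto():
+     import numpy as np
+
+     tall_X = np.zeros((100, 10))  # n_samples > n_features -> "svd"
+     wide_X = np.zeros((10, 100))  # n_samples <= n_features -> "eigen"
+     return _check_gcv_mode(tall_X, "auto"), _check_gcv_mode(wide_X, "auto")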
1493
+ def _find_smallest_angle(query, vectors):
1494
+ """Find the column of vectors that is most aligned with the query.
1495
+
1496
+ Both query and the columns of vectors must have their l2 norm equal to 1.
1497
+
1498
+ Parameters
1499
+ ----------
1500
+ query : ndarray of shape (n_samples,)
1501
+ Normalized query vector.
1502
+
1503
+ vectors : ndarray of shape (n_samples, n_features)
1504
+ Vectors to which we compare query, as columns. Must be normalized.
1505
+ """
1506
+ abs_cosine = np.abs(query.dot(vectors))
1507
+ index = np.argmax(abs_cosine)
1508
+ return index
1509
+
1510
+
1511
+ class _X_CenterStackOp(sparse.linalg.LinearOperator):
1512
+ """Behaves as centered and scaled X with an added intercept column.
1513
+
1514
+ This operator behaves as
1515
+ np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])
1516
+ """
1517
+
1518
+ def __init__(self, X, X_mean, sqrt_sw):
1519
+ n_samples, n_features = X.shape
1520
+ super().__init__(X.dtype, (n_samples, n_features + 1))
1521
+ self.X = X
1522
+ self.X_mean = X_mean
1523
+ self.sqrt_sw = sqrt_sw
1524
+
1525
+ def _matvec(self, v):
1526
+ v = v.ravel()
1527
+ return (
1528
+ safe_sparse_dot(self.X, v[:-1], dense_output=True)
1529
+ - self.sqrt_sw * self.X_mean.dot(v[:-1])
1530
+ + v[-1] * self.sqrt_sw
1531
+ )
1532
+
1533
+ def _matmat(self, v):
1534
+ return (
1535
+ safe_sparse_dot(self.X, v[:-1], dense_output=True)
1536
+ - self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1])
1537
+ + v[-1] * self.sqrt_sw[:, None]
1538
+ )
1539
+
1540
+ def _transpose(self):
1541
+ return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)
1542
+
1543
+
1544
+ class _XT_CenterStackOp(sparse.linalg.LinearOperator):
1545
+ """Behaves as transposed centered and scaled X with an intercept column.
1546
+
1547
+ This operator behaves as
1548
+ np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T
1549
+ """
1550
+
1551
+ def __init__(self, X, X_mean, sqrt_sw):
1552
+ n_samples, n_features = X.shape
1553
+ super().__init__(X.dtype, (n_features + 1, n_samples))
1554
+ self.X = X
1555
+ self.X_mean = X_mean
1556
+ self.sqrt_sw = sqrt_sw
1557
+
1558
+ def _matvec(self, v):
1559
+ v = v.ravel()
1560
+ n_features = self.shape[0]
1561
+ res = np.empty(n_features, dtype=self.X.dtype)
1562
+ res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - (
1563
+ self.X_mean * self.sqrt_sw.dot(v)
1564
+ )
1565
+ res[-1] = np.dot(v, self.sqrt_sw)
1566
+ return res
1567
+
1568
+ def _matmat(self, v):
1569
+ n_features = self.shape[0]
1570
+ res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)
1571
+ res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[
1572
+ :, None
1573
+ ] * self.sqrt_sw.dot(v)
1574
+ res[-1] = np.dot(self.sqrt_sw, v)
1575
+ return res
1576
+
1577
+
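+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # The two LinearOperator classes above stand in for the dense matrix
+ # np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]) without ever
+ # materialising the centered X, which would destroy its sparsity. A small
+ # numerical check of that equivalence on made-up data (unit sample weights):
+ def _sketch_center_stack_operator():
+     import numpy as np
+     from scipy import sparse
+
+     rng = np.random.RandomState(0)
+     X_dense = rng.rand(6, 4)
+     X = sparse.csr_matrix(X_dense)
+     sqrt_sw = np.ones(6)
+     X_mean = X_dense.mean(axis=0)
+
+     op = _X_CenterStackOp(X, X_mean, sqrt_sw)
+     explicit = np.hstack([X_dense - sqrt_sw[:, None] * X_mean,
+                           sqrt_sw[:, None]])
+     v = rng.rand(5)
+     assert np.allclose(op.dot(v), explicit @ v)
+     assert np.allclose(op.T.dot(explicit @ v), explicit.T @ (explicit @ v))
+     return op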
1578
+ class _IdentityRegressor:
1579
+ """Fake regressor which will directly output the prediction."""
1580
+
1581
+ def decision_function(self, y_predict):
1582
+ return y_predict
1583
+
1584
+ def predict(self, y_predict):
1585
+ return y_predict
1586
+
1587
+
1588
+ class _IdentityClassifier(LinearClassifierMixin):
1589
+ """Fake classifier which will directly output the prediction.
1590
+
1591
+ We inherit from LinearClassifierMixin to get the proper shape for the
1592
+ output `y`.
1593
+ """
1594
+
1595
+ def __init__(self, classes):
1596
+ self.classes_ = classes
1597
+
1598
+ def decision_function(self, y_predict):
1599
+ return y_predict
1600
+
1601
+
1602
+ class _RidgeGCV(LinearModel):
1603
+ """Ridge regression with built-in Leave-one-out Cross-Validation.
1604
+
1605
+ This class is not intended to be used directly. Use RidgeCV instead.
1606
+
1607
+ Notes
1608
+ -----
1609
+
1610
+ We want to solve (K + alpha*Id)c = y,
1611
+ where K = X X^T is the kernel matrix.
1612
+
1613
+ Let G = (K + alpha*Id).
1614
+
1615
+ Dual solution: c = G^-1y
1616
+ Primal solution: w = X^T c
1617
+
1618
+ Compute eigendecomposition K = Q V Q^T.
1619
+ Then G^-1 = Q (V + alpha*Id)^-1 Q^T,
1620
+ where (V + alpha*Id) is diagonal.
1621
+ It is thus inexpensive to invert for many alphas.
1622
+
1623
+ Let loov be the vector of prediction values for each example
1624
+ when the model was fitted with all examples but this example.
1625
+
1626
+ loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1)
1627
+
1628
+ Let looe be the vector of prediction errors for each example
1629
+ when the model was fitted with all examples but this example.
1630
+
1631
+ looe = y - loov = c / diag(G^-1)
1632
+
1633
+ The best score (negative mean squared error or user-provided scoring) is
1634
+ stored in the `best_score_` attribute, and the selected hyperparameter in
1635
+ `alpha_`.
1636
+
1637
+ References
1638
+ ----------
1639
+ http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
1640
+ https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
1641
+ """
1642
+
1643
+ def __init__(
1644
+ self,
1645
+ alphas=(0.1, 1.0, 10.0),
1646
+ *,
1647
+ fit_intercept=True,
1648
+ scoring=None,
1649
+ copy_X=True,
1650
+ gcv_mode=None,
1651
+ store_cv_values=False,
1652
+ is_clf=False,
1653
+ alpha_per_target=False,
1654
+ ):
1655
+ self.alphas = alphas
1656
+ self.fit_intercept = fit_intercept
1657
+ self.scoring = scoring
1658
+ self.copy_X = copy_X
1659
+ self.gcv_mode = gcv_mode
1660
+ self.store_cv_values = store_cv_values
1661
+ self.is_clf = is_clf
1662
+ self.alpha_per_target = alpha_per_target
1663
+
1664
+ @staticmethod
1665
+ def _decomp_diag(v_prime, Q):
1666
+ # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
1667
+ return (v_prime * Q**2).sum(axis=-1)
1668
+
1669
+ @staticmethod
1670
+ def _diag_dot(D, B):
1671
+ # compute dot(diag(D), B)
1672
+ if len(B.shape) > 1:
1673
+ # handle case where B is > 1-d
1674
+ D = D[(slice(None),) + (np.newaxis,) * (len(B.shape) - 1)]
1675
+ return D * B
1676
+
1677
+ def _compute_gram(self, X, sqrt_sw):
1678
+ """Computes the Gram matrix XX^T with possible centering.
1679
+
1680
+ Parameters
1681
+ ----------
1682
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
1683
+ The preprocessed design matrix.
1684
+
1685
+ sqrt_sw : ndarray of shape (n_samples,)
1686
+ square roots of sample weights
1687
+
1688
+ Returns
1689
+ -------
1690
+ gram : ndarray of shape (n_samples, n_samples)
1691
+ The Gram matrix.
1692
+ X_mean : ndarray of shape (n_features,)
1693
+ The weighted mean of ``X`` for each feature.
1694
+
1695
+ Notes
1696
+ -----
1697
+ When X is dense the centering has been done in preprocessing
1698
+ so the mean is 0 and we just compute XX^T.
1699
+
1700
+ When X is sparse it has not been centered in preprocessing, but it has
1701
+ been scaled by sqrt(sample weights).
1702
+
1703
+ When self.fit_intercept is False no centering is done.
1704
+
1705
+ The centered X is never actually computed because centering would break
1706
+ the sparsity of X.
1707
+ """
1708
+ center = self.fit_intercept and sparse.issparse(X)
1709
+ if not center:
1710
+ # in this case centering has been done in preprocessing
1711
+ # or we are not fitting an intercept.
1712
+ X_mean = np.zeros(X.shape[1], dtype=X.dtype)
1713
+ return safe_sparse_dot(X, X.T, dense_output=True), X_mean
1714
+ # X is sparse
1715
+ n_samples = X.shape[0]
1716
+ sample_weight_matrix = sparse.dia_matrix(
1717
+ (sqrt_sw, 0), shape=(n_samples, n_samples)
1718
+ )
1719
+ X_weighted = sample_weight_matrix.dot(X)
1720
+ X_mean, _ = mean_variance_axis(X_weighted, axis=0)
1721
+ X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
1722
+ X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)
1723
+ X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
1724
+ return (
1725
+ safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T,
1726
+ X_mean,
1727
+ )
1728
+
1729
+ def _compute_covariance(self, X, sqrt_sw):
1730
+ """Computes covariance matrix X^TX with possible centering.
1731
+
1732
+ Parameters
1733
+ ----------
1734
+ X : sparse matrix of shape (n_samples, n_features)
1735
+ The preprocessed design matrix.
1736
+
1737
+ sqrt_sw : ndarray of shape (n_samples,)
1738
+ square roots of sample weights
1739
+
1740
+ Returns
1741
+ -------
1742
+ covariance : ndarray of shape (n_features, n_features)
1743
+ The covariance matrix.
1744
+ X_mean : ndarray of shape (n_features,)
1745
+ The weighted mean of ``X`` for each feature.
1746
+
1747
+ Notes
1748
+ -----
1749
+ Since X is sparse it has not been centered in preprocessing, but it has
1750
+ been scaled by sqrt(sample weights).
1751
+
1752
+ When self.fit_intercept is False no centering is done.
1753
+
1754
+ The centered X is never actually computed because centering would break
1755
+ the sparsity of X.
1756
+ """
1757
+ if not self.fit_intercept:
1758
+ # in this case centering has been done in preprocessing
1759
+ # or we are not fitting an intercept.
1760
+ X_mean = np.zeros(X.shape[1], dtype=X.dtype)
1761
+ return safe_sparse_dot(X.T, X, dense_output=True), X_mean
1762
+ # this function only gets called for sparse X
1763
+ n_samples = X.shape[0]
1764
+ sample_weight_matrix = sparse.dia_matrix(
1765
+ (sqrt_sw, 0), shape=(n_samples, n_samples)
1766
+ )
1767
+ X_weighted = sample_weight_matrix.dot(X)
1768
+ X_mean, _ = mean_variance_axis(X_weighted, axis=0)
1769
+ X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)
1770
+ weight_sum = sqrt_sw.dot(sqrt_sw)
1771
+ return (
1772
+ safe_sparse_dot(X.T, X, dense_output=True)
1773
+ - weight_sum * np.outer(X_mean, X_mean),
1774
+ X_mean,
1775
+ )
1776
+
1777
+ def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):
1778
+ """Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
1779
+ without explicitly centering X nor computing X.dot(A)
1780
+ when X is sparse.
1781
+
1782
+ Parameters
1783
+ ----------
1784
+ X : sparse matrix of shape (n_samples, n_features)
1785
+
1786
+ A : ndarray of shape (n_features, n_features)
1787
+
1788
+ X_mean : ndarray of shape (n_features,)
1789
+
1790
+ sqrt_sw : ndarray of shape (n_samples,)
1791
+ square roots of sample weights
1792
+
1793
+ Returns
1794
+ -------
1795
+ diag : np.ndarray, shape (n_samples,)
1796
+ The computed diagonal.
1797
+ """
1798
+ intercept_col = scale = sqrt_sw
1799
+ batch_size = X.shape[1]
1800
+ diag = np.empty(X.shape[0], dtype=X.dtype)
1801
+ for start in range(0, X.shape[0], batch_size):
1802
+ batch = slice(start, min(X.shape[0], start + batch_size), 1)
1803
+ X_batch = np.empty(
1804
+ (X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype
1805
+ )
1806
+ if self.fit_intercept:
1807
+ X_batch[:, :-1] = X[batch].toarray() - X_mean * scale[batch][:, None]
1808
+ X_batch[:, -1] = intercept_col[batch]
1809
+ else:
1810
+ X_batch = X[batch].toarray()
1811
+ diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)
1812
+ return diag
1813
+
1814
+ def _eigen_decompose_gram(self, X, y, sqrt_sw):
1815
+ """Eigendecomposition of X.X^T, used when n_samples <= n_features."""
1816
+ # if X is dense it has already been centered in preprocessing
1817
+ K, X_mean = self._compute_gram(X, sqrt_sw)
1818
+ if self.fit_intercept:
1819
+ # to emulate centering X with sample weights,
1820
+ # ie removing the weighted average, we add a column
1821
+ # containing the square roots of the sample weights.
1822
+ # by centering, it is orthogonal to the other columns
1823
+ K += np.outer(sqrt_sw, sqrt_sw)
1824
+ eigvals, Q = linalg.eigh(K)
1825
+ QT_y = np.dot(Q.T, y)
1826
+ return X_mean, eigvals, Q, QT_y
1827
+
1828
+ def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):
1829
+ """Compute dual coefficients and diagonal of G^-1.
1830
+
1831
+ Used when we have a decomposition of X.X^T (n_samples <= n_features).
1832
+ """
1833
+ w = 1.0 / (eigvals + alpha)
1834
+ if self.fit_intercept:
1835
+ # the vector containing the square roots of the sample weights (1
1836
+ # when no sample weights) is the eigenvector of XX^T which
1837
+ # corresponds to the intercept; we cancel the regularization on
1838
+ # this dimension. the corresponding eigenvalue is
1839
+ # sum(sample_weight).
1840
+ normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
1841
+ intercept_dim = _find_smallest_angle(normalized_sw, Q)
1842
+ w[intercept_dim] = 0 # cancel regularization for the intercept
1843
+
1844
+ c = np.dot(Q, self._diag_dot(w, QT_y))
1845
+ G_inverse_diag = self._decomp_diag(w, Q)
1846
+ # handle case where y is 2-d
1847
+ if len(y.shape) != 1:
1848
+ G_inverse_diag = G_inverse_diag[:, np.newaxis]
1849
+ return G_inverse_diag, c
1850
+
1851
+ def _eigen_decompose_covariance(self, X, y, sqrt_sw):
1852
+ """Eigendecomposition of X^T.X, used when n_samples > n_features
1853
+ and X is sparse.
1854
+ """
1855
+ n_samples, n_features = X.shape
1856
+ cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)
1857
+ cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)
1858
+ if not self.fit_intercept:
1859
+ cov = cov[:-1, :-1]
1860
+ # to emulate centering X with sample weights,
1861
+ # ie removing the weighted average, we add a column
1862
+ # containing the square roots of the sample weights.
1863
+ # by centering, it is orthogonal to the other columns
1864
+ # when all samples have the same weight we add a column of 1
1865
+ else:
1866
+ cov[-1] = 0
1867
+ cov[:, -1] = 0
1868
+ cov[-1, -1] = sqrt_sw.dot(sqrt_sw)
1869
+ nullspace_dim = max(0, n_features - n_samples)
1870
+ eigvals, V = linalg.eigh(cov)
1871
+ # remove eigenvalues and vectors in the null space of X^T.X
1872
+ eigvals = eigvals[nullspace_dim:]
1873
+ V = V[:, nullspace_dim:]
1874
+ return X_mean, eigvals, V, X
1875
+
1876
+ def _solve_eigen_covariance_no_intercept(
1877
+ self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
1878
+ ):
1879
+ """Compute dual coefficients and diagonal of G^-1.
1880
+
1881
+ Used when we have a decomposition of X^T.X
1882
+ (n_samples > n_features and X is sparse), and not fitting an intercept.
1883
+ """
1884
+ w = 1 / (eigvals + alpha)
1885
+ A = (V * w).dot(V.T)
1886
+ AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))
1887
+ y_hat = safe_sparse_dot(X, AXy, dense_output=True)
1888
+ hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
1889
+ if len(y.shape) != 1:
1890
+ # handle case where y is 2-d
1891
+ hat_diag = hat_diag[:, np.newaxis]
1892
+ return (1 - hat_diag) / alpha, (y - y_hat) / alpha
1893
+
1894
+ def _solve_eigen_covariance_intercept(
1895
+ self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
1896
+ ):
1897
+ """Compute dual coefficients and diagonal of G^-1.
1898
+
1899
+ Used when we have a decomposition of X^T.X
1900
+ (n_samples > n_features and X is sparse),
1901
+ and we are fitting an intercept.
1902
+ """
1903
+ # the vector [0, 0, ..., 0, 1]
1904
+ # is the eigenvector of X^TX which
1905
+ # corresponds to the intercept; we cancel the regularization on
1906
+ # this dimension. the corresponding eigenvalue is
1907
+ # sum(sample_weight), e.g. n when uniform sample weights.
1908
+ intercept_sv = np.zeros(V.shape[0])
1909
+ intercept_sv[-1] = 1
1910
+ intercept_dim = _find_smallest_angle(intercept_sv, V)
1911
+ w = 1 / (eigvals + alpha)
1912
+ w[intercept_dim] = 1 / eigvals[intercept_dim]
1913
+ A = (V * w).dot(V.T)
1914
+ # add a column to X containing the square roots of sample weights
1915
+ X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)
1916
+ AXy = A.dot(X_op.T.dot(y))
1917
+ y_hat = X_op.dot(AXy)
1918
+ hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
1919
+ # return (1 - hat_diag), (y - y_hat)
1920
+ if len(y.shape) != 1:
1921
+ # handle case where y is 2-d
1922
+ hat_diag = hat_diag[:, np.newaxis]
1923
+ return (1 - hat_diag) / alpha, (y - y_hat) / alpha
1924
+
1925
+ def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
1926
+ """Compute dual coefficients and diagonal of G^-1.
1927
+
1928
+ Used when we have a decomposition of X^T.X
1929
+ (n_samples > n_features and X is sparse).
1930
+ """
1931
+ if self.fit_intercept:
1932
+ return self._solve_eigen_covariance_intercept(
1933
+ alpha, y, sqrt_sw, X_mean, eigvals, V, X
1934
+ )
1935
+ return self._solve_eigen_covariance_no_intercept(
1936
+ alpha, y, sqrt_sw, X_mean, eigvals, V, X
1937
+ )
1938
+
1939
+ def _svd_decompose_design_matrix(self, X, y, sqrt_sw):
1940
+ # X already centered
1941
+ X_mean = np.zeros(X.shape[1], dtype=X.dtype)
1942
+ if self.fit_intercept:
1943
+ # to emulate fit_intercept=True situation, add a column
1944
+ # containing the square roots of the sample weights
1945
+ # by centering, the other columns are orthogonal to that one
1946
+ intercept_column = sqrt_sw[:, None]
1947
+ X = np.hstack((X, intercept_column))
1948
+ U, singvals, _ = linalg.svd(X, full_matrices=0)
1949
+ singvals_sq = singvals**2
1950
+ UT_y = np.dot(U.T, y)
1951
+ return X_mean, singvals_sq, U, UT_y
1952
+
1953
+ def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
1954
+ """Compute dual coefficients and diagonal of G^-1.
1955
+
1956
+ Used when we have an SVD decomposition of X
1957
+ (n_samples > n_features and X is dense).
1958
+ """
1959
+ w = ((singvals_sq + alpha) ** -1) - (alpha**-1)
1960
+ if self.fit_intercept:
1961
+ # detect intercept column
1962
+ normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
1963
+ intercept_dim = _find_smallest_angle(normalized_sw, U)
1964
+ # cancel the regularization for the intercept
1965
+ w[intercept_dim] = -(alpha**-1)
1966
+ c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y
1967
+ G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1)
1968
+ if len(y.shape) != 1:
1969
+ # handle case where y is 2-d
1970
+ G_inverse_diag = G_inverse_diag[:, np.newaxis]
1971
+ return G_inverse_diag, c
1972
+
1973
+ def fit(self, X, y, sample_weight=None):
1974
+ """Fit Ridge regression model with gcv.
1975
+
1976
+ Parameters
1977
+ ----------
1978
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
1979
+ Training data. Will be cast to float64 if necessary.
1980
+
1981
+ y : ndarray of shape (n_samples,) or (n_samples, n_targets)
1982
+ Target values. Will be cast to float64 if necessary.
1983
+
1984
+ sample_weight : float or ndarray of shape (n_samples,), default=None
1985
+ Individual weights for each sample. If given a float, every sample
1986
+ will have the same weight. Note that the scale of `sample_weight`
1987
+ has an impact on the loss; i.e. multiplying all weights by `k`
1988
+ is equivalent to setting `alpha / k`.
1989
+
1990
+ Returns
1991
+ -------
1992
+ self : object
1993
+ """
1994
+ X, y = self._validate_data(
1995
+ X,
1996
+ y,
1997
+ accept_sparse=["csr", "csc", "coo"],
1998
+ dtype=[np.float64],
1999
+ multi_output=True,
2000
+ y_numeric=True,
2001
+ )
2002
+
2003
+ # alpha_per_target cannot be used in classifier mode. All subclasses
2004
+ # of _RidgeGCV that are classifiers keep alpha_per_target at its
2005
+ # default value: False, so the condition below should never happen.
2006
+ assert not (self.is_clf and self.alpha_per_target)
2007
+
2008
+ if sample_weight is not None:
2009
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
2010
+
2011
+ self.alphas = np.asarray(self.alphas)
2012
+
2013
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
2014
+ X,
2015
+ y,
2016
+ fit_intercept=self.fit_intercept,
2017
+ copy=self.copy_X,
2018
+ sample_weight=sample_weight,
2019
+ )
2020
+
2021
+ gcv_mode = _check_gcv_mode(X, self.gcv_mode)
2022
+
2023
+ if gcv_mode == "eigen":
2024
+ decompose = self._eigen_decompose_gram
2025
+ solve = self._solve_eigen_gram
2026
+ elif gcv_mode == "svd":
2027
+ if sparse.issparse(X):
2028
+ decompose = self._eigen_decompose_covariance
2029
+ solve = self._solve_eigen_covariance
2030
+ else:
2031
+ decompose = self._svd_decompose_design_matrix
2032
+ solve = self._solve_svd_design_matrix
2033
+
2034
+ n_samples = X.shape[0]
2035
+
2036
+ if sample_weight is not None:
2037
+ X, y, sqrt_sw = _rescale_data(X, y, sample_weight)
2038
+ else:
2039
+ sqrt_sw = np.ones(n_samples, dtype=X.dtype)
2040
+
2041
+ X_mean, *decomposition = decompose(X, y, sqrt_sw)
2042
+
2043
+ scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
2044
+ error = scorer is None
2045
+
2046
+ n_y = 1 if len(y.shape) == 1 else y.shape[1]
2047
+ n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
2048
+
2049
+ if self.store_cv_values:
2050
+ self.cv_values_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype)
2051
+
2052
+ best_coef, best_score, best_alpha = None, None, None
2053
+
2054
+ for i, alpha in enumerate(np.atleast_1d(self.alphas)):
2055
+ G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition)
2056
+ if error:
2057
+ squared_errors = (c / G_inverse_diag) ** 2
2058
+ if self.alpha_per_target:
2059
+ alpha_score = -squared_errors.mean(axis=0)
2060
+ else:
2061
+ alpha_score = -squared_errors.mean()
2062
+ if self.store_cv_values:
2063
+ self.cv_values_[:, i] = squared_errors.ravel()
2064
+ else:
2065
+ predictions = y - (c / G_inverse_diag)
2066
+ if self.store_cv_values:
2067
+ self.cv_values_[:, i] = predictions.ravel()
2068
+
2069
+ if self.is_clf:
2070
+ identity_estimator = _IdentityClassifier(classes=np.arange(n_y))
2071
+ alpha_score = scorer(
2072
+ identity_estimator, predictions, y.argmax(axis=1)
2073
+ )
2074
+ else:
2075
+ identity_estimator = _IdentityRegressor()
2076
+ if self.alpha_per_target:
2077
+ alpha_score = np.array(
2078
+ [
2079
+ scorer(identity_estimator, predictions[:, j], y[:, j])
2080
+ for j in range(n_y)
2081
+ ]
2082
+ )
2083
+ else:
2084
+ alpha_score = scorer(
2085
+ identity_estimator, predictions.ravel(), y.ravel()
2086
+ )
2087
+
2088
+ # Keep track of the best model
2089
+ if best_score is None:
2090
+ # initialize
2091
+ if self.alpha_per_target and n_y > 1:
2092
+ best_coef = c
2093
+ best_score = np.atleast_1d(alpha_score)
2094
+ best_alpha = np.full(n_y, alpha)
2095
+ else:
2096
+ best_coef = c
2097
+ best_score = alpha_score
2098
+ best_alpha = alpha
2099
+ else:
2100
+ # update
2101
+ if self.alpha_per_target and n_y > 1:
2102
+ to_update = alpha_score > best_score
2103
+ best_coef[:, to_update] = c[:, to_update]
2104
+ best_score[to_update] = alpha_score[to_update]
2105
+ best_alpha[to_update] = alpha
2106
+ elif alpha_score > best_score:
2107
+ best_coef, best_score, best_alpha = c, alpha_score, alpha
2108
+
2109
+ self.alpha_ = best_alpha
2110
+ self.best_score_ = best_score
2111
+ self.dual_coef_ = best_coef
2112
+ self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
2113
+
2114
+ if sparse.issparse(X):
2115
+ X_offset = X_mean * X_scale
2116
+ else:
2117
+ X_offset += X_mean * X_scale
2118
+ self._set_intercept(X_offset, y_offset, X_scale)
2119
+
2120
+ if self.store_cv_values:
2121
+ if len(y.shape) == 1:
2122
+ cv_values_shape = n_samples, n_alphas
2123
+ else:
2124
+ cv_values_shape = n_samples, n_y, n_alphas
2125
+ self.cv_values_ = self.cv_values_.reshape(cv_values_shape)
2126
+
2127
+ return self
2128
+
2129
+
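+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # The `_RidgeGCV` notes above state the leave-one-out identity
+ # looe = y - loov = c / diag(G^-1), with G = X.X^T + alpha*Id and c = G^-1 y.
+ # A numerical check of that identity against explicit leave-one-out refits
+ # for the intercept-free case; data and alpha are made up for illustration.
+ def _sketch_loo_identity(alpha=1.0):
+     import numpy as np
+     from sklearn.linear_model import Ridge
+
+     rng = np.random.RandomState(0)
+     X, y = rng.randn(8, 3), rng.randn(8)
+
+     G_inv = np.linalg.inv(X @ X.T + alpha * np.eye(8))
+     looe_closed_form = (G_inv @ y) / np.diag(G_inv)
+
+     looe_explicit = np.empty(8)
+     for i in range(8):
+         keep = np.arange(8) != i
+         model = Ridge(alpha=alpha, fit_intercept=False).fit(X[keep], y[keep])
+         looe_explicit[i] = y[i] - model.predict(X[i:i + 1])[0]
+
+     assert np.allclose(looe_closed_form, looe_explicit)
+     return looe_closed_form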
2130
+ class _BaseRidgeCV(LinearModel):
2131
+ _parameter_constraints: dict = {
2132
+ "alphas": ["array-like", Interval(Real, 0, None, closed="neither")],
2133
+ "fit_intercept": ["boolean"],
2134
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
2135
+ "cv": ["cv_object"],
2136
+ "gcv_mode": [StrOptions({"auto", "svd", "eigen"}), None],
2137
+ "store_cv_values": ["boolean"],
2138
+ "alpha_per_target": ["boolean"],
2139
+ }
2140
+
2141
+ def __init__(
2142
+ self,
2143
+ alphas=(0.1, 1.0, 10.0),
2144
+ *,
2145
+ fit_intercept=True,
2146
+ scoring=None,
2147
+ cv=None,
2148
+ gcv_mode=None,
2149
+ store_cv_values=False,
2150
+ alpha_per_target=False,
2151
+ ):
2152
+ self.alphas = alphas
2153
+ self.fit_intercept = fit_intercept
2154
+ self.scoring = scoring
2155
+ self.cv = cv
2156
+ self.gcv_mode = gcv_mode
2157
+ self.store_cv_values = store_cv_values
2158
+ self.alpha_per_target = alpha_per_target
2159
+
2160
+ def fit(self, X, y, sample_weight=None):
2161
+ """Fit Ridge regression model with cv.
2162
+
2163
+ Parameters
2164
+ ----------
2165
+ X : ndarray of shape (n_samples, n_features)
2166
+ Training data. If using GCV, will be cast to float64
2167
+ if necessary.
2168
+
2169
+ y : ndarray of shape (n_samples,) or (n_samples, n_targets)
2170
+ Target values. Will be cast to X's dtype if necessary.
2171
+
2172
+ sample_weight : float or ndarray of shape (n_samples,), default=None
2173
+ Individual weights for each sample. If given a float, every sample
2174
+ will have the same weight.
2175
+
2176
+ Returns
2177
+ -------
2178
+ self : object
2179
+ Fitted estimator.
2180
+
2181
+ Notes
2182
+ -----
2183
+ When sample_weight is provided, the selected hyperparameter may depend
2184
+ on whether we use leave-one-out cross-validation (cv=None or cv='auto')
2185
+ or another form of cross-validation, because only leave-one-out
2186
+ cross-validation takes the sample weights into account when computing
2187
+ the validation score.
2188
+ """
2189
+ cv = self.cv
2190
+
2191
+ check_scalar_alpha = partial(
2192
+ check_scalar,
2193
+ target_type=numbers.Real,
2194
+ min_val=0.0,
2195
+ include_boundaries="neither",
2196
+ )
2197
+
2198
+ if isinstance(self.alphas, (np.ndarray, list, tuple)):
2199
+ n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
2200
+ if n_alphas != 1:
2201
+ for index, alpha in enumerate(self.alphas):
2202
+ alpha = check_scalar_alpha(alpha, f"alphas[{index}]")
2203
+ else:
2204
+ self.alphas[0] = check_scalar_alpha(self.alphas[0], "alphas")
2205
+ alphas = np.asarray(self.alphas)
2206
+
2207
+ if cv is None:
2208
+ estimator = _RidgeGCV(
2209
+ alphas,
2210
+ fit_intercept=self.fit_intercept,
2211
+ scoring=self.scoring,
2212
+ gcv_mode=self.gcv_mode,
2213
+ store_cv_values=self.store_cv_values,
2214
+ is_clf=is_classifier(self),
2215
+ alpha_per_target=self.alpha_per_target,
2216
+ )
2217
+ estimator.fit(X, y, sample_weight=sample_weight)
2218
+ self.alpha_ = estimator.alpha_
2219
+ self.best_score_ = estimator.best_score_
2220
+ if self.store_cv_values:
2221
+ self.cv_values_ = estimator.cv_values_
2222
+ else:
2223
+ if self.store_cv_values:
2224
+ raise ValueError("cv!=None and store_cv_values=True are incompatible")
2225
+ if self.alpha_per_target:
2226
+ raise ValueError("cv!=None and alpha_per_target=True are incompatible")
2227
+
2228
+ parameters = {"alpha": alphas}
2229
+ solver = "sparse_cg" if sparse.issparse(X) else "auto"
2230
+ model = RidgeClassifier if is_classifier(self) else Ridge
2231
+ gs = GridSearchCV(
2232
+ model(
2233
+ fit_intercept=self.fit_intercept,
2234
+ solver=solver,
2235
+ ),
2236
+ parameters,
2237
+ cv=cv,
2238
+ scoring=self.scoring,
2239
+ )
2240
+ gs.fit(X, y, sample_weight=sample_weight)
2241
+ estimator = gs.best_estimator_
2242
+ self.alpha_ = gs.best_estimator_.alpha
2243
+ self.best_score_ = gs.best_score_
2244
+
2245
+ self.coef_ = estimator.coef_
2246
+ self.intercept_ = estimator.intercept_
2247
+ self.n_features_in_ = estimator.n_features_in_
2248
+ if hasattr(estimator, "feature_names_in_"):
2249
+ self.feature_names_in_ = estimator.feature_names_in_
2250
+
2251
+ return self
2252
+
2253
+
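+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # `_BaseRidgeCV.fit` above dispatches to the efficient leave-one-out path
+ # (`_RidgeGCV`) when cv is None, and to a plain GridSearchCV otherwise; this
+ # is why `store_cv_values` is only allowed together with cv=None. A short
+ # sketch of both behaviours on made-up data:
+ def _sketch_ridgecv_dispatch():
+     import numpy as np
+     from sklearn.linear_model import RidgeCV
+
+     rng = np.random.RandomState(0)
+     X, y = rng.randn(30, 4), rng.randn(30)
+     alphas = [0.1, 1.0, 10.0]
+
+     # cv=None: leave-one-out via _RidgeGCV, per-alpha values can be stored.
+     loo = RidgeCV(alphas=alphas, store_cv_values=True).fit(X, y)
+     assert loo.cv_values_.shape == (30, 3)
+
+     # cv=5: GridSearchCV path; storing per-sample values is rejected.
+     try:
+         RidgeCV(alphas=alphas, cv=5, store_cv_values=True).fit(X, y)
+     except ValueError:
+         pass  # "cv!=None and store_cv_values=True are incompatible"
+     return loo.alpha_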
2254
+ class RidgeCV(
2255
+ _RoutingNotSupportedMixin, MultiOutputMixin, RegressorMixin, _BaseRidgeCV
2256
+ ):
2257
+ """Ridge regression with built-in cross-validation.
2258
+
2259
+ See glossary entry for :term:`cross-validation estimator`.
2260
+
2261
+ By default, it performs efficient Leave-One-Out Cross-Validation.
2262
+
2263
+ Read more in the :ref:`User Guide <ridge_regression>`.
2264
+
2265
+ Parameters
2266
+ ----------
2267
+ alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0)
2268
+ Array of alpha values to try.
2269
+ Regularization strength; must be a positive float. Regularization
2270
+ improves the conditioning of the problem and reduces the variance of
2271
+ the estimates. Larger values specify stronger regularization.
2272
+ Alpha corresponds to ``1 / (2C)`` in other linear models such as
2273
+ :class:`~sklearn.linear_model.LogisticRegression` or
2274
+ :class:`~sklearn.svm.LinearSVC`.
2275
+ If using Leave-One-Out cross-validation, alphas must be positive.
2276
+
2277
+ fit_intercept : bool, default=True
2278
+ Whether to calculate the intercept for this model. If set
2279
+ to false, no intercept will be used in calculations
2280
+ (i.e. data is expected to be centered).
2281
+
2282
+ scoring : str, callable, default=None
2283
+ A string (see model evaluation documentation) or
2284
+ a scorer callable object / function with signature
2285
+ ``scorer(estimator, X, y)``.
2286
+ If None, the negative mean squared error if cv is 'auto' or None
2287
+ (i.e. when using leave-one-out cross-validation), and r2 score
2288
+ otherwise.
2289
+
2290
+ cv : int, cross-validation generator or an iterable, default=None
2291
+ Determines the cross-validation splitting strategy.
2292
+ Possible inputs for cv are:
2293
+
2294
+ - None, to use the efficient Leave-One-Out cross-validation
2295
+ - integer, to specify the number of folds.
2296
+ - :term:`CV splitter`,
2297
+ - An iterable yielding (train, test) splits as arrays of indices.
2298
+
2299
+ For integer/None inputs, if ``y`` is binary or multiclass,
2300
+ :class:`~sklearn.model_selection.StratifiedKFold` is used, else,
2301
+ :class:`~sklearn.model_selection.KFold` is used.
2302
+
2303
+ Refer to the :ref:`User Guide <cross_validation>` for the various
2304
+ cross-validation strategies that can be used here.
2305
+
2306
+ gcv_mode : {'auto', 'svd', 'eigen'}, default='auto'
2307
+ Flag indicating which strategy to use when performing
2308
+ Leave-One-Out Cross-Validation. Options are::
2309
+
2310
+ 'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen'
2311
+ 'svd' : force use of singular value decomposition of X when X is
2312
+ dense, eigenvalue decomposition of X^T.X when X is sparse.
2313
+ 'eigen' : force computation via eigendecomposition of X.X^T
2314
+
2315
+ The 'auto' mode is the default and is intended to pick the cheaper
2316
+ option of the two depending on the shape of the training data.
2317
+
2318
+ store_cv_values : bool, default=False
2319
+ Flag indicating if the cross-validation values corresponding to
2320
+ each alpha should be stored in the ``cv_values_`` attribute (see
2321
+ below). This flag is only compatible with ``cv=None`` (i.e. using
2322
+ Leave-One-Out Cross-Validation).
2323
+
2324
+ alpha_per_target : bool, default=False
2325
+ Flag indicating whether to optimize the alpha value (picked from the
2326
+ `alphas` parameter list) for each target separately (for multi-output
2327
+ settings: multiple prediction targets). When set to `True`, after
2328
+ fitting, the `alpha_` attribute will contain a value for each target.
2329
+ When set to `False`, a single alpha is used for all targets.
2330
+
2331
+ .. versionadded:: 0.24
2332
+
2333
+ Attributes
2334
+ ----------
2335
+ cv_values_ : ndarray of shape (n_samples, n_alphas) or \
2336
+ shape (n_samples, n_targets, n_alphas), optional
2337
+ Cross-validation values for each alpha (only available if
2338
+ ``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been
2339
+ called, this attribute will contain the mean squared errors if
2340
+ `scoring is None` otherwise it will contain standardized per point
2341
+ prediction values.
2342
+
2343
+ coef_ : ndarray of shape (n_features) or (n_targets, n_features)
2344
+ Weight vector(s).
2345
+
2346
+ intercept_ : float or ndarray of shape (n_targets,)
2347
+ Independent term in decision function. Set to 0.0 if
2348
+ ``fit_intercept = False``.
2349
+
2350
+ alpha_ : float or ndarray of shape (n_targets,)
2351
+ Estimated regularization parameter, or, if ``alpha_per_target=True``,
2352
+ the estimated regularization parameter for each target.
2353
+
2354
+ best_score_ : float or ndarray of shape (n_targets,)
2355
+ Score of base estimator with best alpha, or, if
2356
+ ``alpha_per_target=True``, a score for each target.
2357
+
2358
+ .. versionadded:: 0.23
2359
+
2360
+ n_features_in_ : int
2361
+ Number of features seen during :term:`fit`.
2362
+
2363
+ .. versionadded:: 0.24
2364
+
2365
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
2366
+ Names of features seen during :term:`fit`. Defined only when `X`
2367
+ has feature names that are all strings.
2368
+
2369
+ .. versionadded:: 1.0
2370
+
2371
+ See Also
2372
+ --------
2373
+ Ridge : Ridge regression.
2374
+ RidgeClassifier : Classifier based on ridge regression on {-1, 1} labels.
2375
+ RidgeClassifierCV : Ridge classifier with built-in cross validation.
2376
+
2377
+ Examples
2378
+ --------
2379
+ >>> from sklearn.datasets import load_diabetes
2380
+ >>> from sklearn.linear_model import RidgeCV
2381
+ >>> X, y = load_diabetes(return_X_y=True)
2382
+ >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
2383
+ >>> clf.score(X, y)
2384
+ 0.5166...
2385
+ """
2386
+
2387
+ @_fit_context(prefer_skip_nested_validation=True)
2388
+ def fit(self, X, y, sample_weight=None):
2389
+ """Fit Ridge regression model with cv.
2390
+
2391
+ Parameters
2392
+ ----------
2393
+ X : ndarray of shape (n_samples, n_features)
2394
+ Training data. If using GCV, will be cast to float64
2395
+ if necessary.
2396
+
2397
+ y : ndarray of shape (n_samples,) or (n_samples, n_targets)
2398
+ Target values. Will be cast to X's dtype if necessary.
2399
+
2400
+ sample_weight : float or ndarray of shape (n_samples,), default=None
2401
+ Individual weights for each sample. If given a float, every sample
2402
+ will have the same weight.
2403
+
2404
+ Returns
2405
+ -------
2406
+ self : object
2407
+ Fitted estimator.
2408
+
2409
+ Notes
2410
+ -----
2411
+ When sample_weight is provided, the selected hyperparameter may depend
2412
+ on whether we use leave-one-out cross-validation (cv=None or cv='auto')
2413
+ or another form of cross-validation, because only leave-one-out
2414
+ cross-validation takes the sample weights into account when computing
2415
+ the validation score.
2416
+ """
2417
+ _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
2418
+ super().fit(X, y, sample_weight=sample_weight)
2419
+ return self
2420
+
2421
+
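+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # `RidgeCV` above documents `alpha_per_target`: with multi-output targets and
+ # leave-one-out CV, a separate alpha is selected for every column of y and
+ # `alpha_` becomes an array. A minimal sketch on made-up data:
+ def _sketch_alpha_per_target():
+     import numpy as np
+     from sklearn.linear_model import RidgeCV
+
+     rng = np.random.RandomState(0)
+     X = rng.randn(40, 5)
+     Y = rng.randn(40, 2)  # two regression targets
+
+     model = RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0], alpha_per_target=True)
+     model.fit(X, Y)
+     assert model.alpha_.shape == (2,)  # one selected alpha per target
+     assert model.coef_.shape == (2, 5)
+     return model.alpha_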
2422
+ class RidgeClassifierCV(_RoutingNotSupportedMixin, _RidgeClassifierMixin, _BaseRidgeCV):
2423
+ """Ridge classifier with built-in cross-validation.
2424
+
2425
+ See glossary entry for :term:`cross-validation estimator`.
2426
+
2427
+ By default, it performs Leave-One-Out Cross-Validation. Currently,
2428
+ only the n_features > n_samples case is handled efficiently.
2429
+
2430
+ Read more in the :ref:`User Guide <ridge_regression>`.
2431
+
2432
+ Parameters
2433
+ ----------
2434
+ alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0)
2435
+ Array of alpha values to try.
2436
+ Regularization strength; must be a positive float. Regularization
2437
+ improves the conditioning of the problem and reduces the variance of
2438
+ the estimates. Larger values specify stronger regularization.
2439
+ Alpha corresponds to ``1 / (2C)`` in other linear models such as
2440
+ :class:`~sklearn.linear_model.LogisticRegression` or
2441
+ :class:`~sklearn.svm.LinearSVC`.
2442
+
2443
+ fit_intercept : bool, default=True
2444
+ Whether to calculate the intercept for this model. If set
2445
+ to false, no intercept will be used in calculations
2446
+ (i.e. data is expected to be centered).
2447
+
2448
+ scoring : str, callable, default=None
2449
+ A string (see model evaluation documentation) or
2450
+ a scorer callable object / function with signature
2451
+ ``scorer(estimator, X, y)``.
2452
+
2453
+ cv : int, cross-validation generator or an iterable, default=None
2454
+ Determines the cross-validation splitting strategy.
2455
+ Possible inputs for cv are:
2456
+
2457
+ - None, to use the efficient Leave-One-Out cross-validation
2458
+ - integer, to specify the number of folds.
2459
+ - :term:`CV splitter`,
2460
+ - An iterable yielding (train, test) splits as arrays of indices.
2461
+
2462
+ Refer to the :ref:`User Guide <cross_validation>` for the various
2463
+ cross-validation strategies that can be used here.
2464
+
2465
+ class_weight : dict or 'balanced', default=None
2466
+ Weights associated with classes in the form ``{class_label: weight}``.
2467
+ If not given, all classes are supposed to have weight one.
2468
+
2469
+ The "balanced" mode uses the values of y to automatically adjust
2470
+ weights inversely proportional to class frequencies in the input data
2471
+ as ``n_samples / (n_classes * np.bincount(y))``.
2472
+
2473
+ store_cv_values : bool, default=False
2474
+ Flag indicating if the cross-validation values corresponding to
2475
+ each alpha should be stored in the ``cv_values_`` attribute (see
2476
+ below). This flag is only compatible with ``cv=None`` (i.e. using
2477
+ Leave-One-Out Cross-Validation).
2478
+
2479
+ Attributes
2480
+ ----------
2481
+ cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional
2482
+ Cross-validation values for each alpha (only if ``store_cv_values=True`` and
2483
+ ``cv=None``). After ``fit()`` has been called, this attribute will
2484
+ contain the mean squared errors if `scoring is None` otherwise it
2485
+ will contain standardized per point prediction values.
2486
+
2487
+ coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)
2488
+ Coefficient of the features in the decision function.
2489
+
2490
+ ``coef_`` is of shape (1, n_features) when the given problem is binary.
2491
+
2492
+ intercept_ : float or ndarray of shape (n_targets,)
2493
+ Independent term in decision function. Set to 0.0 if
2494
+ ``fit_intercept = False``.
2495
+
2496
+ alpha_ : float
2497
+ Estimated regularization parameter.
2498
+
2499
+ best_score_ : float
2500
+ Score of base estimator with best alpha.
2501
+
2502
+ .. versionadded:: 0.23
2503
+
2504
+ classes_ : ndarray of shape (n_classes,)
2505
+ The classes labels.
2506
+
2507
+ n_features_in_ : int
2508
+ Number of features seen during :term:`fit`.
2509
+
2510
+ .. versionadded:: 0.24
2511
+
2512
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
2513
+ Names of features seen during :term:`fit`. Defined only when `X`
2514
+ has feature names that are all strings.
2515
+
2516
+ .. versionadded:: 1.0
2517
+
2518
+ See Also
2519
+ --------
2520
+ Ridge : Ridge regression.
2521
+ RidgeClassifier : Ridge classifier.
2522
+ RidgeCV : Ridge regression with built-in cross validation.
2523
+
2524
+ Notes
2525
+ -----
2526
+ For multi-class classification, n_class classifiers are trained in
2527
+ a one-versus-all approach. Concretely, this is implemented by taking
2528
+ advantage of the multi-variate response support in Ridge.
2529
+
2530
+ Examples
2531
+ --------
2532
+ >>> from sklearn.datasets import load_breast_cancer
2533
+ >>> from sklearn.linear_model import RidgeClassifierCV
2534
+ >>> X, y = load_breast_cancer(return_X_y=True)
2535
+ >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
2536
+ >>> clf.score(X, y)
2537
+ 0.9630...
2538
+ """
2539
+
2540
+ _parameter_constraints: dict = {
2541
+ **_BaseRidgeCV._parameter_constraints,
2542
+ "class_weight": [dict, StrOptions({"balanced"}), None],
2543
+ }
2544
+ for param in ("gcv_mode", "alpha_per_target"):
2545
+ _parameter_constraints.pop(param)
2546
+
2547
+ def __init__(
2548
+ self,
2549
+ alphas=(0.1, 1.0, 10.0),
2550
+ *,
2551
+ fit_intercept=True,
2552
+ scoring=None,
2553
+ cv=None,
2554
+ class_weight=None,
2555
+ store_cv_values=False,
2556
+ ):
2557
+ super().__init__(
2558
+ alphas=alphas,
2559
+ fit_intercept=fit_intercept,
2560
+ scoring=scoring,
2561
+ cv=cv,
2562
+ store_cv_values=store_cv_values,
2563
+ )
2564
+ self.class_weight = class_weight
2565
+
2566
+ @_fit_context(prefer_skip_nested_validation=True)
2567
+ def fit(self, X, y, sample_weight=None):
2568
+ """Fit Ridge classifier with cv.
2569
+
2570
+ Parameters
2571
+ ----------
2572
+ X : ndarray of shape (n_samples, n_features)
2573
+ Training vectors, where `n_samples` is the number of samples
2574
+ and `n_features` is the number of features. When using GCV,
2575
+ will be cast to float64 if necessary.
2576
+
2577
+ y : ndarray of shape (n_samples,)
2578
+ Target values. Will be cast to X's dtype if necessary.
2579
+
2580
+ sample_weight : float or ndarray of shape (n_samples,), default=None
2581
+ Individual weights for each sample. If given a float, every sample
2582
+ will have the same weight.
2583
+
2584
+ Returns
2585
+ -------
2586
+ self : object
2587
+ Fitted estimator.
2588
+ """
2589
+ _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
2590
+ # `RidgeClassifier` does not accept the "sag" or "saga" solvers and therefore
2591
+ # supports csr, csc, and coo sparse matrices. By using solver="eigen" we force
2592
+ # the acceptance of all sparse formats.
2593
+ X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen")
2594
+
2595
+ # If cv is None, GCV mode will be used and we use the binarized Y,
2596
+ # since y will not be binarized in the _RidgeGCV estimator.
2597
+ # If cv is not None, a GridSearchCV over RidgeClassifier
2598
+ # estimators is used, where y will be binarized. Thus, we pass y
2599
+ # instead of the binarized Y.
2600
+ target = Y if self.cv is None else y
2601
+ super().fit(X, target, sample_weight=sample_weight)
2602
+ return self
2603
+
2604
+ def _more_tags(self):
2605
+ return {
2606
+ "multilabel": True,
2607
+ "_xfail_checks": {
2608
+ "check_sample_weights_invariance": (
2609
+ "zero sample_weight is not equivalent to removing samples"
2610
+ ),
2611
+ },
2612
+ }
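+ # --- Illustrative sketch (editorial addition, not part of this module) ------
+ # A small end-to-end sketch of `RidgeClassifierCV`: leave-one-out CV over a
+ # grid of alphas, optionally reweighting classes. Dataset and grid are only
+ # illustrative.
+ def _sketch_ridge_classifier_cv():
+     from sklearn.datasets import load_breast_cancer
+     from sklearn.linear_model import RidgeClassifierCV
+
+     X, y = load_breast_cancer(return_X_y=True)
+     clf = RidgeClassifierCV(
+         alphas=[1e-3, 1e-2, 1e-1, 1.0], class_weight="balanced"
+     ).fit(X, y)
+     # alpha_ holds the selected regularization strength, best_score_ the
+     # corresponding leave-one-out score.
+     return clf.alpha_, clf.best_score_, clf.score(X, y)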
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (307 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (16.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc ADDED
Binary file (9.34 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (2.94 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc ADDED
Binary file (40.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc ADDED
Binary file (5.66 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc ADDED
Binary file (7.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc ADDED
Binary file (47.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc ADDED
Binary file (7.61 kB). View file